remove dot imports in e2e/node

k3s-v1.15.3
danielqsj 2019-05-10 12:32:08 +08:00
parent d01c015346
commit 087bc1369e
13 changed files with 200 additions and 200 deletions
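The change applied in every file below follows one pattern: the Ginkgo and Gomega dot imports are replaced with ordinary qualified imports, and each formerly bare identifier (Describe, Context, BeforeEach, AfterEach, It, By, Expect, Eventually, and the matchers) gains a ginkgo. or gomega. prefix. A minimal sketch of the before and after shapes, with illustrative package and test names that are not taken from the changed files:

// Before: dot imports pull the Ginkgo and Gomega identifiers into the file's scope.
package node

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

var _ = Describe("example", func() {
    It("asserts without package qualifiers", func() {
        By("doing the thing")
        Expect(1 + 1).To(Equal(2))
    })
})

// After: plain imports; every call site names the package it comes from.
package node

import (
    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

var _ = ginkgo.Describe("example", func() {
    ginkgo.It("asserts with package qualifiers", func() {
        ginkgo.By("doing the thing")
        gomega.Expect(1 + 1).To(gomega.Equal(2))
    })
})

Calls such as SIGDescribe and the framework helpers are untouched in the hunks below because they were never dot-imported; SIGDescribe appears to be a wrapper defined in the e2e/node package itself. The usual motivation for this kind of cleanup is readability and lint friendliness: with qualified imports it is obvious which package each test helper and matcher comes from.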


@ -21,29 +21,29 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = SIGDescribe("AppArmor", func() {
f := framework.NewDefaultFramework("apparmor")
Context("load AppArmor profiles", func() {
BeforeEach(func() {
ginkgo.Context("load AppArmor profiles", func() {
ginkgo.BeforeEach(func() {
common.SkipIfAppArmorNotSupported()
common.LoadAppArmorProfiles(f)
})
AfterEach(func() {
if !CurrentGinkgoTestDescription().Failed {
ginkgo.AfterEach(func() {
if !ginkgo.CurrentGinkgoTestDescription().Failed {
return
}
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
})
It("should enforce an AppArmor profile", func() {
ginkgo.It("should enforce an AppArmor profile", func() {
common.CreateAppArmorTestPod(f, false, true)
})
It("can disable an AppArmor profile, using unconfined", func() {
ginkgo.It("can disable an AppArmor profile, using unconfined", func() {
common.CreateAppArmorTestPod(f, true, true)
})
})


@ -24,22 +24,22 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = SIGDescribe("crictl", func() {
f := framework.NewDefaultFramework("crictl")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// `crictl` is not available on all cloud providers.
framework.SkipUnlessProviderIs("gce", "gke")
// The test requires $HOME/.ssh/id_rsa key to be present.
framework.SkipUnlessSSHKeyPresent()
})
It("should be able to run crictl on the node", func() {
ginkgo.It("should be able to run crictl on the node", func() {
// Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses")
ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil {
framework.Failf("Error getting node hostnames: %v", err)
@ -55,7 +55,7 @@ var _ = SIGDescribe("crictl", func() {
for _, testCase := range testCases {
// Choose an arbitrary node to test.
host := hosts[0]
By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))
ginkgo.By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)


@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = SIGDescribe("Events", func() {
@ -45,7 +45,7 @@ var _ = SIGDescribe("Events", func() {
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
By("creating the pod")
ginkgo.By("creating the pod")
name := "send-events-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
@ -67,9 +67,9 @@ var _ = SIGDescribe("Events", func() {
},
}
By("submitting the pod to kubernetes")
ginkgo.By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
ginkgo.By("deleting the pod")
podClient.Delete(pod.Name, nil)
}()
if _, err := podClient.Create(pod); err != nil {
@ -78,13 +78,13 @@ var _ = SIGDescribe("Events", func() {
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes")
ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(len(pods.Items)).To(Equal(1))
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
By("retrieving the pod")
ginkgo.By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod: %v", err)
@ -92,7 +92,7 @@ var _ = SIGDescribe("Events", func() {
e2elog.Logf("%+v\n", podWithUid)
var events *v1.EventList
// Check for scheduler event about the pod.
By("checking for scheduler event about the pod")
ginkgo.By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.kind": "Pod",
@ -112,7 +112,7 @@ var _ = SIGDescribe("Events", func() {
return false, nil
}))
// Check for kubelet event about the pod.
By("checking for kubelet event about the pod")
ginkgo.By("checking for kubelet event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.uid": string(podWithUid.UID),


@ -35,8 +35,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -119,7 +119,7 @@ func stopNfsServer(serverPod *v1.Pod) {
// will execute the passed in shell cmd. Waits for the pod to start.
// Note: the nfs plugin is defined inline, no PV or PVC.
func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
By("create pod using nfs volume")
ginkgo.By("create pod using nfs volume")
isPrivileged := true
cmdLine := []string{"-c", cmd}
@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
},
}
rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = f.WaitForPodReady(rtnPod.Name) // running & ready
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return rtnPod
}
@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
// use ip rather than hostname in GCE
nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
condMsg := "deleted"
if !expectClean {
@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
err = wait.Poll(poll, timeout, func() (bool, error) {
result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2essh.LogResult(result)
ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
if expectClean && ok { // keep trying
@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
}
return true, nil // done, host is as expected
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
}
if expectClean {
@ -244,7 +244,7 @@ var _ = SIGDescribe("kubelet", func() {
)
f := framework.NewDefaultFramework("kubelet")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
@ -265,14 +265,14 @@ var _ = SIGDescribe("kubelet", func() {
{podsPerNode: 10, timeout: 1 * time.Minute},
}
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// Use node labels to restrict the pods to be assigned only to the
// nodes we observe initially.
nodeLabels = make(map[string]string)
nodeLabels["kubelet_cleanup"] = "true"
nodes := framework.GetReadySchedulableNodesOrDie(c)
numNodes = len(nodes.Items)
Expect(numNodes).NotTo(BeZero())
gomega.Expect(numNodes).NotTo(gomega.BeZero())
nodeNames = sets.NewString()
// If there are a lot of nodes, we don't want to use all of them
// (if there are 1000 nodes in the cluster, starting 10 pods/node
@ -297,7 +297,7 @@ var _ = SIGDescribe("kubelet", func() {
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
if resourceMonitor != nil {
resourceMonitor.Stop()
}
@ -312,30 +312,30 @@ var _ = SIGDescribe("kubelet", func() {
for _, itArg := range deleteTests {
name := fmt.Sprintf(
"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
It(name, func() {
ginkgo.It(name, func() {
totalPods := itArg.podsPerNode * numNodes
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
Expect(framework.RunRC(testutils.RCConfig{
gomega.Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: rcName,
Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(),
Replicas: totalPods,
NodeSelector: nodeLabels,
})).NotTo(HaveOccurred())
})).NotTo(gomega.HaveOccurred())
// Perform a sanity check so that we know all desired pods are
// running on the nodes according to kubelet. The timeout is set to
// only 30 seconds here because framework.RunRC already waited for all pods to
// transition to the running status.
Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
time.Second*30)).NotTo(HaveOccurred())
gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
time.Second*30)).NotTo(gomega.HaveOccurred())
if resourceMonitor != nil {
resourceMonitor.LogLatest()
}
By("Deleting the RC")
ginkgo.By("Deleting the RC")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its
@ -345,8 +345,8 @@ var _ = SIGDescribe("kubelet", func() {
// - a bug in graceful termination (if it is enabled)
// - docker slow to delete pods (or resource problems causing slowness)
start := time.Now()
Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
itArg.timeout)).NotTo(HaveOccurred())
gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
itArg.timeout)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
time.Since(start))
if resourceMonitor != nil {
@ -369,7 +369,7 @@ var _ = SIGDescribe("kubelet", func() {
// If the nfs-server pod is deleted the client pod's mount can not be unmounted.
// If the nfs-server pod is deleted and re-created, due to having a different ip
// addr, the client pod's mount still cannot be unmounted.
Context("Host cleanup after disrupting NFS volume [NFS]", func() {
ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
// issue #31272
var (
nfsServerPod *v1.Pod
@ -389,38 +389,38 @@ var _ = SIGDescribe("kubelet", func() {
},
}
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
_, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
})
AfterEach(func() {
ginkgo.AfterEach(func() {
err := framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
err = framework.DeletePodWithWait(f, c, nfsServerPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
})
// execute It blocks from above table of tests
for _, t := range testTbl {
It(t.itDescr, func() {
ginkgo.It(t.itDescr, func() {
pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)
By("Stop the NFS server")
ginkgo.By("Stop the NFS server")
stopNfsServer(nfsServerPod)
By("Delete the pod mounted to the NFS volume -- expect failure")
ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
err := framework.DeletePodWithWait(f, c, pod)
Expect(err).To(HaveOccurred())
gomega.Expect(err).To(gomega.HaveOccurred())
// pod object is now stale, but is intentionally not nil
By("Check if pod's host has been cleaned up -- expect not")
ginkgo.By("Check if pod's host has been cleaned up -- expect not")
checkPodCleanup(c, pod, false)
By("Restart the nfs server")
ginkgo.By("Restart the nfs server")
restartNfsServer(nfsServerPod)
By("Verify that the deleted client pod is now cleaned up")
ginkgo.By("Verify that the deleted client pod is now cleaned up")
checkPodCleanup(c, pod, true)
})
}


@ -30,8 +30,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -66,23 +66,23 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
numNodes := nodeNames.Len()
totalPods := podsPerNode * numNodes
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))
// TODO: Use a more realistic workload
Expect(framework.RunRC(testutils.RCConfig{
gomega.Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: rcName,
Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(),
Replicas: totalPods,
})).NotTo(HaveOccurred())
})).NotTo(gomega.HaveOccurred())
// Log once and flush the stats.
rm.LogLatest()
rm.Reset()
By("Start monitoring resource usage")
ginkgo.By("Start monitoring resource usage")
// Periodically dump the cpu summary until the deadline is met.
// Note that without calling framework.ResourceMonitor.Reset(), the stats
// would occupy increasingly more memory. This should be fine
@ -100,10 +100,10 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
logPodsOnNodes(f.ClientSet, nodeNames.List())
}
By("Reporting overall resource usage")
ginkgo.By("Reporting overall resource usage")
logPodsOnNodes(f.ClientSet, nodeNames.List())
usageSummary, err := rm.GetLatest()
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// TODO(random-liu): Remove the original log when we migrate to new perfdash
e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
// Log perf result
@ -116,7 +116,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
verifyCPULimits(expectedCPU, cpuSummary)
By("Deleting the RC")
ginkgo.By("Deleting the RC")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
}
@ -197,7 +197,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
var om *framework.RuntimeOperationMonitor
var rm *framework.ResourceMonitor
BeforeEach(func() {
ginkgo.BeforeEach(func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeNames = sets.NewString()
for _, node := range nodes.Items {
@ -208,7 +208,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
rm.Start()
})
AfterEach(func() {
ginkgo.AfterEach(func() {
rm.Stop()
result := om.GetLatestRuntimeOperationErrorRate()
e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
@ -260,7 +260,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
podsPerNode := itArg.podsPerNode
name := fmt.Sprintf(
"resource tracking for %d pods per node", podsPerNode)
It(name, func() {
ginkgo.It(name, func() {
runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits)
})
}
@ -271,7 +271,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
podsPerNode := density[i]
name := fmt.Sprintf(
"resource tracking for %d pods per node", podsPerNode)
It(name, func() {
ginkgo.It(name, func() {
runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil)
})
}


@ -27,8 +27,8 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod {
@ -80,7 +80,7 @@ func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode
var _ = SIGDescribe("Mount propagation", func() {
f := framework.NewDefaultFramework("mount-propagation")
It("should propagate mounts to the host", func() {
ginkgo.It("should propagate mounts to the host", func() {
// This test runs two pods: master and slave with respective mount
// propagation on common /var/lib/kubelet/XXXX directory. Both mount a
// tmpfs to a subdirectory there. We check that these mounts are
@ -88,13 +88,13 @@ var _ = SIGDescribe("Mount propagation", func() {
// Pick a node where all pods will run.
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling")
node := &nodes.Items[0]
// Fail the test if the namespace is not set. We expect that the
// namespace is unique and we might delete user data if it's not.
if len(f.Namespace.Name) == 0 {
Expect(f.Namespace.Name).ToNot(Equal(""))
gomega.Expect(f.Namespace.Name).ToNot(gomega.Equal(""))
return
}
@ -172,10 +172,10 @@ var _ = SIGDescribe("Mount propagation", func() {
shouldBeVisible := mounts.Has(mountName)
if shouldBeVisible {
framework.ExpectNoError(err, "%s: failed to run %q", msg, cmd)
Expect(stdout).To(Equal(mountName), msg)
gomega.Expect(stdout).To(gomega.Equal(mountName), msg)
} else {
// We *expect* cat to return error here
Expect(err).To(HaveOccurred(), msg)
gomega.Expect(err).To(gomega.HaveOccurred(), msg)
}
}
}


@ -32,8 +32,8 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// This test checks if node-problem-detector (NPD) runs fine without error on
@ -45,7 +45,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
)
f := framework.NewDefaultFramework("node-problem-detector")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessSSHKeyPresent()
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
framework.SkipUnlessProviderIs("gce", "gke")
@ -53,10 +53,10 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
})
It("should run without error", func() {
By("Getting all nodes and their SSH-able IP addresses")
ginkgo.It("should run without error", func() {
ginkgo.By("Getting all nodes and their SSH-able IP addresses")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero())
gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
hosts := []string{}
for _, node := range nodes.Items {
for _, addr := range node.Status.Addresses {
@ -66,7 +66,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
}
}
}
Expect(len(hosts)).To(Equal(len(nodes.Items)))
gomega.Expect(len(hosts)).To(gomega.Equal(len(nodes.Items)))
isStandaloneMode := make(map[string]bool)
cpuUsageStats := make(map[string][]float64)
@ -84,22 +84,22 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
isStandaloneMode[host] = (err == nil && result.Code == 0)
By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
ginkgo.By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
// Using brackets "[n]" is a trick to prevent grep command itself from
// showing up, because string text "[n]ode-problem-detector" does not
// match regular expression "[n]ode-problem-detector".
psCmd := "ps aux | grep [n]ode-problem-detector"
result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
Expect(result.Code).To(BeZero())
Expect(result.Stdout).To(ContainSubstring("node-problem-detector"))
gomega.Expect(result.Code).To(gomega.BeZero())
gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector"))
By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
journalctlCmd := "sudo journalctl -u node-problem-detector"
result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
Expect(result.Code).To(BeZero())
Expect(result.Stdout).NotTo(ContainSubstring("node-problem-detector.service: Failed"))
gomega.Expect(result.Code).To(gomega.BeZero())
gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed"))
if isStandaloneMode[host] {
cpuUsage, uptime := getCpuStat(f, host)
@ -107,29 +107,29 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
uptimeStats[host] = append(uptimeStats[host], uptime)
}
By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
ginkgo.By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
log := "INFO: task umount.aufs:21568 blocked for more than 120 seconds."
injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\""
_, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
Expect(result.Code).To(BeZero())
gomega.Expect(result.Code).To(gomega.BeZero())
}
By("Check node-problem-detector can post conditions and events to API server")
ginkgo.By("Check node-problem-detector can post conditions and events to API server")
for _, node := range nodes.Items {
By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
Eventually(func() error {
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
gomega.Eventually(func() error {
return verifyNodeCondition(f, "KernelDeadlock", v1.ConditionTrue, "AUFSUmountHung", node.Name)
}, pollTimeout, pollInterval).Should(Succeed())
}, pollTimeout, pollInterval).Should(gomega.Succeed())
By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name))
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name))
eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()}
Eventually(func() error {
gomega.Eventually(func() error {
return verifyEvents(f, eventListOptions, 1, "AUFSUmountHung", node.Name)
}, pollTimeout, pollInterval).Should(Succeed())
}, pollTimeout, pollInterval).Should(gomega.Succeed())
}
By("Gather node-problem-detector cpu and memory stats")
ginkgo.By("Gather node-problem-detector cpu and memory stats")
numIterations := 60
for i := 1; i <= numIterations; i++ {
for j, host := range hosts {
@ -217,22 +217,22 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64
memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat"
result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
Expect(result.Code).To(BeZero())
gomega.Expect(result.Code).To(gomega.BeZero())
lines := strings.Split(result.Stdout, "\n")
memoryUsage, err := strconv.ParseFloat(lines[0], 64)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
var totalInactiveFile float64
for _, line := range lines[1:] {
tokens := strings.Split(line, " ")
if tokens[0] == "total_rss" {
rss, err = strconv.ParseFloat(tokens[1], 64)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
}
if tokens[0] == "total_inactive_file" {
totalInactiveFile, err = strconv.ParseFloat(tokens[1], 64)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
}
}
@ -253,7 +253,7 @@ func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) {
cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'"
result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
Expect(result.Code).To(BeZero())
gomega.Expect(result.Code).To(gomega.BeZero())
lines := strings.Split(result.Stdout, "\n")
usage, err = strconv.ParseFloat(lines[0], 64)
@ -279,6 +279,6 @@ func getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, work
hasNpdPod = true
break
}
Expect(hasNpdPod).To(BeTrue())
gomega.Expect(hasNpdPod).To(gomega.BeTrue())
return
}


@ -20,7 +20,7 @@ import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -36,7 +36,7 @@ import (
// Slow by design (7 min)
var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() {
f := framework.NewDefaultFramework("pod-garbage-collector")
It("should handle the creation of 1000 pods", func() {
ginkgo.It("should handle the creation of 1000 pods", func() {
var count int
for count < 1000 {
pod, err := createTerminatingPod(f)
@ -62,7 +62,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
timeout := 2 * time.Minute
gcThreshold := 100
By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
ginkgo.By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {


@ -34,8 +34,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -44,7 +44,7 @@ var _ = SIGDescribe("Pods Extended", func() {
framework.KubeDescribe("Delete Grace Period", func() {
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@ -54,7 +54,7 @@ var _ = SIGDescribe("Pods Extended", func() {
Description: Create a pod, make sure it is running. Create a 'kubectl local proxy', capture the port the proxy is listening. Using the http client send a delete with gracePeriodSeconds=30. Pod SHOULD get deleted within 30 seconds.
*/
framework.ConformanceIt("should be submitted and removed", func() {
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
@ -75,51 +75,51 @@ var _ = SIGDescribe("Pods Extended", func() {
},
}
By("setting up selector")
ginkgo.By("setting up selector")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
By("submitting the pod to kubernetes")
ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)
By("verifying the pod is in kubernetes")
ginkgo.By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(1))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
// start local proxy, so we can send graceful deletion over query string, rather than body parameter
cmd := framework.KubectlCmd("proxy", "-p", "0")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to start up proxy")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to start up proxy")
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
buf := make([]byte, 128)
var n int
n, err = stdout.Read(buf)
Expect(err).NotTo(HaveOccurred(), "failed to read from kubectl proxy stdout")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from kubectl proxy stdout")
output := string(buf[:n])
proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
match := proxyRegexp.FindStringSubmatch(output)
Expect(len(match)).To(Equal(2))
gomega.Expect(len(match)).To(gomega.Equal(2))
port, err := strconv.Atoi(match[1])
Expect(err).NotTo(HaveOccurred(), "failed to convert port into string")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to convert port into string")
endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
tr := &http.Transport{
@ -127,21 +127,21 @@ var _ = SIGDescribe("Pods Extended", func() {
}
client := &http.Client{Transport: tr}
req, err := http.NewRequest("DELETE", endpoint, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create http request")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create http request")
By("deleting the pod gracefully")
ginkgo.By("deleting the pod gracefully")
rsp, err := client.Do(req)
Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete")
Expect(rsp.StatusCode).Should(Equal(http.StatusOK), "failed to delete gracefully by client request")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to use http client to send delete")
gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
var lastPod v1.Pod
err = json.NewDecoder(rsp.Body).Decode(&lastPod)
Expect(err).NotTo(HaveOccurred(), "failed to decode graceful termination proxy response")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to decode graceful termination proxy response")
defer rsp.Body.Close()
By("verifying the kubelet observed the termination notice")
ginkgo.By("verifying the kubelet observed the termination notice")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
@ -159,23 +159,23 @@ var _ = SIGDescribe("Pods Extended", func() {
}
e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
})
})
framework.KubeDescribe("Pods Set QOS Class", func() {
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
/*
@ -184,7 +184,7 @@ var _ = SIGDescribe("Pods Extended", func() {
Description: Create a Pod with CPU and Memory request and limits. Pod status MUST have QOSClass set to PodQOSGuaranteed.
*/
framework.ConformanceIt("should be submitted and removed ", func() {
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-qos-class-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -213,13 +213,13 @@ var _ = SIGDescribe("Pods Extended", func() {
},
}
By("submitting the pod to kubernetes")
ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)
By("verifying QOS class is set on the pod")
ginkgo.By("verifying QOS class is set on the pod")
pod, err := podClient.Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
gomega.Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
})
})
})


@ -32,8 +32,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// partially cloned from webserver.go
@ -57,17 +57,17 @@ func testPreStop(c clientset.Interface, ns string) {
},
},
}
By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
ginkgo.By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
podDescr, err := c.CoreV1().Pods(ns).Create(podDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod.
defer func() {
By("Deleting the server pod")
ginkgo.By("Deleting the server pod")
c.CoreV1().Pods(ns).Delete(podDescr.Name, nil)
}()
By("Waiting for pods to come up.")
ginkgo.By("Waiting for pods to come up.")
err = framework.WaitForPodRunningInNamespace(c, podDescr)
framework.ExpectNoError(err, "waiting for server pod to start")
@ -100,7 +100,7 @@ func testPreStop(c clientset.Interface, ns string) {
},
}
By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
ginkgo.By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
preStopDescr, err = c.CoreV1().Pods(ns).Create(preStopDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
deletePreStop := true
@ -108,7 +108,7 @@ func testPreStop(c clientset.Interface, ns string) {
// At the end of the test, clean up by removing the pod.
defer func() {
if deletePreStop {
By("Deleting the tester pod")
ginkgo.By("Deleting the tester pod")
c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil)
}
}()
@ -117,7 +117,7 @@ func testPreStop(c clientset.Interface, ns string) {
framework.ExpectNoError(err, "waiting for tester pod to start")
// Delete the pod with the preStop handler.
By("Deleting pre-stop pod")
ginkgo.By("Deleting pre-stop pod")
if err := c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil); err == nil {
deletePreStop = false
}
@ -144,7 +144,7 @@ func testPreStop(c clientset.Interface, ns string) {
framework.Failf("Error validating prestop: %v", err)
return true, err
}
By(fmt.Sprintf("Error validating prestop: %v", err))
ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err))
} else {
e2elog.Logf("Saw: %s", string(body))
state := State{}
@ -165,7 +165,7 @@ func testPreStop(c clientset.Interface, ns string) {
var _ = SIGDescribe("PreStop", func() {
f := framework.NewDefaultFramework("prestop")
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@ -178,36 +178,36 @@ var _ = SIGDescribe("PreStop", func() {
testPreStop(f.ClientSet, f.Namespace.Name)
})
It("graceful pod terminated should wait until preStop hook completes the process", func() {
ginkgo.It("graceful pod terminated should wait until preStop hook completes the process", func() {
gracefulTerminationPeriodSeconds := int64(30)
By("creating the pod")
ginkgo.By("creating the pod")
name := "pod-prestop-hook-" + string(uuid.NewUUID())
pod := getPodWithpreStopLifeCycle(name)
By("submitting the pod to kubernetes")
ginkgo.By("submitting the pod to kubernetes")
podClient.Create(pod)
By("waiting for pod running")
ginkgo.By("waiting for pod running")
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
var err error
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
By("deleting the pod gracefully")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
//wait up to graceful termination period seconds
time.Sleep(30 * time.Second)
By("verifying the pod running state after graceful termination")
ginkgo.By("verifying the pod running state after graceful termination")
result := &v1.PodList{}
err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
Expect(err).NotTo(HaveOccurred(), "failed to get the pods of the node")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the pods of the node")
err = client.Into(result)
Expect(err).NotTo(HaveOccurred(), "failed to parse the pods of the node")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to parse the pods of the node")
for _, kubeletPod := range result.Items {
if pod.Name != kubeletPod.Name {


@ -31,8 +31,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
@ -63,7 +63,7 @@ func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
var _ = SIGDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context")
It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() {
ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() {
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"id", "-G"}
pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
@ -71,7 +71,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
})
It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() {
ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
pod.Spec.SecurityContext.RunAsUser = &userID
@ -83,7 +83,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() {
ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
groupID := int64(2002)
@ -97,7 +97,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() {
ginkgo.It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
overrideUserID := int64(1002)
@ -112,7 +112,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() {
ginkgo.It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() {
pod := scTestPod(false, false)
userID := int64(1001)
groupID := int64(2001)
@ -131,19 +131,19 @@ var _ = SIGDescribe("Security Context", func() {
})
})
It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() {
ginkgo.It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() {
testPodSELinuxLabeling(f, false, false)
})
It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() {
ginkgo.It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() {
testPodSELinuxLabeling(f, true, false)
})
It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() {
ginkgo.It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() {
testPodSELinuxLabeling(f, false, true)
})
It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]", func() {
ginkgo.It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
@ -152,7 +152,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]", func() {
ginkgo.It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
@ -160,7 +160,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
It("should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]", func() {
ginkgo.It("should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = v1.SeccompProfileRuntimeDefault
@ -168,7 +168,7 @@ var _ = SIGDescribe("Security Context", func() {
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
})
It("should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]", func() {
ginkgo.It("should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
@ -212,18 +212,18 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
testContent := "hello"
testFilePath := mountPath + "/TEST"
err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
Expect(err).To(BeNil())
gomega.Expect(err).To(gomega.BeNil())
content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
Expect(err).To(BeNil())
Expect(content).To(ContainSubstring(testContent))
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(content).To(gomega.ContainSubstring(testContent))
foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Confirm that the file can be accessed from a second
// pod using host_path with the same MCS label
volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeVolumeDir, foundPod.UID, volumeName)
By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
ginkgo.By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
pod = scTestPod(hostIPC, hostPID)
pod.Spec.NodeName = foundPod.Spec.NodeName
volumeMounts := []v1.VolumeMount{
@ -266,5 +266,5 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)
content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
Expect(content).NotTo(ContainSubstring(testContent))
gomega.Expect(content).NotTo(gomega.ContainSubstring(testContent))
}


@ -24,7 +24,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
const maxNodes = 100
@ -33,7 +33,7 @@ var _ = SIGDescribe("SSH", func() {
f := framework.NewDefaultFramework("ssh")
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// When adding more providers here, also implement their functionality in e2essh.GetSigner(...).
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
@ -42,9 +42,9 @@ var _ = SIGDescribe("SSH", func() {
framework.SkipUnlessSSHKeyPresent()
})
It("should SSH to all nodes and run commands", func() {
ginkgo.It("should SSH to all nodes and run commands", func() {
// Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses")
ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil {
framework.Failf("Error getting node hostnames: %v", err)
@ -76,7 +76,7 @@ var _ = SIGDescribe("SSH", func() {
nodes = maxNodes
}
testhosts := hosts[:nodes]
By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))
ginkgo.By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))
for _, host := range testhosts {
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
@ -104,7 +104,7 @@ var _ = SIGDescribe("SSH", func() {
}
// Quickly test that SSH itself errors correctly.
By("SSH'ing to a nonexistent host")
ginkgo.By("SSH'ing to a nonexistent host")
if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
framework.Failf("Expected error trying to SSH to nonexistent host.")
}


@ -27,8 +27,8 @@ import (
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const dummyFinalizer = "k8s.io/dummy-finalizer"
@ -36,7 +36,7 @@ const dummyFinalizer = "k8s.io/dummy-finalizer"
var _ = framework.KubeDescribe("[Feature:TTLAfterFinished][NodeAlphaFeature:TTLAfterFinished]", func() {
f := framework.NewDefaultFramework("ttlafterfinished")
It("job should be deleted once it finishes after TTL seconds", func() {
ginkgo.It("job should be deleted once it finishes after TTL seconds", func() {
testFinishedJob(f)
})
})
@ -50,11 +50,11 @@ func cleanupJob(f *framework.Framework, job *batch.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
err = jobutil.WaitForAllJobPodsGone(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
func testFinishedJob(f *framework.Framework) {
@ -73,26 +73,26 @@ func testFinishedJob(f *framework.Framework) {
e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := jobutil.CreateJob(c, ns, job)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Wait for the Job to finish")
err = jobutil.WaitForJobFinish(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Wait for TTL after finished controller to delete the Job")
err = jobutil.WaitForJobDeleting(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = jobutil.GetJob(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
finishTime := jobutil.FinishTime(job)
finishTimeUTC := finishTime.UTC()
Expect(finishTime.IsZero()).NotTo(BeTrue())
gomega.Expect(finishTime.IsZero()).NotTo(gomega.BeTrue())
deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
Expect(deleteAtUTC).NotTo(BeNil())
gomega.Expect(deleteAtUTC).NotTo(gomega.BeNil())
expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse())
gomega.Expect(deleteAtUTC.Before(expireAtUTC)).To(gomega.BeFalse())
}