mirror of https://github.com/k3s-io/k3s

refactor: use e2elog.Logf instead of framework.Logf

parent: a26709ecee
commit: da7507500f

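For orientation, the following is a minimal sketch of the change this commit applies at every call site. It is illustrative only and not part of the diff below; the package name example and the helper logNodeCount are hypothetical. The logging helper lives in the dedicated package k8s.io/kubernetes/test/e2e/framework/log, is imported under the e2elog alias, and keeps the same printf-style signature as framework.Logf.

package example

import (
	// The e2e log helpers live in their own package; e2elog is the conventional alias.
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logNodeCount is a hypothetical helper that only illustrates the call-site change.
func logNodeCount(count int) {
	// Before this commit: framework.Logf("found %d schedulable nodes", count)
	// After this commit only the package changes; the format-string API is identical.
	e2elog.Logf("found %d schedulable nodes", count)
}

Because the signature is unchanged, call sites that pass the logger as a function value (for example the "}), true, framework.Logf))" arguments in the scheduling tests) switch to e2elog.Logf with no further edits.
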
@@ -66,6 +66,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/auth:go_default_library",
 "//test/e2e/framework/ginkgowrapper:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/providers/aws:go_default_library",
 "//test/e2e/framework/providers/azure:go_default_library",

@@ -38,6 +38,7 @@ import (
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
 "k8s.io/kubernetes/test/e2e/manifest"
 testutils "k8s.io/kubernetes/test/utils"
@@ -119,26 +120,26 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 // number equal to the number of allowed not-ready nodes).
 if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
 framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
-framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
+framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
 runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
 framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 }
 
 if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
-framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
+e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 }
 
 // Log the version of the server and this client.
-framework.Logf("e2e test version: %s", version.Get().GitVersion)
+e2elog.Logf("e2e test version: %s", version.Get().GitVersion)
 
 dc := c.DiscoveryClient
 
 serverVersion, serverErr := dc.ServerVersion()
 if serverErr != nil {
-framework.Logf("Unexpected server error retrieving version: %v", serverErr)
+e2elog.Logf("Unexpected server error retrieving version: %v", serverErr)
 }
 if serverVersion != nil {
-framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
+e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
 }
 
 // Reference common test to make the import valid.
@@ -160,17 +161,17 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 // and then the function that only runs on the first Ginkgo node.
 var _ = ginkgo.SynchronizedAfterSuite(func() {
 // Run on all Ginkgo nodes
-framework.Logf("Running AfterSuite actions on all nodes")
+e2elog.Logf("Running AfterSuite actions on all nodes")
 framework.RunCleanupActions()
 }, func() {
 // Run only Ginkgo on node 1
-framework.Logf("Running AfterSuite actions on node 1")
+e2elog.Logf("Running AfterSuite actions on node 1")
 if framework.TestContext.ReportDir != "" {
 framework.CoreDump(framework.TestContext.ReportDir)
 }
 if framework.TestContext.GatherSuiteMetricsAfterTest {
 if err := gatherTestSuiteMetrics(); err != nil {
-framework.Logf("Error gathering metrics: %v", err)
+e2elog.Logf("Error gathering metrics: %v", err)
 }
 }
 if framework.TestContext.NodeKiller.Enabled {
@@ -179,7 +180,7 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {
 })
 
 func gatherTestSuiteMetrics() error {
-framework.Logf("Gathering metrics")
+e2elog.Logf("Gathering metrics")
 c, err := framework.LoadClientset()
 if err != nil {
 return fmt.Errorf("error loading client: %v", err)
@@ -204,7 +205,7 @@ func gatherTestSuiteMetrics() error {
 return fmt.Errorf("error writing to %q: %v", filePath, err)
 }
 } else {
-framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
+e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)
 }
 
 return nil
@@ -246,31 +247,31 @@ func RunE2ETests(t *testing.T) {
 // to flip to Ready, log its output and delete it.
 func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
 path := "test/images/clusterapi-tester/pod.yaml"
-framework.Logf("Parsing pod from %v", path)
+e2elog.Logf("Parsing pod from %v", path)
 p, err := manifest.PodFromManifest(path)
 if err != nil {
-framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
+e2elog.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
 return
 }
 p.Namespace = ns
 if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
-framework.Logf("Failed to create %v: %v", p.Name, err)
+e2elog.Logf("Failed to create %v: %v", p.Name, err)
 return
 }
 defer func() {
 if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
-framework.Logf("Failed to delete pod %v: %v", p.Name, err)
+e2elog.Logf("Failed to delete pod %v: %v", p.Name, err)
 }
 }()
 timeout := 5 * time.Minute
 if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
-framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
+e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
 return
 }
 logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
 if err != nil {
-framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
+e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
 } else {
-framework.Logf("Output of clusterapi-tester:\n%v", logs)
+e2elog.Logf("Output of clusterapi-tester:\n%v", logs)
 }
 }

@@ -31,6 +31,7 @@ import (
 commonutils "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/auth"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 
 . "github.com/onsi/ginkgo"
@@ -82,14 +83,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
 stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
-framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
+e2elog.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
 if stat.RestartCount > 0 {
-framework.Logf("Saw %v restart, succeeded...", podName)
+e2elog.Logf("Saw %v restart, succeeded...", podName)
 wg.Done()
 return
 }
 }
-framework.Logf("Failed waiting for %v restart! ", podName)
+e2elog.Logf("Failed waiting for %v restart! ", podName)
 passed = false
 wg.Done()
 }

@@ -24,6 +24,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 
 . "github.com/onsi/ginkgo"
 )
@@ -37,14 +38,14 @@ var _ = framework.KubeDescribe("GKE local SSD [Feature:GKELocalSSD]", func() {
 })
 
 It("should write and read from node local SSD [Feature:GKELocalSSD]", func() {
-framework.Logf("Start local SSD test")
+e2elog.Logf("Start local SSD test")
 createNodePoolWithLocalSsds("np-ssd")
 doTestWriteAndReadToLocalSsd(f)
 })
 })
 
 func createNodePoolWithLocalSsds(nodePoolName string) {
-framework.Logf("Create node pool: %s with local SSDs in cluster: %s ",
+e2elog.Logf("Create node pool: %s with local SSDs in cluster: %s ",
 nodePoolName, framework.TestContext.CloudConfig.Cluster)
 out, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create",
 nodePoolName,
@@ -53,7 +54,7 @@ func createNodePoolWithLocalSsds(nodePoolName string) {
 if err != nil {
 framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
 }
-framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
+e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
 }
 
 func doTestWriteAndReadToLocalSsd(f *framework.Framework) {

@@ -21,6 +21,7 @@ import (
 "os/exec"
 
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 
 . "github.com/onsi/ginkgo"
 )
@@ -34,13 +35,13 @@ var _ = framework.KubeDescribe("GKE node pools [Feature:GKENodePool]", func() {
 })
 
 It("should create a cluster with multiple node pools [Feature:GKENodePool]", func() {
-framework.Logf("Start create node pool test")
+e2elog.Logf("Start create node pool test")
 testCreateDeleteNodePool(f, "test-pool")
 })
 })
 
 func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
-framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
+e2elog.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
 
 clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster)
 
@@ -48,50 +49,50 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
 poolName,
 clusterStr,
 "--num-nodes=2").CombinedOutput()
-framework.Logf("\n%s", string(out))
+e2elog.Logf("\n%s", string(out))
 if err != nil {
 framework.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out))
 }
-framework.Logf("Successfully created node pool %q.", poolName)
+e2elog.Logf("Successfully created node pool %q.", poolName)
 
 out, err = exec.Command("gcloud", "container", "node-pools", "list",
 clusterStr).CombinedOutput()
 if err != nil {
 framework.Failf("Failed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
 }
-framework.Logf("Node pools:\n%s", string(out))
+e2elog.Logf("Node pools:\n%s", string(out))
 
-framework.Logf("Checking that 2 nodes have the correct node pool label.")
+e2elog.Logf("Checking that 2 nodes have the correct node pool label.")
 nodeCount := nodesWithPoolLabel(f, poolName)
 if nodeCount != 2 {
 framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount)
 }
-framework.Logf("Success, found 2 nodes with correct node pool labels.")
+e2elog.Logf("Success, found 2 nodes with correct node pool labels.")
 
-framework.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
+e2elog.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
 out, err = exec.Command("gcloud", "container", "node-pools", "delete",
 poolName,
 clusterStr,
 "-q").CombinedOutput()
-framework.Logf("\n%s", string(out))
+e2elog.Logf("\n%s", string(out))
 if err != nil {
 framework.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out))
 }
-framework.Logf("Successfully deleted node pool %q.", poolName)
+e2elog.Logf("Successfully deleted node pool %q.", poolName)
 
 out, err = exec.Command("gcloud", "container", "node-pools", "list",
 clusterStr).CombinedOutput()
 if err != nil {
 framework.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
 }
-framework.Logf("\nNode pools:\n%s", string(out))
+e2elog.Logf("\nNode pools:\n%s", string(out))
 
-framework.Logf("Checking that no nodes have the deleted node pool's label.")
+e2elog.Logf("Checking that no nodes have the deleted node pool's label.")
 nodeCount = nodesWithPoolLabel(f, poolName)
 if nodeCount != 0 {
 framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount)
 }
-framework.Logf("Success, found no nodes with the deleted node pool's label.")
+e2elog.Logf("Success, found no nodes with the deleted node pool's label.")
 }
 
 // nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool"

@@ -45,6 +45,7 @@ go_library(
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/gpu:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/utils:go_default_library",

@@ -27,6 +27,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -72,7 +73,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 Expect(err).NotTo(HaveOccurred())
 
 for _, node := range nodeList.Items {
-framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
+e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
 framework.PrintAllKubeletPods(cs, node.Name)
 }
 

@@ -28,6 +28,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -100,7 +101,7 @@ var _ = SIGDescribe("LimitRange", func() {
 err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 if err != nil {
 // Print the pod to help in debugging.
-framework.Logf("Pod %+v does not have the expected requirements", pod)
+e2elog.Logf("Pod %+v does not have the expected requirements", pod)
 Expect(err).NotTo(HaveOccurred())
 }
 }
@@ -121,7 +122,7 @@ var _ = SIGDescribe("LimitRange", func() {
 err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 if err != nil {
 // Print the pod to help in debugging.
-framework.Logf("Pod %+v does not have the expected requirements", pod)
+e2elog.Logf("Pod %+v does not have the expected requirements", pod)
 Expect(err).NotTo(HaveOccurred())
 }
 }
@@ -170,18 +171,18 @@ var _ = SIGDescribe("LimitRange", func() {
 limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
 
 if err != nil {
-framework.Logf("Unable to retrieve LimitRanges: %v", err)
+e2elog.Logf("Unable to retrieve LimitRanges: %v", err)
 return false, nil
 }
 
 if len(limitRanges.Items) == 0 {
-framework.Logf("limitRange is already deleted")
+e2elog.Logf("limitRange is already deleted")
 return true, nil
 }
 
 if len(limitRanges.Items) > 0 {
 if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil {
-framework.Logf("deletion has not yet been observed")
+e2elog.Logf("deletion has not yet been observed")
 return false, nil
 }
 return true, nil
@@ -200,12 +201,12 @@ var _ = SIGDescribe("LimitRange", func() {
 })
 
 func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
-framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
+e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
 err := equalResourceList(expected.Requests, actual.Requests)
 if err != nil {
 return err
 }
-framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
+e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
 err = equalResourceList(expected.Limits, actual.Limits)
 return err
 }

@@ -27,6 +27,7 @@ import (
 extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/gpu"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 
 . "github.com/onsi/ginkgo"
@@ -81,25 +82,25 @@ func logOSImages(f *framework.Framework) {
 nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err, "getting node list")
 for _, node := range nodeList.Items {
-framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
+e2elog.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
 }
 }
 
 func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool {
-framework.Logf("Getting list of Nodes from API server")
+e2elog.Logf("Getting list of Nodes from API server")
 nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err, "getting node list")
 for _, node := range nodeList.Items {
 if node.Spec.Unschedulable {
 continue
 }
-framework.Logf("gpuResourceName %s", gpuResourceName)
+e2elog.Logf("gpuResourceName %s", gpuResourceName)
 if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 {
-framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
+e2elog.Logf("Nvidia GPUs not available on Node: %q", node.Name)
 return false
 }
 }
-framework.Logf("Nvidia GPUs exist on all schedulable nodes")
+e2elog.Logf("Nvidia GPUs exist on all schedulable nodes")
 return true
 }
 
@@ -126,34 +127,34 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 }
 gpuResourceName = gpu.NVIDIAGPUResourceName
 
-framework.Logf("Using %v", dsYamlUrl)
+e2elog.Logf("Using %v", dsYamlUrl)
 // Creates the DaemonSet that installs Nvidia Drivers.
 ds, err := framework.DsFromManifest(dsYamlUrl)
 Expect(err).NotTo(HaveOccurred())
 ds.Namespace = f.Namespace.Name
 _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
 framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
-framework.Logf("Successfully created daemonset to install Nvidia drivers.")
+e2elog.Logf("Successfully created daemonset to install Nvidia drivers.")
 
 pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
 framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")
 
 devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
 if err == nil {
-framework.Logf("Adding deviceplugin addon pod.")
+e2elog.Logf("Adding deviceplugin addon pod.")
 pods.Items = append(pods.Items, devicepluginPods.Items...)
 }
 
 var rsgather *framework.ContainerResourceGatherer
 if setupResourceGatherer {
-framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
+e2elog.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
 rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
 framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 go rsgather.StartGatheringData()
 }
 
 // Wait for Nvidia GPUs to be available on nodes
-framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
+e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
 Eventually(func() bool {
 return areGPUsAvailableOnAllSchedulableNodes(f)
 }, driverInstallTimeout, time.Second).Should(BeTrue())
@@ -163,18 +164,18 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
 
 func testNvidiaGPUs(f *framework.Framework) {
 rsgather := SetupNVIDIAGPUNode(f, true)
-framework.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app")
+e2elog.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app")
 podList := []*v1.Pod{}
 for i := int64(0); i < getGPUsAvailable(f); i++ {
 podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
 }
-framework.Logf("Wait for all test pods to succeed")
+e2elog.Logf("Wait for all test pods to succeed")
 // Wait for all pods to succeed
 for _, po := range podList {
 f.PodClient().WaitForSuccess(po.Name, 5*time.Minute)
 }
 
-framework.Logf("Stopping ResourceUsageGather")
+e2elog.Logf("Stopping ResourceUsageGather")
 constraints := make(map[string]framework.ResourceConstraint)
 // For now, just gets summary. Can pass valid constraints in the future.
 summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)

@@ -30,6 +30,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -88,7 +89,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 framework.ExpectNoError(err)
 
 for _, node := range nodeList.Items {
-framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
+e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
 framework.PrintAllKubeletPods(cs, node.Name)
 }
 
@@ -103,7 +104,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 totalPodCapacity = 0
 
 for _, node := range nodeList.Items {
-framework.Logf("Node: %v", node)
+e2elog.Logf("Node: %v", node)
 podCapacity, found := node.Status.Capacity[v1.ResourcePods]
 Expect(found).To(Equal(true))
 totalPodCapacity += podCapacity.Value()
@@ -123,7 +124,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 *initPausePod(f, pausePodConfig{
 Name: "",
 Labels: map[string]string{"name": ""},
-}), true, framework.Logf))
+}), true, e2elog.Logf))
 }
 podName := "additional-pod"
 WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
@@ -158,7 +159,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 for _, pod := range pods.Items {
 _, found := nodeToAllocatableMap[pod.Spec.NodeName]
 if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
-framework.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
+e2elog.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
 nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
 }
 }
@@ -167,9 +168,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 
 milliEphemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods
 
-framework.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod)
+e2elog.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod)
 for name, leftAllocatable := range nodeToAllocatableMap {
-framework.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable)
+e2elog.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable)
 podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod)
 }
 
@@ -192,7 +193,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
 },
 },
-}), true, framework.Logf))
+}), true, e2elog.Logf))
 }
 podName := "additional-pod"
 conf := pausePodConfig{
@@ -262,7 +263,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 for _, pod := range pods.Items {
 _, found := nodeToAllocatableMap[pod.Spec.NodeName]
 if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
-framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
+e2elog.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
 nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
 }
 }

@@ -34,6 +34,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/pkg/apis/scheduling"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/replicaset"
 
 . "github.com/onsi/ginkgo"
@@ -115,7 +116,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 Requests: podRes,
 },
 })
-framework.Logf("Created pod: %v", pods[i].Name)
+e2elog.Logf("Created pod: %v", pods[i].Name)
 }
 By("Wait for pods to be scheduled.")
 for _, pod := range pods {
@@ -175,7 +176,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 Requests: podRes,
 },
 })
-framework.Logf("Created pod: %v", pods[i].Name)
+e2elog.Logf("Created pod: %v", pods[i].Name)
 }
 By("Wait for pods to be scheduled.")
 for _, pod := range pods {
@@ -285,7 +286,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 },
 },
 })
-framework.Logf("Created pod: %v", pods[i].Name)
+e2elog.Logf("Created pod: %v", pods[i].Name)
 }
 defer func() { // Remove added labels
 for i := 0; i < numPods; i++ {
@@ -368,7 +369,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {
 framework.ExpectNoError(err)
 }()
 Expect(pod.Spec.Priority).NotTo(BeNil())
-framework.Logf("Created pod: %v", pod.Name)
+e2elog.Logf("Created pod: %v", pod.Name)
 }
 })
 })
@@ -391,11 +392,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 // list existing priorities
 priorityList, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{})
 if err != nil {
-framework.Logf("Unable to list priorities: %v", err)
+e2elog.Logf("Unable to list priorities: %v", err)
 } else {
-framework.Logf("List existing priorities:")
+e2elog.Logf("List existing priorities:")
 for _, p := range priorityList.Items {
-framework.Logf("%v/%v created at %v", p.Name, p.Value, p.CreationTimestamp)
+e2elog.Logf("%v/%v created at %v", p.Name, p.Value, p.CreationTimestamp)
 }
 }
 }
@@ -420,7 +421,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 // find an available node
 By("Finding an available node")
 nodeName := GetNodeThatCanRunPod(f)
-framework.Logf("found a healthy node: %s", nodeName)
+e2elog.Logf("found a healthy node: %s", nodeName)
 
 // get the node API object
 var err error
@@ -449,8 +450,8 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal})
 _, err := cs.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal})
 if err != nil {
-framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
-framework.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
+e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
+e2elog.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
 }
 Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
 }
@@ -549,16 +550,16 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
 runPauseRS(f, rsConfs[i])
 }
 
-framework.Logf("pods created so far: %v", podNamesSeen)
-framework.Logf("length of pods created so far: %v", len(podNamesSeen))
+e2elog.Logf("pods created so far: %v", podNamesSeen)
+e2elog.Logf("length of pods created so far: %v", len(podNamesSeen))
 
 // create ReplicaSet4
 // if runPauseRS failed, it means ReplicaSet4 cannot be scheduled even after 1 minute
 // which is unacceptable
 runPauseRS(f, rsConfs[rsNum-1])
 
-framework.Logf("pods created so far: %v", podNamesSeen)
-framework.Logf("length of pods created so far: %v", len(podNamesSeen))
+e2elog.Logf("pods created so far: %v", podNamesSeen)
+e2elog.Logf("length of pods created so far: %v", len(podNamesSeen))
 
 // count pods number of ReplicaSet{1,2,3}, if it's more than expected replicas
 // then it denotes its pods have been over-preempted

@@ -35,6 +35,7 @@ import (
 priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -154,7 +155,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 defer func() {
 // Resize the replication controller to zero to get rid of pods.
 if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
-framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
+e2elog.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
 }
 }()
 
@@ -301,7 +302,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 Requests: needCreateResource,
 },
 NodeName: node.Name,
-}), true, framework.Logf)
+}), true, e2elog.Logf)
 
 if err != nil {
 return err
@@ -317,7 +318,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 }
 
 func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
-framework.Logf("ComputeCpuMemFraction for node: %v", node.Name)
+e2elog.Logf("ComputeCpuMemFraction for node: %v", node.Name)
 totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
 totalRequestedMemResource := resource.Requests.Memory().Value()
 allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
@@ -326,7 +327,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 }
 for _, pod := range allpods.Items {
 if pod.Spec.NodeName == node.Name {
-framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
+e2elog.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory)
 // Ignore best effort pods while computing fractions as they won't be taken in account by scheduler.
 if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
 continue
@@ -352,8 +353,8 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 memFraction = floatOne
 }
 
-framework.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction)
-framework.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
+e2elog.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction)
+e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
 
 return cpuFraction, memFraction
 }

@ -19,20 +19,19 @@ package scheduling
|
||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
_ "github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/watch"
|
"k8s.io/apimachinery/pkg/watch"
|
||||||
|
|
||||||
"k8s.io/client-go/tools/cache"
|
|
||||||
|
|
||||||
"k8s.io/api/core/v1"
|
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/tools/cache"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
testutils "k8s.io/kubernetes/test/utils"
|
testutils "k8s.io/kubernetes/test/utils"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
|
||||||
_ "github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func getTestTaint() v1.Taint {
|
func getTestTaint() v1.Taint {
|
||||||
|
@ -137,7 +136,7 @@ func createTestController(cs clientset.Interface, observedDeletions chan string,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
framework.Logf("Starting informer...")
|
e2elog.Logf("Starting informer...")
|
||||||
go controller.Run(stopCh)
|
go controller.Run(stopCh)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -179,7 +178,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
|
||||||
By("Starting pod...")
|
By("Starting pod...")
|
||||||
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
|
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
|
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
|
||||||
|
|
||||||
By("Trying to apply a taint on the Node")
|
By("Trying to apply a taint on the Node")
|
||||||
testTaint := getTestTaint()
|
testTaint := getTestTaint()
|
||||||
|
@ -194,7 +193,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
|
||||||
case <-timeoutChannel:
|
case <-timeoutChannel:
|
||||||
framework.Failf("Failed to evict Pod")
|
framework.Failf("Failed to evict Pod")
|
||||||
case <-observedDeletions:
|
case <-observedDeletions:
|
||||||
framework.Logf("Noticed Pod eviction. Test successful")
|
e2elog.Logf("Noticed Pod eviction. Test successful")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -211,7 +210,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
|
||||||
By("Starting pod...")
|
By("Starting pod...")
|
||||||
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
|
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
|
e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
|
||||||
|
|
||||||
By("Trying to apply a taint on the Node")
|
By("Trying to apply a taint on the Node")
|
||||||
testTaint := getTestTaint()
|
testTaint := getTestTaint()
|
||||||
|
@ -224,7 +223,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
|
||||||
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
|
timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
|
||||||
select {
|
select {
|
||||||
case <-timeoutChannel:
|
case <-timeoutChannel:
|
||||||
framework.Logf("Pod wasn't evicted. Test successful")
|
e2elog.Logf("Pod wasn't evicted. Test successful")
|
||||||
case <-observedDeletions:
|
case <-observedDeletions:
|
||||||
framework.Failf("Pod was evicted despite toleration")
|
framework.Failf("Pod was evicted despite toleration")
|
||||||
}
|
}
|
||||||
|
@@ -244,7 +243,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
         By("Starting pod...")
         nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
         framework.ExpectNoError(err)
-        framework.Logf("Pod is running on %v. Tainting Node", nodeName)
+        e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)

         By("Trying to apply a taint on the Node")
         testTaint := getTestTaint()
@@ -257,7 +256,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
         timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
         select {
         case <-timeoutChannel:
-            framework.Logf("Pod wasn't evicted")
+            e2elog.Logf("Pod wasn't evicted")
         case <-observedDeletions:
             framework.Failf("Pod was evicted despite toleration")
             return
@@ -268,7 +267,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
         case <-timeoutChannel:
             framework.Failf("Pod wasn't evicted")
         case <-observedDeletions:
-            framework.Logf("Pod was evicted after toleration time run out. Test successful")
+            e2elog.Logf("Pod was evicted after toleration time run out. Test successful")
             return
         }
     })
@@ -288,7 +287,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
         By("Starting pod...")
         nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
         framework.ExpectNoError(err)
-        framework.Logf("Pod is running on %v. Tainting Node", nodeName)
+        e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)

         By("Trying to apply a taint on the Node")
         testTaint := getTestTaint()
@@ -306,19 +305,19 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
         timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C
         select {
         case <-timeoutChannel:
-            framework.Logf("Pod wasn't evicted. Proceeding")
+            e2elog.Logf("Pod wasn't evicted. Proceeding")
         case <-observedDeletions:
             framework.Failf("Pod was evicted despite toleration")
             return
         }
-        framework.Logf("Removing taint from Node")
+        e2elog.Logf("Removing taint from Node")
         framework.RemoveTaintOffNode(cs, nodeName, testTaint)
         taintRemoved = true
         By("Waiting some time to make sure that toleration time passed.")
         timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
         select {
         case <-timeoutChannel:
-            framework.Logf("Pod wasn't evicted. Test successful")
+            e2elog.Logf("Pod wasn't evicted. Test successful")
         case <-observedDeletions:
             framework.Failf("Pod was evicted despite toleration")
         }
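The -306 hunk also shows the cleanup half of these scenarios: the taint is removed mid-test with framework.RemoveTaintOffNode and a taintRemoved flag records that, so a deferred cleanup outside this hunk can skip a second removal. A rough sketch of that apply-then-conditionally-remove flow follows; applyTaint and removeTaint are hypothetical stand-ins for the framework helpers, not their real signatures.

package main

import "fmt"

// applyTaint and removeTaint are hypothetical stand-ins for the framework
// taint helpers (only RemoveTaintOffNode is visible in the hunk above);
// they illustrate the control flow, not the real API.
func applyTaint(nodeName, taint string)  { fmt.Printf("tainting %s with %s\n", nodeName, taint) }
func removeTaint(nodeName, taint string) { fmt.Printf("removing %s from %s\n", nodeName, taint) }

func runTaintScenario(nodeName, taint string) {
	taintRemoved := false
	applyTaint(nodeName, taint)
	// The deferred cleanup fires only if the test body has not already
	// removed the taint itself, mirroring the taintRemoved flag above.
	defer func() {
		if !taintRemoved {
			removeTaint(nodeName, taint)
		}
	}()

	// ... wait for (non-)eviction with the timer/select pattern shown earlier ...

	removeTaint(nodeName, taint)
	taintRemoved = true
}

func main() {
	runTaintScenario("node-1", "example.com/evict=value:NoExecute")
}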
@@ -355,10 +354,10 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
         By("Starting pods...")
         nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
         framework.ExpectNoError(err)
-        framework.Logf("Pod1 is running on %v. Tainting Node", nodeName1)
+        e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName1)
         nodeName2, err := testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute)
         framework.ExpectNoError(err)
-        framework.Logf("Pod2 is running on %v. Tainting Node", nodeName2)
+        e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName2)

         By("Trying to apply a taint on the Nodes")
         testTaint := getTestTaint()
@@ -387,7 +386,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
         case podName := <-observedDeletions:
             evicted++
             if podName == podGroup+"1" {
-                framework.Logf("Noticed Pod %q gets evicted.", podName)
+                e2elog.Logf("Noticed Pod %q gets evicted.", podName)
             } else if podName == podGroup+"2" {
                 framework.Failf("Unexepected Pod %q gets evicted.", podName)
                 return
@@ -417,12 +416,12 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
             framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName)
         }
         framework.ExpectNoError(err)
-        framework.Logf("Pod1 is running on %v. Tainting Node", nodeName)
+        e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName)
         // ensure pod2 lands on the same node as pod1
         pod2.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": nodeHostNameLabel}
         _, err = testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute)
         framework.ExpectNoError(err)
-        framework.Logf("Pod2 is running on %v. Tainting Node", nodeName)
+        e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName)

         By("Trying to apply a taint on the Node")
         testTaint := getTestTaint()
@@ -440,7 +439,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
             framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted)
             return
         case podName := <-observedDeletions:
-            framework.Logf("Noticed Pod %q gets evicted.", podName)
+            e2elog.Logf("Noticed Pod %q gets evicted.", podName)
             evicted++
         }
     }
@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
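Each affected file gains the same aliased import, e2elog "k8s.io/kubernetes/test/e2e/framework/log", and the call sites change only the package qualifier because Logf keeps the familiar Printf-style shape. The tiny stand-alone sketch below shows what such a Logf looks like; it is an illustrative stand-in, not the actual e2elog implementation.

package main

import (
	"fmt"
	"time"
)

// Logf is a hypothetical stand-in for e2elog.Logf: a timestamped,
// Printf-style logger. The point is only that the signature matches the
// old framework.Logf, so call sites change nothing but the qualifier.
func Logf(format string, args ...interface{}) {
	fmt.Printf(time.Now().Format(time.RFC3339)+" INFO: "+format+"\n", args...)
}

func main() {
	nodeName := "node-1"
	Logf("Pod is running on %v. Tainting Node", nodeName)
}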
@@ -100,7 +101,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
     // Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
     // Thus, no need to test for it. Once the precondition changes to zero number of replicas,
     // test for replicaCount > 0. Otherwise, StartPods panics.
-    framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf))
+    framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, e2elog.Logf))

     // Wait for all of them to be scheduled
     selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
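Note that in the SpreadServiceOrFail hunk the logger is not called directly but passed as the final argument to testutils.StartPods, which works because a Printf-style function is an ordinary value in Go. A small sketch of that injection pattern follows; startPods here is a hypothetical stand-in, since the real helper's full signature is not shown in this diff.

package main

import "fmt"

// startPods is a hypothetical stand-in for testutils.StartPods: it accepts
// a Printf-style logging callback, which is how the hunk above can pass
// e2elog.Logf in place of framework.Logf with no other changes.
func startPods(replicas int, logf func(format string, args ...interface{})) {
	for i := 0; i < replicas; i++ {
		logf("started pod %d of %d", i+1, replicas)
	}
}

func main() {
	logf := func(format string, args ...interface{}) {
		fmt.Printf("INFO: "+format+"\n", args...)
	}
	startPods(3, logf)
}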
@@ -207,7 +208,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
     defer func() {
         // Resize the replication controller to zero to get rid of pods.
         if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
-            framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
+            e2elog.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
         }
     }()
     // List the pods, making sure we observe all the replicas.
@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 )

@@ -65,7 +66,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
     // Get all the zones that the nodes are in
     expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
     Expect(err).NotTo(HaveOccurred())
-    framework.Logf("Expected zones: %v", expectedZones)
+    e2elog.Logf("Expected zones: %v", expectedZones)

     // Get all the zones in this current region
     region := gceCloud.Region()
@@ -120,7 +121,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {

     defer func() {
         // Teardown of the compute instance
-        framework.Logf("Deleting compute resource: %v", name)
+        e2elog.Logf("Deleting compute resource: %v", name)
         err := gceCloud.DeleteInstance(project, zone, name)
         Expect(err).NotTo(HaveOccurred())
     }()
@@ -140,7 +141,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {

     // Defer the cleanup
     defer func() {
-        framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
+        e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
         err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
         if err != nil {
             framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
@@ -14,6 +14,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],
@@ -26,6 +26,7 @@ import (
     utilnet "k8s.io/apimachinery/pkg/util/net"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"

     "github.com/onsi/ginkgo"
@@ -63,7 +64,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
         var status int
         proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
         if errProxy != nil {
-            framework.Logf("Get services proxy request failed: %v", errProxy)
+            e2elog.Logf("Get services proxy request failed: %v", errProxy)
         }

         ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
@@ -82,9 +83,9 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
                 framework.Failf("Request to kubernetes-dashboard failed: %v", err)
                 return true, err
             }
-            framework.Logf("Request to kubernetes-dashboard failed: %v", err)
+            e2elog.Logf("Request to kubernetes-dashboard failed: %v", err)
         } else if status != http.StatusOK {
-            framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
+            e2elog.Logf("Unexpected status from kubernetes-dashboard: %v", status)
         }
         // Don't return err here as it aborts polling.
         return status == http.StatusOK, nil
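The dashboard hunk sits inside a polling condition function: transient failures are logged and polling continues (hence the "Don't return err here as it aborts polling" comment), and only a successful HTTP 200 ends the loop. The self-contained sketch below mirrors that poll-and-log shape; poll is a simplified stand-in for wait.Poll from k8s.io/apimachinery, and the fake status values only simulate a dashboard that becomes ready after a few attempts.

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll is a simplified stand-in for wait.Poll: it calls condition every
// interval until it returns true, returns an error, or the timeout passes.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ok, err := condition()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the condition")
}

func main() {
	attempts := 0
	err := poll(50*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		status := 503 // simulated response; a real probe would hit the services proxy
		if attempts >= 3 {
			status = 200
		}
		if status != 200 {
			// Log and keep polling, mirroring the "Don't return err here as
			// it aborts polling" comment in the hunk above.
			fmt.Printf("Unexpected status from kubernetes-dashboard: %v\n", status)
			return false, nil
		}
		return true, nil
	})
	fmt.Println("poll result:", err)
}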