mirror of https://github.com/k3s-io/k3s
Merge pull request #77401 from johnSchnake/frameworkLogRefactoringStorageVsphereTestSuites
Move storage tests to use the framework/log package (tag: k3s-v1.15.3)
commit 43ce2f17cf
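Every hunk below applies the same mechanical substitution: log calls that used to go through the catch-all framework package now go through the dedicated framework/log package, imported under the alias e2elog, while non-logging helpers such as framework.ExpectNoError and framework.Failf keep their existing import. A minimal sketch of the pattern, assuming a package storage test file; the logCleanup helper is illustrative only and does not appear in this commit:

    package storage

    import (
        // Still needed for non-logging helpers (ExpectNoError, Failf, ...).
        "k8s.io/kubernetes/test/e2e/framework"
        // Logging now goes through the dedicated log package, aliased as e2elog.
        e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // logCleanup is a hypothetical helper showing the substitution applied
    // throughout the hunks below: framework.Logf(...) becomes e2elog.Logf(...).
    func logCleanup(ns string, errs []error) {
        e2elog.Logf("AfterEach: cleaning up test resources in namespace %q", ns)
        if len(errs) > 0 {
            // Failure reporting is unchanged in this commit and stays on framework.
            framework.Failf("cleanup failed: %v", errs)
        }
    }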
@@ -66,6 +66,7 @@ go_library(
  "//test/e2e/framework:go_default_library",
  "//test/e2e/framework/auth:go_default_library",
  "//test/e2e/framework/deployment:go_default_library",
+ "//test/e2e/framework/log:go_default_library",
  "//test/e2e/framework/metrics:go_default_library",
  "//test/e2e/framework/providers/gce:go_default_library",
  "//test/e2e/framework/testfiles:go_default_library",
@@ -37,6 +37,7 @@ import (

  volumeutil "k8s.io/kubernetes/pkg/volume/util"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/drivers"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
  "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -715,7 +716,7 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
  if err != nil {
  return fmt.Errorf("could not load CSI driver logs: %s", err)
  }
- framework.Logf("CSI driver logs:\n%s", log)
+ e2elog.Logf("CSI driver logs:\n%s", log)
  // Find NodePublish in the logs
  foundAttributes := sets.NewString()
  logLines := strings.Split(log, "\n")
@@ -734,7 +735,7 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
  var call MockCSICall
  err := json.Unmarshal([]byte(line), &call)
  if err != nil {
- framework.Logf("Could not parse CSI driver log line %q: %s", line, err)
+ e2elog.Logf("Could not parse CSI driver log line %q: %s", line, err)
  continue
  }
  if call.Method != "/csi.v1.Node/NodePublishVolume" {
@@ -745,7 +746,7 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
  vv, found := call.Request.VolumeContext[k]
  if found && v == vv {
  foundAttributes.Insert(k)
- framework.Logf("Found volume attribute %s: %s", k, v)
+ e2elog.Logf("Found volume attribute %s: %s", k, v)
  }
  }
  // Process just the first NodePublish, the rest of the log is useless.
@@ -767,7 +768,7 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
  func waitForCSIDriver(cs clientset.Interface, driverName string) error {
  timeout := 4 * time.Minute

- framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
+ e2elog.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
  for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
  _, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
  if !errors.IsNotFound(err) {
@@ -780,9 +781,9 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error {
  func destroyCSIDriver(cs clientset.Interface, driverName string) {
  driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
  if err == nil {
- framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
+ e2elog.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
  // Uncomment the following line to get full dump of CSIDriver object
- // framework.Logf("%s", framework.PrettyPrint(driverGet))
+ // e2elog.Logf("%s", framework.PrettyPrint(driverGet))
  cs.StorageV1beta1().CSIDrivers().Delete(driverName, nil)
  }
  }
@@ -23,6 +23,7 @@ go_library(
  "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
  "//test/e2e/framework:go_default_library",
  "//test/e2e/framework/auth:go_default_library",
+ "//test/e2e/framework/log:go_default_library",
  "//test/e2e/framework/volume:go_default_library",
  "//test/e2e/storage/testpatterns:go_default_library",
  "//test/e2e/storage/testsuites:go_default_library",
@@ -27,13 +27,14 @@ import (
  "path"
  "path/filepath"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  apierrors "k8s.io/apimachinery/pkg/api/errors"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/util/uuid"

  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  )

  var (
@@ -49,16 +50,16 @@ var (

  func shredFile(filePath string) {
  if _, err := os.Stat(filePath); os.IsNotExist(err) {
- framework.Logf("File %v was not found, skipping shredding", filePath)
+ e2elog.Logf("File %v was not found, skipping shredding", filePath)
  return
  }
- framework.Logf("Shredding file %v", filePath)
+ e2elog.Logf("Shredding file %v", filePath)
  _, _, err := framework.RunCmd("shred", "--remove", filePath)
  if err != nil {
- framework.Logf("Failed to shred file %v: %v", filePath, err)
+ e2elog.Logf("Failed to shred file %v: %v", filePath, err)
  }
  if _, err := os.Stat(filePath); os.IsNotExist(err) {
- framework.Logf("File %v successfully shredded", filePath)
+ e2elog.Logf("File %v successfully shredded", filePath)
  return
  }
  // Shred failed Try to remove the file for good meausure
@@ -78,13 +79,13 @@ func createGCESecrets(client clientset.Interface, ns string) {

  premadeSAFile, ok := os.LookupEnv(saEnv)
  if !ok {
- framework.Logf("Could not find env var %v, please either create cloud-sa"+
+ e2elog.Logf("Could not find env var %v, please either create cloud-sa"+
  " secret manually or rerun test after setting %v to the filepath of"+
  " the GCP Service Account to give to the GCE Persistent Disk CSI Driver", saEnv, saEnv)
  return
  }

- framework.Logf("Found CI service account key at %v", premadeSAFile)
+ e2elog.Logf("Found CI service account key at %v", premadeSAFile)
  // Need to copy it saFile
  stdout, stderr, err := framework.RunCmd("cp", premadeSAFile, saFile)
  framework.ExpectNoError(err, "error copying service account key: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
@@ -55,6 +55,7 @@ import (
  "k8s.io/apiserver/pkg/authentication/serviceaccount"
  "k8s.io/kubernetes/test/e2e/framework"
  "k8s.io/kubernetes/test/e2e/framework/auth"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/volume"
  "k8s.io/kubernetes/test/e2e/storage/testpatterns"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -311,15 +312,15 @@ func (v *glusterVolume) DeleteVolume() {

  name := v.prefix + "-server"

- framework.Logf("Deleting Gluster endpoints %q...", name)
+ e2elog.Logf("Deleting Gluster endpoints %q...", name)
  err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil)
  if err != nil {
  if !errors.IsNotFound(err) {
  framework.Failf("Gluster delete endpoints failed: %v", err)
  }
- framework.Logf("Gluster endpoints %q not found, assuming deleted", name)
+ e2elog.Logf("Gluster endpoints %q not found, assuming deleted", name)
  }
- framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
+ e2elog.Logf("Deleting Gluster server pod %q...", v.serverPod.Name)
  err = framework.DeletePodWithWait(f, cs, v.serverPod)
  if err != nil {
  framework.Failf("Gluster server pod delete failed: %v", err)
@@ -1057,7 +1058,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
  By("creating a test Cinder volume")
  output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
  outputString := string(output[:])
- framework.Logf("cinder output:\n%s", outputString)
+ e2elog.Logf("cinder output:\n%s", outputString)
  framework.ExpectNoError(err)

  // Parse 'id'' from stdout. Expected format:
@@ -1077,7 +1078,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
  volumeID = fields[3]
  break
  }
- framework.Logf("Volume ID: %s", volumeID)
+ e2elog.Logf("Volume ID: %s", volumeID)
  Expect(volumeID).NotTo(Equal(""))
  return &cinderVolume{
  volumeName: volumeName,
@@ -1094,16 +1095,16 @@ func (v *cinderVolume) DeleteVolume() {
  var err error
  timeout := time.Second * 120

- framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
+ e2elog.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
  for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
  output, err = exec.Command("cinder", "delete", name).CombinedOutput()
  if err == nil {
- framework.Logf("Cinder volume %s deleted", name)
+ e2elog.Logf("Cinder volume %s deleted", name)
  return
  }
- framework.Logf("Failed to delete volume %s: %v", name, err)
+ e2elog.Logf("Failed to delete volume %s: %v", name, err)
  }
- framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
+ e2elog.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
  }

  // GCE
@@ -21,11 +21,12 @@ import (
  "strings"
  "time"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/util/rand"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  imageutils "k8s.io/kubernetes/test/utils/image"

@@ -58,7 +59,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {

  // Allow it to sleep for 30 seconds
  time.Sleep(30 * time.Second)
- framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
+ e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
  framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
  })
  }
@@ -22,7 +22,7 @@ import (
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  storage "k8s.io/api/storage/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -30,6 +30,7 @@ import (
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
  e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )
@@ -102,7 +103,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
  })

  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
+ e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")

  if c != nil {
  if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
@@ -22,12 +22,13 @@ import (

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  storage "k8s.io/api/storage/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  utilerrors "k8s.io/apimachinery/pkg/util/errors"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )
@@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
  })

  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
+ e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")

  if c != nil {
  if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
@@ -20,9 +20,10 @@ import (
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )
@@ -65,7 +66,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
  pv *v1.PersistentVolume
  )
  BeforeEach(func() {
- framework.Logf("Initializing pod and pvcs for test")
+ e2elog.Logf("Initializing pod and pvcs for test")
  clientPod, pvc, pv = createPodPVCFromSC(f, c, ns)
  })
  for _, test := range disruptiveTestTable {
@@ -77,7 +78,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
  }(test)
  }
  AfterEach(func() {
- framework.Logf("Tearing down test spec")
+ e2elog.Logf("Tearing down test spec")
  tearDownTestCase(c, f, ns, clientPod, pvc, pv, false)
  pvc, clientPod = nil, nil
  })
@@ -22,7 +22,7 @@ import (
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
  apps "k8s.io/api/apps/v1"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  storage "k8s.io/api/storage/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,6 +32,7 @@ import (
  "k8s.io/kubernetes/pkg/client/conditions"
  "k8s.io/kubernetes/test/e2e/framework"
  e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )
@@ -97,7 +98,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
  })

  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
+ e2elog.Logf("AfterEach: Cleaning up resources for mounted volume resize")

  if c != nil {
  if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
@@ -22,12 +22,13 @@ import (

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/labels"
  utilerrors "k8s.io/apimachinery/pkg/util/errors"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/volume"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )
@@ -87,7 +88,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
  // Get the first ready node IP that is not hosting the NFS pod.
  var err error
  if clientNodeIP == "" {
- framework.Logf("Designating test node")
+ e2elog.Logf("Designating test node")
  nodes := framework.GetReadySchedulableNodesOrDie(c)
  for _, node := range nodes.Items {
  if node.Name != nfsServerPod.Spec.NodeName {
@@ -185,7 +186,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
  framework.ExpectNoError(err)
  err = framework.WaitForControllerManagerUp()
  framework.ExpectNoError(err)
- framework.Logf("kube-controller-manager restarted")
+ e2elog.Logf("kube-controller-manager restarted")

  By("Observing the kube-controller-manager healthy for at least 2 minutes")
  // Continue checking for 2 minutes to make sure kube-controller-manager is healthy
@@ -203,12 +204,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
  )

  BeforeEach(func() {
- framework.Logf("Initializing test spec")
+ e2elog.Logf("Initializing test spec")
  clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
  })

  AfterEach(func() {
- framework.Logf("Tearing down test spec")
+ e2elog.Logf("Tearing down test spec")
  tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
  pv, pvc, clientPod = nil, nil, nil
  })
@@ -256,9 +257,9 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
  framework.ExpectNoError(err)
  pod := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
  pod.Spec.NodeName = nodeName
- framework.Logf("Creating NFS client pod.")
+ e2elog.Logf("Creating NFS client pod.")
  pod, err = c.CoreV1().Pods(ns).Create(pod)
- framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
+ e2elog.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
  framework.ExpectNoError(err)
  defer func() {
  if err != nil {
@@ -29,7 +29,7 @@ import (
  "github.com/aws/aws-sdk-go/service/ec2"
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  policy "k8s.io/api/policy/v1beta1"
  "k8s.io/apimachinery/pkg/api/resource"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -39,6 +39,7 @@ import (
  clientset "k8s.io/client-go/kubernetes"
  v1core "k8s.io/client-go/kubernetes/typed/core/v1"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/providers/gce"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  imageutils "k8s.io/kubernetes/test/utils/image"
@@ -146,7 +147,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {

  By("deleting the fmtPod")
  framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
- framework.Logf("deleted fmtPod %q", fmtPod.Name)
+ e2elog.Logf("deleted fmtPod %q", fmtPod.Name)
  By("waiting for PD to detach")
  framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
  }
@@ -158,7 +159,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  defer func() {
  // Teardown should do nothing unless test failed
  By("defer: cleaning up PD-RW test environment")
- framework.Logf("defer cleanup errors can usually be ignored")
+ e2elog.Logf("defer cleanup errors can usually be ignored")
  if fmtPod != nil {
  podClient.Delete(fmtPod.Name, podDelOpt)
  }
@@ -171,7 +172,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  _, err = podClient.Create(host0Pod)
  framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
  framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
- framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
+ e2elog.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)

  var containerName, testFile, testFileContents string
  if !readOnly {
@@ -180,36 +181,36 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  testFile = "/testpd1/tracker"
  testFileContents = fmt.Sprintf("%v", mathrand.Int())
  framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
- framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
+ e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
  By("verifying PD is present in node0's VolumeInUse list")
  framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
  By("deleting host0Pod") // delete this pod before creating next pod
  framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
- framework.Logf("deleted host0Pod %q", host0Pod.Name)
+ e2elog.Logf("deleted host0Pod %q", host0Pod.Name)
  }

  By("creating host1Pod on node1")
  _, err = podClient.Create(host1Pod)
  framework.ExpectNoError(err, "Failed to create host1Pod")
  framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))
- framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
+ e2elog.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)

  if readOnly {
  By("deleting host0Pod")
  framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
- framework.Logf("deleted host0Pod %q", host0Pod.Name)
+ e2elog.Logf("deleted host0Pod %q", host0Pod.Name)
  } else {
  By("verifying PD contents in host1Pod")
  verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
- framework.Logf("verified PD contents in pod %q", host1Pod.Name)
+ e2elog.Logf("verified PD contents in pod %q", host1Pod.Name)
  By("verifying PD is removed from node0")
  framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
- framework.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
+ e2elog.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
  }

  By("deleting host1Pod")
  framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
- framework.Logf("deleted host1Pod %q", host1Pod.Name)
+ e2elog.Logf("deleted host1Pod %q", host1Pod.Name)

  By("Test completed successfully, waiting for PD to detach from both nodes")
  waitForPDDetach(diskName, host0Name)
@@ -258,7 +259,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  defer func() {
  // Teardown should do nothing unless test failed.
  By("defer: cleaning up PD-RW test environment")
- framework.Logf("defer cleanup errors can usually be ignored")
+ e2elog.Logf("defer cleanup errors can usually be ignored")
  if host0Pod != nil {
  podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
  }
@@ -268,7 +269,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  }()

  for i := 0; i < t.repeatCnt; i++ { // "rapid" repeat loop
- framework.Logf("PD Read/Writer Iteration #%v", i)
+ e2elog.Logf("PD Read/Writer Iteration #%v", i)
  By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers))
  host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
  _, err = podClient.Create(host0Pod)
@@ -285,7 +286,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  testFileContents := fmt.Sprintf("%v", mathrand.Int())
  fileAndContentToVerify[testFile] = testFileContents
  framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
- framework.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
+ e2elog.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
  }

  By("verifying PD contents via a container")
@@ -346,7 +347,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {

  defer func() {
  By("defer: cleaning up PD-RW test env")
- framework.Logf("defer cleanup errors can usually be ignored")
+ e2elog.Logf("defer cleanup errors can usually be ignored")
  By("defer: delete host0Pod")
  podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
  By("defer: detach and delete PDs")
@@ -379,7 +380,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
  testFile := "/testpd1/tracker"
  testFileContents := fmt.Sprintf("%v", mathrand.Int())
  framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
- framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
+ e2elog.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)

  By("verifying PD is present in node0's VolumeInUse list")
  framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist*/))
@@ -456,17 +457,17 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName
  v, err := f.ReadFileViaContainer(podName, containerName, filePath)
  value = v
  if err != nil {
- framework.Logf("Error reading file: %v", err)
+ e2elog.Logf("Error reading file: %v", err)
  }
  framework.ExpectNoError(err)
- framework.Logf("Read file %q with content: %v (iteration %d)", filePath, v, i)
+ e2elog.Logf("Read file %q with content: %v (iteration %d)", filePath, v, i)
  if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) {
- framework.Logf("Warning: read content <%q> does not match execpted content <%q>.", v, expectedContents)
+ e2elog.Logf("Warning: read content <%q> does not match execpted content <%q>.", v, expectedContents)
  size, err := f.CheckFileSizeViaContainer(podName, containerName, filePath)
  if err != nil {
- framework.Logf("Error checking file size: %v", err)
+ e2elog.Logf("Error checking file size: %v", err)
  }
- framework.Logf("Check file %q size: %q", filePath, size)
+ e2elog.Logf("Check file %q size: %q", filePath, size)
  } else {
  break
  }
@@ -487,7 +488,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
  // PD already detached, ignore error.
  return nil
  }
- framework.Logf("Error detaching PD %q: %v", pdName, err)
+ e2elog.Logf("Error detaching PD %q: %v", pdName, err)
  }
  return err

@@ -580,7 +581,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
  // Waits for specified PD to detach from specified hostName
  func waitForPDDetach(diskName string, nodeName types.NodeName) error {
  if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
- framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
+ e2elog.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
  gceCloud, err := gce.GetGCECloud()
  if err != nil {
  return err
@@ -588,15 +589,15 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {
  for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
  diskAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
  if err != nil {
- framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
+ e2elog.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
  return err
  }
  if !diskAttached {
  // Specified disk does not appear to be attached to specified node
- framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
+ e2elog.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
  return nil
  }
- framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
+ e2elog.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
  }
  return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, nodeName, gcePDDetachTimeout)
  }
@@ -605,7 +606,7 @@ func waitForPDDetach(diskName string, nodeName types.NodeName) error {

  func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
  for _, host := range hosts {
- framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
+ e2elog.Logf("Detaching GCE PD %q from node %q.", diskName, host)
  detachPD(host, diskName)
  By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
  waitForPDDetach(diskName, host)
@@ -624,11 +625,11 @@ func waitForPDInVolumesInUse(
  if !shouldExist {
  logStr = "to NOT contain"
  }
- framework.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
+ e2elog.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
  for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
  nodeObj, err := nodeClient.Get(string(nodeName), metav1.GetOptions{})
  if err != nil || nodeObj == nil {
- framework.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
+ e2elog.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
  continue
  }
  exists := false
@@ -636,14 +637,14 @@ func waitForPDInVolumesInUse(
  volumeInUseStr := string(volumeInUse)
  if strings.Contains(volumeInUseStr, diskName) {
  if shouldExist {
- framework.Logf("Found PD %q in node %q's VolumesInUse Status: %q", diskName, nodeName, volumeInUseStr)
+ e2elog.Logf("Found PD %q in node %q's VolumesInUse Status: %q", diskName, nodeName, volumeInUseStr)
  return nil
  }
  exists = true
  }
  }
  if !shouldExist && !exists {
- framework.Logf("Verified PD %q does not exist in node %q's VolumesInUse Status.", diskName, nodeName)
+ e2elog.Logf("Verified PD %q does not exist in node %q's VolumesInUse Status.", diskName, nodeName)
  return nil
  }
  }
@@ -19,13 +19,14 @@ package storage
  import (
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/labels"
  "k8s.io/apimachinery/pkg/types"
  utilerrors "k8s.io/apimachinery/pkg/util/errors"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/providers/gce"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )
@@ -104,7 +105,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
  })

  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up test resources")
+ e2elog.Logf("AfterEach: Cleaning up test resources")
  if c != nil {
  framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
  if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
@@ -39,6 +39,7 @@ import (
  "k8s.io/apimachinery/pkg/watch"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  imageutils "k8s.io/kubernetes/test/utils/image"
  )
@@ -567,7 +568,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {

  for _, pod := range pods {
  if err := deletePodAndPVCs(config, pod); err != nil {
- framework.Logf("Deleting pod %v failed: %v", pod.Name, err)
+ e2elog.Logf("Deleting pod %v failed: %v", pod.Name, err)
  }
  }
  }()
@@ -591,7 +592,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
  }
  delete(pods, pod.Name)
  numFinished++
- framework.Logf("%v/%v pods finished", numFinished, totalPods)
+ e2elog.Logf("%v/%v pods finished", numFinished, totalPods)
  case v1.PodFailed:
  case v1.PodUnknown:
  return false, fmt.Errorf("pod %v is in %v phase", pod.Name, pod.Status.Phase)
@@ -671,7 +672,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
  })

  func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error {
- framework.Logf("Deleting pod %v", pod.Name)
+ e2elog.Logf("Deleting pod %v", pod.Name)
  if err := config.client.CoreV1().Pods(config.ns).Delete(pod.Name, nil); err != nil {
  return err
  }
@@ -845,7 +846,7 @@ func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
  func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
  podNodeName, err := podNodeName(config, pod)
  framework.ExpectNoError(err)
- framework.Logf("pod %q created on Node %q", pod.Name, podNodeName)
+ e2elog.Logf("pod %q created on Node %q", pod.Name, podNodeName)
  Expect(podNodeName).To(Equal(expectedNodeName))
  }

@@ -1030,7 +1031,7 @@ func testReadFileContent(testFileDir string, testFile string, testFileContent st
  // Fail on error
  func podRWCmdExec(pod *v1.Pod, cmd string) string {
  out, err := utils.PodExec(pod, cmd)
- framework.Logf("podRWCmdExec out: %q err: %v", out, err)
+ e2elog.Logf("podRWCmdExec out: %q err: %v", out, err)
  framework.ExpectNoError(err)
  return out
  }
@@ -23,12 +23,13 @@ import (

  . "github.com/onsi/ginkgo"
  appsv1 "k8s.io/api/apps/v1"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/labels"
  utilerrors "k8s.io/apimachinery/pkg/util/errors"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/volume"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  imageutils "k8s.io/kubernetes/test/utils/image"
@@ -150,7 +151,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
  Context("with Single PV - PVC pairs", func() {
  // Note: this is the only code where the pv is deleted.
  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up test resources.")
+ e2elog.Logf("AfterEach: Cleaning up test resources.")
  if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
  framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
  }
@@ -212,7 +213,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
  var claims framework.PVCMap

  AfterEach(func() {
- framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
+ e2elog.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
  errs := framework.PVPVCMapCleanup(c, ns, pvols, claims)
  if len(errs) > 0 {
  errmsg := []string{}
@@ -266,7 +267,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
  })

  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up test resources.")
+ e2elog.Logf("AfterEach: Cleaning up test resources.")
  if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
  framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
  }
@@ -300,7 +301,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
  framework.ExpectNoError(err)
  framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
  framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
- framework.Logf("Pod exited without failure; the volume has been recycled.")
+ e2elog.Logf("Pod exited without failure; the volume has been recycled.")
  })
  })
  })
@@ -22,7 +22,7 @@ import (
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/labels"
  utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -30,6 +30,7 @@ import (
  "k8s.io/kubernetes/pkg/util/slice"
  volumeutil "k8s.io/kubernetes/pkg/volume/util"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  )

@@ -89,7 +90,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
  })

  AfterEach(func() {
- framework.Logf("AfterEach: Cleaning up test resources.")
+ e2elog.Logf("AfterEach: Cleaning up test resources.")
  if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
  framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
  }
@@ -39,6 +39,7 @@ import (
  volumehelpers "k8s.io/cloud-provider/volume/helpers"
  podutil "k8s.io/kubernetes/pkg/api/v1/pod"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testsuites"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  imageutils "k8s.io/kubernetes/test/utils/image"
@@ -177,7 +178,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
  _, err := c.StorageV1().StorageClasses().Create(class)
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting storage class %s", class.Name)
+ e2elog.Logf("deleting storage class %s", class.Name)
  framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil),
  "Error deleting StorageClass %s", class.Name)
  }()
@@ -189,19 +190,19 @@ func testZonalFailover(c clientset.Interface, ns string) {
  framework.ExpectNoError(err)

  defer func() {
- framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
+ e2elog.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
  // typically this claim has already been deleted
  framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(statefulSet.Name, nil /* options */),
  "Error deleting StatefulSet %s", statefulSet.Name)

- framework.Logf("deleting claims in namespace %s", ns)
+ e2elog.Logf("deleting claims in namespace %s", ns)
  pvc := getPVC(c, ns, regionalPDLabels)
  framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil),
  "Error deleting claim %s.", pvc.Name)
  if pvc.Spec.VolumeName != "" {
  err = framework.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout)
  if err != nil {
- framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
+ e2elog.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
  }
  }
  }()
@@ -230,7 +231,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
  removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)

  defer func() {
- framework.Logf("removing previously added node taints")
+ e2elog.Logf("removing previously added node taints")
  removeTaintFunc()
  }()

@@ -246,7 +247,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
  otherZone = cloudZones[0]
  }
  err = wait.PollImmediate(framework.Poll, statefulSetReadyTimeout, func() (bool, error) {
- framework.Logf("checking whether new pod is scheduled in zone %q", otherZone)
+ e2elog.Logf("checking whether new pod is scheduled in zone %q", otherZone)
  pod = getPod(c, ns, regionalPDLabels)
  nodeName = pod.Spec.NodeName
  node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
@@ -34,6 +34,7 @@ go_library(
  "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
  "//staging/src/k8s.io/csi-translation-lib:go_default_library",
  "//test/e2e/framework:go_default_library",
+ "//test/e2e/framework/log:go_default_library",
  "//test/e2e/framework/metrics:go_default_library",
  "//test/e2e/framework/podlogs:go_default_library",
  "//test/e2e/framework/volume:go_default_library",
@@ -37,6 +37,7 @@ import (
  clientset "k8s.io/client-go/kubernetes"
  csilib "k8s.io/csi-translation-lib"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/metrics"
  "k8s.io/kubernetes/test/e2e/framework/podlogs"
  "k8s.io/kubernetes/test/e2e/framework/volume"
@@ -193,13 +194,13 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p

  switch volType {
  case testpatterns.InlineVolume:
- framework.Logf("Creating resource for inline volume")
+ e2elog.Logf("Creating resource for inline volume")
  if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
  r.volSource = iDriver.GetVolumeSource(false, fsType, r.volume)
  r.volType = dInfo.Name
  }
  case testpatterns.PreprovisionedPV:
- framework.Logf("Creating resource for pre-provisioned PV")
+ e2elog.Logf("Creating resource for pre-provisioned PV")
  if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
  pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.volume)
  if pvSource != nil {
@@ -208,7 +209,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
  r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
  }
  case testpatterns.DynamicPV:
- framework.Logf("Creating resource for dynamic PV")
+ e2elog.Logf("Creating resource for dynamic PV")
  if dDriver, ok := driver.(DynamicPVTestDriver); ok {
  claimSize := dDriver.GetClaimSize()
  r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType)
@@ -302,7 +303,7 @@ func createVolumeSourceWithPVCPV(
  pvcConfig.VolumeMode = &volMode
  }

- framework.Logf("Creating PVC and PV")
+ e2elog.Logf("Creating PVC and PV")
  pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
  framework.ExpectNoError(err, "PVC, PV creation failed")

@@ -522,7 +523,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
  framework.ExpectNoError(err, "Error getting c-m metrics : %v", err)
  totOps := getVolumeOpsFromMetricsForPlugin(metrics.Metrics(controllerMetrics), pluginName)

- framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
+ e2elog.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server")
  nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
  framework.ExpectNoError(err, "Error listing nodes: %v", err)
  if len(nodes.Items) <= nodeLimit {
@@ -535,7 +536,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
  totOps = addOpCounts(totOps, getVolumeOpsFromMetricsForPlugin(metrics.Metrics(nodeMetrics), pluginName))
  }
  } else {
- framework.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
+ e2elog.Logf("Skipping operation metrics gathering from nodes in getVolumeOpCounts, greater than %v nodes", nodeLimit)
  }

  return totOps
@@ -561,7 +562,7 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo
  var migratedOps opCounts
  csiName, err := csilib.GetCSINameFromInTreeName(pluginName)
  if err != nil {
- framework.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
+ e2elog.Logf("Could not find CSI Name for in-tree plugin %v", pluginName)
  migratedOps = opCounts{}
  } else {
  csiName = "kubernetes.io/csi:" + csiName
@@ -570,7 +571,7 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo
  return getVolumeOpCounts(cs, pluginName), migratedOps
  } else {
  // Not an in-tree driver
- framework.Logf("Test running for native CSI Driver, not checking metrics")
+ e2elog.Logf("Test running for native CSI Driver, not checking metrics")
  return opCounts{}, opCounts{}
  }
  }
@@ -602,7 +603,7 @@ func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string,
  // may not do any volume operations and therefore not emit any metrics
  } else {
  // In-tree plugin is not migrated
- framework.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)
+ e2elog.Logf("In-tree plugin %v is not migrated, not validating any metrics", pluginName)

  // We don't check in-tree plugin metrics because some negative test
  // cases may not do any volume operations and therefore not emit any
@@ -34,6 +34,7 @@ import (
  "k8s.io/client-go/dynamic"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/framework/volume"
  "k8s.io/kubernetes/test/e2e/storage/testpatterns"
  )
@@ -131,7 +132,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
  }
  l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name)
  l.pvc.Spec.StorageClassName = &l.sc.Name
- framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
+ e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
  l.testCase = &StorageClassTest{
  Client: l.config.Framework.ClientSet,
  Claim: l.pvc,
@@ -245,7 +246,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
  class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting storage class %s", class.Name)
+ e2elog.Logf("deleting storage class %s", class.Name)
  framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
  }()
  }
@@ -254,7 +255,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
  claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
+ e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
  // typically this claim has already been deleted
  err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
  if err != nil && !apierrs.IsNotFound(err) {
@@ -475,7 +476,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
  }
  if len(errors) > 0 {
  for claimName, err := range errors {
- framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
+ e2elog.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
  }
  }
  }()
@@ -593,9 +594,9 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
  }
  body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
  if err != nil {
- framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
+ e2elog.Logf("Error getting logs for pod %s: %v", pod.Name, err)
  } else {
- framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
+ e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
  }
  framework.DeletePodOrFail(c, pod.Namespace, pod.Name)
  }
@@ -663,19 +664,19 @@ func prepareDataSourceForProvisioning(
  }

  cleanupFunc := func() {
- framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
+ e2elog.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
  err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Delete(snapshot.GetName(), nil)
  if err != nil && !apierrs.IsNotFound(err) {
  framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err)
  }

- framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
+ e2elog.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
  err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil)
  if err != nil && !apierrs.IsNotFound(err) {
  framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
  }

- framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
+ e2elog.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
  framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil))
  }

@@ -23,12 +23,13 @@ import (
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"

- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
  apierrs "k8s.io/apimachinery/pkg/api/errors"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/runtime/schema"
  "k8s.io/client-go/dynamic"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testpatterns"
  )

@@ -119,13 +120,13 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
  claimSize := dDriver.GetClaimSize()
  pvc := getClaim(claimSize, config.Framework.Namespace.Name)
  pvc.Spec.StorageClassName = &class.Name
- framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)
+ e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)

  By("creating a StorageClass " + class.Name)
  class, err := cs.StorageV1().StorageClasses().Create(class)
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting storage class %s", class.Name)
+ e2elog.Logf("deleting storage class %s", class.Name)
  framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
  }()

@@ -133,7 +134,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
  pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
+ e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
  // typically this claim has already been deleted
  err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
  if err != nil && !apierrs.IsNotFound(err) {
@@ -156,7 +157,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
  vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting SnapshotClass %s", vsc.GetName())
+ e2elog.Logf("deleting SnapshotClass %s", vsc.GetName())
  framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
  }()

@@ -166,7 +167,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
  snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
  framework.ExpectNoError(err)
  defer func() {
- framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
+ e2elog.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
  // typically this snapshot has already been deleted
  err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
  if err != nil && !apierrs.IsNotFound(err) {
@@ -202,27 +203,27 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt

  // WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
  func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, Poll, timeout time.Duration) error {
- framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
+ e2elog.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
  for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
  snapshot, err := c.Resource(snapshotGVR).Namespace(ns).Get(snapshotName, metav1.GetOptions{})
  if err != nil {
- framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", snapshotName, Poll, err)
+ e2elog.Logf("Failed to get claim %q, retrying in %v. Error: %v", snapshotName, Poll, err)
  continue
  } else {
  status := snapshot.Object["status"]
  if status == nil {
- framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
+ e2elog.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
  continue
  }
  value := status.(map[string]interface{})
  if value["readyToUse"] == true {
- framework.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
+ e2elog.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
  return nil
  } else if value["ready"] == true {
- framework.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
+ e2elog.Logf("VolumeSnapshot %s found and is ready", snapshotName, time.Since(start))
  return nil
  } else {
- framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
+ e2elog.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
  }
  }
  }
@@ -27,6 +27,7 @@ import (
  "k8s.io/apimachinery/pkg/util/rand"
  "k8s.io/apimachinery/pkg/util/wait"
  "k8s.io/kubernetes/test/e2e/framework"
+ e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  "k8s.io/kubernetes/test/e2e/storage/testpatterns"
  "k8s.io/kubernetes/test/e2e/storage/utils"
  imageutils "k8s.io/kubernetes/test/utils/image"
@@ -797,7 +798,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {

  By("Failing liveness probe")
  out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
- framework.Logf("Pod exec output: %v", out)
+ e2elog.Logf("Pod exec output: %v", out)
  Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")

  // Check that container has restarted
@@ -810,10 +811,10 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
  }
  for _, status := range pod.Status.ContainerStatuses {
  if status.Name == pod.Spec.Containers[0].Name {
- framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
+ e2elog.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
  restarts = status.RestartCount
  if restarts > 0 {
- framework.Logf("Container has restart count: %v", restarts)
+ e2elog.Logf("Container has restart count: %v", restarts)
  return true, nil
  }
  }
@@ -826,7 +827,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
  By("Rewriting the file")
  writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath)
  out, err = podContainerExec(pod, 1, writeCmd)
- framework.Logf("Pod exec output: %v", out)
+ e2elog.Logf("Pod exec output: %v", out)
  Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")

  // Wait for container restarts to stabilize
@@ -843,13 +844,13 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
  if status.RestartCount == restarts {
  stableCount++
  if stableCount > stableThreshold {
- framework.Logf("Container restart has stabilized")
+ e2elog.Logf("Container restart has stabilized")
  return true, nil
  }
  } else {
  restarts = status.RestartCount
  stableCount = 0
- framework.Logf("Container has restart count: %v", restarts)
+ e2elog.Logf("Container has restart count: %v", restarts)
  }
  break
  }
@ -30,10 +30,11 @@ import (
|
|||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
|
@ -277,7 +278,7 @@ func deleteFile(pod *v1.Pod, fpath string) {
|
|||
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
|
||||
if err != nil {
|
||||
// keep going, the test dir will be deleted when the volume is unmounted
|
||||
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
|
||||
e2elog.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -309,12 +310,12 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
|
|||
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
|
||||
e := framework.DeletePodWithWait(f, cs, clientPod)
|
||||
if e != nil {
|
||||
framework.Logf("client pod failed to delete: %v", e)
|
||||
e2elog.Logf("client pod failed to delete: %v", e)
|
||||
if err == nil { // delete err is returned if err is not set
|
||||
err = e
|
||||
}
|
||||
} else {
|
||||
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
|
||||
e2elog.Logf("sleeping a bit so kubelet can unmount and detach the volume")
|
||||
time.Sleep(volume.PodCleanupTimeout)
|
||||
}
|
||||
}()
|
||||
|
|
|
@ -26,6 +26,7 @@ go_library(
|
|||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
|
|
|
@ -27,13 +27,14 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
uexec "k8s.io/utils/exec"
|
||||
)
|
||||
|
@ -108,14 +109,14 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
|
|||
framework.ExpectNoError(err)
|
||||
nodeIP = nodeIP + ":22"
|
||||
|
||||
framework.Logf("Checking if sudo command is present")
|
||||
e2elog.Logf("Checking if sudo command is present")
|
||||
sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
|
||||
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
|
||||
if !strings.Contains(sshResult.Stderr, "command not found") {
|
||||
sudoPresent = true
|
||||
}
|
||||
|
||||
framework.Logf("Checking if systemctl command is present")
|
||||
e2elog.Logf("Checking if systemctl command is present")
|
||||
sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
|
||||
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
|
||||
if !strings.Contains(sshResult.Stderr, "command not found") {
|
||||
|
@ -132,7 +133,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
|
|||
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
|
||||
}
|
||||
|
||||
framework.Logf("Attempting `%s`", command)
|
||||
e2elog.Logf("Attempting `%s`", command)
|
||||
sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider)
|
||||
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
|
||||
framework.LogSSHResult(sshResult)
|
||||
|
@ -154,7 +155,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
|
|||
}
|
||||
}
|
||||
Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
|
||||
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
|
||||
e2elog.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
|
||||
time.Sleep(30 * time.Second)
|
||||
}
|
||||
if kOp == KStart || kOp == KRestart {
|
||||
|
@ -176,7 +177,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
|
|||
if sudoPresent {
|
||||
command = fmt.Sprintf("sudo %s", command)
|
||||
}
|
||||
framework.Logf("Attempting `%s`", command)
|
||||
e2elog.Logf("Attempting `%s`", command)
|
||||
sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider)
|
||||
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
|
||||
framework.LogSSHResult(sshResult)
|
||||
|
@ -190,7 +191,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
|
|||
By("Writing to the volume.")
|
||||
file := "/mnt/_SUCCESS"
|
||||
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
|
||||
framework.Logf(out)
|
||||
e2elog.Logf(out)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Restarting kubelet")
|
||||
|
@ -198,9 +199,9 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
|
|||
|
||||
By("Testing that written file is accessible.")
|
||||
out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
|
||||
framework.Logf(out)
|
||||
e2elog.Logf(out)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
|
||||
e2elog.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
|
||||
}
|
||||
|
||||
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
|
||||
|
@ -257,7 +258,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
|
|||
framework.LogSSHResult(result)
|
||||
framework.ExpectNoError(err, "Encountered SSH error.")
|
||||
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
|
||||
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
|
||||
e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
|
||||
|
||||
if checkSubpath {
|
||||
By("Expecting the volume subpath mount not to be found.")
|
||||
|
@ -265,7 +266,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
|
|||
framework.LogSSHResult(result)
|
||||
framework.ExpectNoError(err, "Encountered SSH error.")
|
||||
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
|
||||
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
|
||||
e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
|
||||
}
|
||||
}
|
||||
|
||||
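A minimal sketch of how the kubelet helpers above compose in a test, assuming a clientset c and a running clientPod with a volume mounted under /mnt (the file name is illustrative):

    By("Restarting kubelet on the pod's node")
    KubeletCommand(KRestart, c, clientPod)

    By("Verifying the mount survived the restart")
    out, err := PodExec(clientPod, "cat /mnt/_SUCCESS")
    e2elog.Logf("pod exec output: %s", out)
    framework.ExpectNoError(err)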
|
|
|
@ -23,13 +23,14 @@ import (
|
|||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testsuites"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
@ -221,7 +222,7 @@ func expandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c
|
|||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Error updating pvc %s with %v", pvcName, err)
|
||||
e2elog.Logf("Error updating pvc %s with %v", pvcName, err)
|
||||
return false, nil
|
||||
})
|
||||
return updatedPVC, waitErr
|
||||
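A minimal sketch of how expandPVCSize is consumed, assuming a bound claim pvc, a clientset c, and that the helper returns the updated claim plus an error as the body above suggests; the target size is illustrative:

    newSize := resource.MustParse("6Gi") // illustrative target size
    expandedPVC, err := expandPVCSize(pvc, newSize, c)
    framework.ExpectNoError(err, "error expanding PVC")
    e2elog.Logf("PVC %s now requests %s", expandedPVC.Name, newSize.String())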
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
clientset "k8s.io/client-go/kubernetes"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testsuites"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
|
@ -75,7 +76,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
AfterEach(func() {
|
||||
newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err)
|
||||
e2elog.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err)
|
||||
} else {
|
||||
framework.DeletePersistentVolumeClaim(c, newPvc.Name, newPvc.Namespace)
|
||||
if newPvc.Spec.VolumeName != "" {
|
||||
|
@ -117,7 +118,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
err = framework.WaitForPodRunningInNamespace(c, pod)
|
||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
|
||||
updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber)
|
||||
|
@ -176,7 +177,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
|
||||
By("Checking failure metrics")
|
||||
|
@ -218,12 +219,12 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
// by the volume stats collector
|
||||
var kubeMetrics metrics.KubeletMetrics
|
||||
waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
|
||||
framework.Logf("Grabbing Kubelet metrics")
|
||||
e2elog.Logf("Grabbing Kubelet metrics")
|
||||
// Grab kubelet metrics from the node the pod was scheduled on
|
||||
var err error
|
||||
kubeMetrics, err = metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
|
||||
if err != nil {
|
||||
framework.Logf("Error fetching kubelet metrics")
|
||||
e2elog.Logf("Error fetching kubelet metrics")
|
||||
return false, err
|
||||
}
|
||||
key := volumeStatKeys[0]
|
||||
|
@ -241,7 +242,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
Expect(found).To(BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
|
||||
}
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
})
|
||||
|
||||
|
@ -272,7 +273,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...)
|
||||
Expect(valid).To(BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
})
|
||||
|
||||
|
@ -302,7 +303,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...)
|
||||
Expect(valid).To(BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
})
|
||||
|
||||
|
@ -362,7 +363,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
|
|||
}
|
||||
}
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
})
|
||||
|
||||
|
@ -534,7 +535,7 @@ func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGr
|
|||
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
|
||||
|
||||
if err != nil {
|
||||
framework.Logf("Error fetching controller-manager metrics")
|
||||
e2elog.Logf("Error fetching controller-manager metrics")
|
||||
return false, err
|
||||
}
|
||||
|
||||
|
@ -637,18 +638,18 @@ func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) *storageCo
|
|||
func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) bool {
|
||||
found := false
|
||||
errCount := 0
|
||||
framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
|
||||
e2elog.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
|
||||
if samples, ok := kubeletMetrics[metricKeyName]; ok {
|
||||
for _, sample := range samples {
|
||||
framework.Logf("Found sample %s", sample.String())
|
||||
e2elog.Logf("Found sample %s", sample.String())
|
||||
samplePVC, ok := sample.Metric["persistentvolumeclaim"]
|
||||
if !ok {
|
||||
framework.Logf("Error getting pvc for metric %s, sample %s", metricKeyName, sample.String())
|
||||
e2elog.Logf("Error getting pvc for metric %s, sample %s", metricKeyName, sample.String())
|
||||
errCount++
|
||||
}
|
||||
sampleNS, ok := sample.Metric["namespace"]
|
||||
if !ok {
|
||||
framework.Logf("Error getting namespace for metric %s, sample %s", metricKeyName, sample.String())
|
||||
e2elog.Logf("Error getting namespace for metric %s, sample %s", metricKeyName, sample.String())
|
||||
errCount++
|
||||
}
|
||||
|
||||
|
@ -672,7 +673,7 @@ func waitForPVControllerSync(metricsGrabber *metrics.Grabber, metricName, dimens
|
|||
verifyMetricFunc := func() (bool, error) {
|
||||
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
|
||||
if err != nil {
|
||||
framework.Logf("Error fetching controller-manager metrics")
|
||||
e2elog.Logf("Error fetching controller-manager metrics")
|
||||
return false, err
|
||||
}
|
||||
return len(getPVControllerMetrics(updatedMetrics, metricName, dimension)) > 0, nil
|
||||
|
@ -715,17 +716,17 @@ func calculateRelativeValues(originValues, updatedValues map[string]int64) map[s
|
|||
|
||||
func hasValidMetrics(metrics metrics.Metrics, metricKey string, dimensions ...string) bool {
|
||||
var errCount int
|
||||
framework.Logf("Looking for sample in metric %q", metricKey)
|
||||
e2elog.Logf("Looking for sample in metric %q", metricKey)
|
||||
samples, ok := metrics[metricKey]
|
||||
if !ok {
|
||||
framework.Logf("Key %q was not found in metrics", metricKey)
|
||||
e2elog.Logf("Key %q was not found in metrics", metricKey)
|
||||
return false
|
||||
}
|
||||
for _, sample := range samples {
|
||||
framework.Logf("Found sample %q", sample.String())
|
||||
e2elog.Logf("Found sample %q", sample.String())
|
||||
for _, d := range dimensions {
|
||||
if _, ok := sample.Metric[model.LabelName(d)]; !ok {
|
||||
framework.Logf("Error getting dimension %q for metric %q, sample %q", d, metricKey, sample.String())
|
||||
e2elog.Logf("Error getting dimension %q for metric %q, sample %q", d, metricKey, sample.String())
|
||||
errCount++
|
||||
}
|
||||
}
|
||||
|
@ -736,7 +737,7 @@ func hasValidMetrics(metrics metrics.Metrics, metricKey string, dimensions ...st
|
|||
func getStatesMetrics(metricKey string, givenMetrics metrics.Metrics) map[string]map[string]int64 {
|
||||
states := make(map[string]map[string]int64)
|
||||
for _, sample := range givenMetrics[metricKey] {
|
||||
framework.Logf("Found sample %q", sample.String())
|
||||
e2elog.Logf("Found sample %q", sample.String())
|
||||
state := string(sample.Metric["state"])
|
||||
pluginName := string(sample.Metric["plugin_name"])
|
||||
states[state] = map[string]int64{pluginName: int64(sample.Value)}
|
||||
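A minimal sketch of the stat-lookup helper above in use, assuming kubeMetrics has already been grabbed from the kubelet and pvc is the claim under test; the metric key is illustrative:

    key := "kubelet_volume_stats_used_bytes" // illustrative metric name
    if findVolumeStatMetric(key, pvc.Namespace, pvc.Name, kubeMetrics) {
        e2elog.Logf("found volume stat %s for %s/%s", key, pvc.Namespace, pvc.Name)
    } else {
        e2elog.Logf("volume stat %s not reported yet for %s/%s", key, pvc.Namespace, pvc.Name)
    }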
|
|
|
@ -47,6 +47,7 @@ import (
|
|||
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/auth"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testsuites"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
|
@ -129,10 +130,10 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
|
|||
if len(zone) > 0 {
|
||||
region := zone[:len(zone)-1]
|
||||
cfg := aws.Config{Region: ®ion}
|
||||
framework.Logf("using region %s", region)
|
||||
e2elog.Logf("using region %s", region)
|
||||
client = ec2.New(session.New(), &cfg)
|
||||
} else {
|
||||
framework.Logf("no region configured")
|
||||
e2elog.Logf("no region configured")
|
||||
client = ec2.New(session.New())
|
||||
}
|
||||
|
||||
|
@ -207,7 +208,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
|
|||
}
|
||||
for _, test := range tests {
|
||||
if !framework.ProviderIs(test.CloudProviders...) {
|
||||
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
|
||||
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
|
||||
continue
|
||||
}
|
||||
action := "creating claims with class with waitForFirstConsumer"
|
||||
|
@ -446,7 +447,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
test := t
|
||||
|
||||
if !framework.ProviderIs(test.CloudProviders...) {
|
||||
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
|
||||
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -573,7 +574,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
// The claim should timeout phase:Pending
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
|
||||
Expect(err).To(HaveOccurred())
|
||||
framework.Logf(err.Error())
|
||||
e2elog.Logf(err.Error())
|
||||
})
|
||||
|
||||
It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
|
||||
|
@ -616,13 +617,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
|
||||
// Report indicators of regression
|
||||
if len(residualPVs) > 0 {
|
||||
framework.Logf("Remaining PersistentVolumes:")
|
||||
e2elog.Logf("Remaining PersistentVolumes:")
|
||||
for i, pv := range residualPVs {
|
||||
framework.Logf("\t%d) %s", i+1, pv.Name)
|
||||
e2elog.Logf("\t%d) %s", i+1, pv.Name)
|
||||
}
|
||||
framework.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs))
|
||||
}
|
||||
framework.Logf("0 PersistentVolumes remain.")
|
||||
e2elog.Logf("0 PersistentVolumes remain.")
|
||||
})
|
||||
|
||||
It("deletion should be idempotent", func() {
|
||||
|
@ -800,7 +801,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
// The claim should timeout phase:Pending
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
|
||||
Expect(err).To(HaveOccurred())
|
||||
framework.Logf(err.Error())
|
||||
e2elog.Logf(err.Error())
|
||||
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
|
||||
|
@ -834,7 +835,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
// The claim should timeout phase:Pending
|
||||
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
|
||||
Expect(err).To(HaveOccurred())
|
||||
framework.Logf(err.Error())
|
||||
e2elog.Logf(err.Error())
|
||||
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
|
||||
|
@ -883,7 +884,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
class, err := c.StorageV1().StorageClasses().Create(class)
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
framework.Logf("deleting storage class %s", class.Name)
|
||||
e2elog.Logf("deleting storage class %s", class.Name)
|
||||
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
|
||||
}()
|
||||
|
||||
|
@ -893,7 +894,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
|
||||
e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
|
||||
err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
|
||||
|
@ -925,7 +926,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
return false, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
framework.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
|
||||
e2elog.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
@ -957,7 +958,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
|
|||
}
|
||||
for _, test := range tests {
|
||||
if !framework.ProviderIs(test.CloudProviders...) {
|
||||
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
|
||||
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
|
||||
continue
|
||||
}
|
||||
By("creating a claim with class with allowedTopologies set")
|
||||
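A minimal sketch of the negative-provisioning check repeated in the hunks above, assuming a claim that is expected to stay Pending:

    err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
    Expect(err).To(HaveOccurred()) // timing out while waiting for Bound is the expected outcome here
    e2elog.Logf("claim %s/%s stayed unbound as expected: %v", ns, claim.Name, err)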
|
|
|
@ -53,6 +53,7 @@ go_library(
|
|||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/deployment:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/storage/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"os"
|
||||
|
||||
"gopkg.in/gcfg.v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -130,13 +130,13 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
|
|||
if cfg.Workspace.VCenterIP == "" || cfg.Workspace.DefaultDatastore == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
|
||||
msg := fmt.Sprintf("All fields in workspace are mandatory."+
|
||||
" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
|
||||
framework.Logf(msg)
|
||||
e2elog.Logf(msg)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
for vcServer, vcConfig := range cfg.VirtualCenter {
|
||||
framework.Logf("Initializing vc server %s", vcServer)
|
||||
e2elog.Logf("Initializing vc server %s", vcServer)
|
||||
if vcServer == "" {
|
||||
framework.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
|
||||
e2elog.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
|
||||
return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
|
||||
}
|
||||
vcConfig.Hostname = vcServer
|
||||
|
@ -149,12 +149,12 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
|
|||
}
|
||||
if vcConfig.Username == "" {
|
||||
msg := fmt.Sprintf("vcConfig.Username is empty for vc %s!", vcServer)
|
||||
framework.Logf(msg)
|
||||
e2elog.Logf(msg)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if vcConfig.Password == "" {
|
||||
msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer)
|
||||
framework.Logf(msg)
|
||||
e2elog.Logf(msg)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if vcConfig.Port == "" {
|
||||
|
@ -176,6 +176,6 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
|
|||
vsphereInstances[vcServer] = &vsphereIns
|
||||
}
|
||||
|
||||
framework.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
|
||||
e2elog.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
|
||||
return vsphereInstances, nil
|
||||
}
|
||||
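A minimal sketch of how the loader above is typically driven, assuming cfg has already been parsed from vsphere.conf via gcfg:

    instances, err := populateInstanceMap(cfg)
    if err != nil {
        // errors here usually mean vsphere.conf is missing a mandatory workspace or credential field
        e2elog.Logf("failed to build vSphere instance map: %v", err)
    } else {
        e2elog.Logf("configured %d vCenter instance(s)", len(instances))
    }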
|
|
|
@ -27,8 +27,8 @@ import (
|
|||
"github.com/vmware/govmomi/vapi/tags"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
neturl "net/url"
|
||||
)
|
||||
|
@ -77,7 +77,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
|
|||
if vs.Config.Datacenters == "" {
|
||||
datacenters, err = vs.GetAllDatacenter(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("NodeMapper error: %v", err)
|
||||
e2elog.Logf("NodeMapper error: %v", err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
|
@ -89,7 +89,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
|
|||
}
|
||||
datacenter, err := vs.GetDatacenter(ctx, dc)
|
||||
if err != nil {
|
||||
framework.Logf("NodeMapper error dc: %s \n err: %v", dc, err)
|
||||
e2elog.Logf("NodeMapper error dc: %s \n err: %v", dc, err)
|
||||
|
||||
continue
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
|
|||
}
|
||||
|
||||
for _, dc := range datacenters {
|
||||
framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
|
||||
e2elog.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
|
||||
queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
|
||||
}
|
||||
}
|
||||
|
@ -107,20 +107,20 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
|
|||
n := node
|
||||
go func() {
|
||||
nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
|
||||
framework.Logf("Searching for node with UUID: %s", nodeUUID)
|
||||
e2elog.Logf("Searching for node with UUID: %s", nodeUUID)
|
||||
for _, res := range queueChannel {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
vm, err := res.vs.GetVMByUUID(ctx, nodeUUID, res.datacenter)
|
||||
if err != nil {
|
||||
framework.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
|
||||
e2elog.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
|
||||
err, n.Name, res.vs.Config.Hostname, res.datacenter.Name())
|
||||
continue
|
||||
}
|
||||
if vm != nil {
|
||||
hostSystemRef := res.vs.GetHostFromVMReference(ctx, vm.Reference())
|
||||
zones := retrieveZoneInformationForNode(n.Name, res.vs, hostSystemRef)
|
||||
framework.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s",
|
||||
e2elog.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s",
|
||||
n.Name, vm, hostSystemRef, zones, res.vs.Config.Hostname, res.datacenter.Name())
|
||||
nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), HostSystemRef: hostSystemRef, VSphere: res.vs, Zones: zones}
|
||||
nm.SetNodeInfo(n.Name, nodeInfo)
|
||||
|
@ -192,10 +192,10 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
|
|||
}
|
||||
switch {
|
||||
case category.Name == "k8s-zone":
|
||||
framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
|
||||
e2elog.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
|
||||
zonesAttachedToObject = append(zonesAttachedToObject, tag.Name)
|
||||
case category.Name == "k8s-region":
|
||||
framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
|
||||
e2elog.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
|
||||
}
|
||||
}
|
||||
// Overwrite zone information if it exists for this object
|
||||
|
@ -250,7 +250,7 @@ func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
|
|||
vcToZoneDatastoresMap[vc][zone] = commonDatastores
|
||||
}
|
||||
}
|
||||
framework.Logf("Zone to datastores map : %+v", vcToZoneDatastoresMap)
|
||||
e2elog.Logf("Zone to datastores map : %+v", vcToZoneDatastoresMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -21,11 +21,12 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -111,7 +112,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
|
|||
})
|
||||
|
||||
AfterEach(func() {
|
||||
framework.Logf("AfterEach: Cleaning up test resources")
|
||||
e2elog.Logf("AfterEach: Cleaning up test resources")
|
||||
if c != nil {
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
|
||||
|
||||
|
|
|
@ -22,11 +22,12 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -129,7 +130,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
|
|||
|
||||
By("Verify the volume is accessible and available in the pod")
|
||||
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
|
||||
framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
|
||||
e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
|
||||
|
||||
By("Deleting the Pod")
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
|
||||
|
@ -175,7 +176,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
|
|||
pvc = nil
|
||||
|
||||
By("Verify PV is retained")
|
||||
framework.Logf("Waiting for PV %v to become Released", pv.Name)
|
||||
e2elog.Logf("Waiting for PV %v to become Released", pv.Name)
|
||||
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
|
||||
|
|
|
@ -30,7 +30,7 @@ import (
|
|||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/soap"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -121,7 +121,7 @@ func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, fol
|
|||
finder.SetDatacenter(datacenter)
|
||||
vmFolder, err := finder.Folder(ctx, folderPath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
|
||||
e2elog.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
|
||||
return vmFolderMor, err
|
||||
}
|
||||
return vmFolder.Reference(), nil
|
||||
|
@ -156,15 +156,15 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
|
|||
soapFault := soap.ToSoapFault(err)
|
||||
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
|
||||
directoryAlreadyPresent = true
|
||||
framework.Logf("Directory with the path %+q is already present", directoryPath)
|
||||
e2elog.Logf("Directory with the path %+q is already present", directoryPath)
|
||||
}
|
||||
}
|
||||
if !directoryAlreadyPresent {
|
||||
framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
|
||||
e2elog.Logf("Cannot create dir %#v. err %s", directoryPath, err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
framework.Logf("Created dir with path as %+q", directoryPath)
|
||||
e2elog.Logf("Created dir with path as %+q", directoryPath)
|
||||
vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
|
||||
|
||||
// Create a virtual disk manager
|
||||
|
@ -180,12 +180,12 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
|
|||
// Create virtual disk
|
||||
task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
|
||||
e2elog.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
|
||||
return "", err
|
||||
}
|
||||
taskInfo, err := task.WaitForResult(ctx, nil)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
|
||||
e2elog.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
|
||||
return "", err
|
||||
}
|
||||
volumePath := taskInfo.Result.(string)
|
||||
|
@ -209,12 +209,12 @@ func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedOb
|
|||
// Delete virtual disk
|
||||
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete virtual disk. err: %v", err)
|
||||
e2elog.Logf("Failed to delete virtual disk. err: %v", err)
|
||||
return err
|
||||
}
|
||||
err = task.Wait(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete virtual disk. err: %v", err)
|
||||
e2elog.Logf("Failed to delete virtual disk. err: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -233,7 +233,7 @@ func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectR
|
|||
vmFolder := object.NewFolder(vs.Client.Client, folderMor)
|
||||
vmFoldersChildren, err := vmFolder.Children(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
|
||||
e2elog.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
|
||||
return
|
||||
}
|
||||
for _, vmFoldersChild := range vmFoldersChildren {
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -63,7 +64,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
|
|||
Bootstrap(f)
|
||||
})
|
||||
AfterEach(func() {
|
||||
framework.Logf("Deleting all statefulset in namespace: %v", namespace)
|
||||
e2elog.Logf("Deleting all statefulset in namespace: %v", namespace)
|
||||
framework.DeleteAllStatefulSets(client, namespace)
|
||||
})
|
||||
|
||||
|
@ -114,7 +115,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
|
|||
for _, volumespec := range sspod.Spec.Volumes {
|
||||
if volumespec.PersistentVolumeClaim != nil {
|
||||
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
|
||||
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
|
||||
e2elog.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
|
||||
framework.ExpectNoError(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName))
|
||||
}
|
||||
}
|
||||
|
@ -141,7 +142,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
|
|||
for _, volumespec := range pod.Spec.Volumes {
|
||||
if volumespec.PersistentVolumeClaim != nil {
|
||||
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
|
||||
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
|
||||
e2elog.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
|
||||
// Verify scale up has re-attached the same volumes and not introduced new volume
|
||||
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
|
||||
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
vim25types "github.com/vmware/govmomi/vim25/types"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -42,6 +42,7 @@ import (
|
|||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
@ -80,13 +81,13 @@ func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
|
|||
for nodeName, nodeVolumes := range attachedResult {
|
||||
for volumePath, attached := range nodeVolumes {
|
||||
if attached {
|
||||
framework.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
|
||||
e2elog.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
disksAttached = false
|
||||
framework.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
|
||||
e2elog.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -126,10 +127,10 @@ func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState
|
|||
|
||||
currentState = attachedState[diskAttached]
|
||||
if currentState == expectedState {
|
||||
framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
|
||||
e2elog.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
|
||||
e2elog.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -226,13 +227,13 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
|
|||
// function to write content to the volume backed by given PVC
|
||||
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
|
||||
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
|
||||
framework.Logf("Done with writing content to volume")
|
||||
e2elog.Logf("Done with writing content to volume")
|
||||
}
|
||||
|
||||
// function to verify content is matching on the volume backed for given PVC
|
||||
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
|
||||
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
|
||||
framework.Logf("Successfully verified content of the volume")
|
||||
e2elog.Logf("Successfully verified content of the volume")
|
||||
}
|
||||
|
||||
func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storage.StorageClass {
|
||||
|
@ -495,7 +496,7 @@ func getPathFromVMDiskPath(vmDiskPath string) string {
|
|||
datastorePathObj := new(object.DatastorePath)
|
||||
isSuccess := datastorePathObj.FromString(vmDiskPath)
|
||||
if !isSuccess {
|
||||
framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
|
||||
e2elog.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
|
||||
return ""
|
||||
}
|
||||
return datastorePathObj.Path
|
||||
|
@ -506,7 +507,7 @@ func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath
|
|||
datastorePathObj := new(object.DatastorePath)
|
||||
isSuccess := datastorePathObj.FromString(vmDiskPath)
|
||||
if !isSuccess {
|
||||
framework.Logf("Failed to parse volPath: %s", vmDiskPath)
|
||||
e2elog.Logf("Failed to parse volPath: %s", vmDiskPath)
|
||||
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
|
||||
}
|
||||
return datastorePathObj, nil
|
||||
|
@ -558,7 +559,7 @@ func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
|
|||
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
|
||||
vmDevices, err := vm.Device(ctx)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
|
||||
e2elog.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -568,10 +569,10 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, disk
|
|||
virtualDevice := device.GetVirtualDevice()
|
||||
if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
|
||||
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
|
||||
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
|
||||
e2elog.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
|
||||
return device, nil
|
||||
} else {
|
||||
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
|
||||
e2elog.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -595,7 +596,7 @@ func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]
|
|||
for i, volPath := range volPaths {
|
||||
deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
|
||||
e2elog.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
|
||||
return nil, err
|
||||
}
|
||||
volPaths[i] = deviceVolPath
|
||||
|
@ -611,7 +612,7 @@ func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volP
|
|||
// Get the canonical volume path for volPath.
|
||||
canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
|
||||
e2elog.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
|
||||
return "", err
|
||||
}
|
||||
// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
|
||||
|
@ -632,7 +633,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
|
|||
Expect(nodeVM.Config).NotTo(BeNil())
|
||||
|
||||
vmxPath = nodeVM.Config.Files.VmPathName
|
||||
framework.Logf("vmx file path is %s", vmxPath)
|
||||
e2elog.Logf("vmx file path is %s", vmxPath)
|
||||
return vmxPath
|
||||
}
|
||||
|
||||
|
@ -657,7 +658,7 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
|
|||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
framework.Logf("Powering off node VM %s", nodeName)
|
||||
e2elog.Logf("Powering off node VM %s", nodeName)
|
||||
|
||||
_, err := vm.PowerOff(ctx)
|
||||
framework.ExpectNoError(err)
|
||||
|
@ -670,7 +671,7 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
|
|||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
framework.Logf("Powering on node VM %s", nodeName)
|
||||
e2elog.Logf("Powering on node VM %s", nodeName)
|
||||
|
||||
vm.PowerOn(ctx)
|
||||
err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
|
||||
|
@ -684,7 +685,7 @@ func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
|
|||
|
||||
poweroffNodeVM(nodeName, vm)
|
||||
|
||||
framework.Logf("Unregistering node VM %s", nodeName)
|
||||
e2elog.Logf("Unregistering node VM %s", nodeName)
|
||||
err := vm.Unregister(ctx)
|
||||
framework.ExpectNoError(err, "Unable to unregister the node")
|
||||
}
|
||||
|
@ -694,7 +695,7 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
|
|||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
|
||||
e2elog.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
|
||||
|
||||
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
|
||||
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
|
||||
|
@ -726,7 +727,7 @@ func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map
|
|||
// Convert VolPaths into canonical form so that it can be compared with the VM device path.
|
||||
vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
|
||||
e2elog.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
|
||||
return nil, err
|
||||
}
|
||||
for vm, volumes := range vmVolumes {
|
||||
|
@ -754,7 +755,7 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
|
|||
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
|
||||
device, err := getVirtualDeviceByPath(ctx, vm, volPath)
|
||||
if err != nil {
|
||||
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
|
||||
e2elog.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
|
||||
volPath,
|
||||
nodeName)
|
||||
return false, err
|
||||
|
@ -762,7 +763,7 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
|
|||
if device == nil {
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
|
||||
e2elog.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
@ -799,7 +800,7 @@ func GetReadySchedulableRandomNodeInfo() *NodeInfo {
|
|||
// via service-control on the given vCenter host over SSH.
|
||||
func invokeVCenterServiceControl(command, service, host string) error {
|
||||
sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
|
||||
framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
|
||||
e2elog.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
|
||||
result, err := framework.SSH(sshCmd, host, framework.TestContext.Provider)
|
||||
if err != nil || result.Code != 0 {
|
||||
framework.LogSSHResult(result)
|
||||
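A minimal sketch of the attach check defined above, assuming a volume path and node name taken from the test at hand:

    attached, err := diskIsAttached(volPath, nodeName)
    framework.ExpectNoError(err)
    if attached {
        e2elog.Logf("disk %q is still attached to node %q", volPath, nodeName)
    } else {
        e2elog.Logf("disk %q has been detached from node %q", volPath, nodeName)
    }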
|
|
|
@ -25,11 +25,12 @@ import (
|
|||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
|
|||
|
||||
func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
|
||||
|
||||
framework.Logf("Invoking Test for DiskFomat: %s", diskFormat)
|
||||
e2elog.Logf("Invoking Test for DiskFomat: %s", diskFormat)
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["diskformat"] = diskFormat
|
||||
|
||||
|
|
|
@ -22,10 +22,11 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -93,7 +94,7 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
|
|||
})
|
||||
|
||||
func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) {
|
||||
framework.Logf("Invoking Test for fstype: %s", fstype)
|
||||
e2elog.Logf("Invoking Test for fstype: %s", fstype)
|
||||
scParameters := make(map[string]string)
|
||||
scParameters["fstype"] = fstype
|
||||
|
||||
|
|
|
@ -27,12 +27,13 @@ import (
|
|||
vimtypes "github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -158,19 +159,19 @@ func waitForPodToFailover(client clientset.Interface, deployment *apps.Deploymen
|
|||
}
|
||||
|
||||
if newNode != oldNode {
|
||||
framework.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
|
||||
e2elog.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting for pod to be failed over from %q", oldNode)
|
||||
e2elog.Logf("Waiting for pod to be failed over from %q", oldNode)
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if err == wait.ErrWaitTimeout {
|
||||
framework.Logf("Time out after waiting for %v", timeout)
|
||||
e2elog.Logf("Time out after waiting for %v", timeout)
|
||||
}
|
||||
framework.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
|
||||
e2elog.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
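A minimal sketch of how the failover wait above is consumed, assuming the deployment and the node the pod originally ran on (the oldNode parameter name is inferred from the body above):

    newNode, err := waitForPodToFailover(client, deployment, oldNode)
    framework.ExpectNoError(err, "pod did not fail over to a different node")
    e2elog.Logf("pod is now running on %q", newNode)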
|
|
|
@ -22,10 +22,11 @@ import (
|
|||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storageV1 "k8s.io/api/storage/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -103,11 +104,11 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
|
|||
}
|
||||
|
||||
iterations64 := float64(iterations)
|
||||
framework.Logf("Average latency for below operations")
|
||||
framework.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64)
|
||||
framework.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64)
|
||||
framework.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64)
|
||||
framework.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64)
|
||||
e2elog.Logf("Average latency for below operations")
|
||||
e2elog.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64)
|
||||
e2elog.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64)
|
||||
e2elog.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64)
|
||||
e2elog.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64)
|
||||
|
||||
})
|
||||
})
|
||||
|
|
|
@ -24,11 +24,12 @@ import (
|
|||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
|
@ -105,7 +106,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
|
|||
pods []*v1.Pod
|
||||
)
|
||||
|
||||
framework.Logf("Testing for nodes on vCenter host: %s", vcHost)
|
||||
e2elog.Logf("Testing for nodes on vCenter host: %s", vcHost)
|
||||
|
||||
for i, node := range nodes {
|
||||
By(fmt.Sprintf("Creating test vsphere volume %d", i))
|
||||
|
|
|
@ -25,10 +25,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -103,7 +104,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
namespace = f.Namespace.Name
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
framework.Logf("framework: %+v", f)
e2elog.Logf("framework: %+v", f)
scParameters = make(map[string]string)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if !(len(nodeList.Items) > 0) {

@ -119,7 +120,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})

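As a rough sketch of what invokeValidPolicyTest ultimately exercises (assumed, not shown in this diff): the scParameters map becomes the Parameters of a StorageClass for the in-tree vSphere provisioner named in the error strings further below. The object name and helper shape here are hypothetical.

package vsphere

import (
	storageV1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createVsphereStorageClass turns scParameters into a StorageClass for the
// kubernetes.io/vsphere-volume provisioner; a sketch, not the suite's helper.
func createVsphereStorageClass(client clientset.Interface, scParameters map[string]string) (*storageV1.StorageClass, error) {
	sc := &storageV1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{GenerateName: "vsphere-sc-"},
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters:  scParameters,
	}
	return client.StorageV1().StorageClasses().Create(sc)
}
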
@ -128,7 +129,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = "1"
scParameters[Policy_ObjectSpaceReservation] = "30"
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})

@ -138,7 +139,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})

@ -147,7 +148,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})

@ -156,7 +157,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"

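The negative-path tests above pair invokeInvalidPolicyTestNeg with an errorMsg string; a sketch of how such an assertion might be written (the exact matcher used by the real suite is assumed):

package vsphere

import (
	"strings"

	. "github.com/onsi/gomega"
)

// expectProvisioningError asserts that provisioning failed and that the error
// carries the expected message; a sketch of the pattern, not the suite's code.
func expectProvisioningError(err error, errorMsg string) {
	Expect(err).To(HaveOccurred())
	if !strings.Contains(err.Error(), errorMsg) {
		Expect(err).NotTo(HaveOccurred(), "unexpected provisioning error: %v", err)
	}
}
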
@ -171,7 +172,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_DiskStripes + "."

@ -185,7 +186,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."

@ -201,7 +202,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VmfsDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +

@ -215,7 +216,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
scParameters[SpbmStoragePolicy] = policyName
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})

@ -223,7 +224,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})

@ -233,7 +234,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters[SpbmStoragePolicy] = tagPolicy
scParameters[Datastore] = VsanDatastore
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""

@ -246,7 +247,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"

@ -261,7 +262,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty())
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"

@ -24,10 +24,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -352,7 +353,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
Expect(err).To(HaveOccurred())

eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
framework.Logf("Failure message : %+q", eventList.Items[0].Message)
e2elog.Logf("Failure message : %+q", eventList.Items[0].Message)
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}

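A self-contained sketch of the event-based failure reporting shown in the last hunk, with simplified event handling (the real helper may filter or order events differently):

package vsphere

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// firstProvisioningFailureMessage surfaces the first event message in the
// claim's namespace as the returned error after a PVC has failed to provision.
func firstProvisioningFailureMessage(client clientset.Interface, pvclaim *v1.PersistentVolumeClaim) error {
	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	if len(eventList.Items) == 0 {
		return fmt.Errorf("no events found in namespace %q", pvclaim.Namespace)
	}
	e2elog.Logf("Failure message : %+q", eventList.Items[0].Message)
	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}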