mirror of https://github.com/k3s-io/k3s
Merge pull request #57948 from zhangxiaoyu-zidif/fix-ut-print
Automatic merge from submit-queue (batch tested with PRs 58438, 58523, 58513, 57948). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

fix csi ut print

**What this PR does / why we need it**:
Fixes the log and error output in the CSI unit tests: calls that passed values as extra `t.Log`/`t.Error`/`t.Fatal` arguments are switched to `t.Logf`/`t.Errorf`/`t.Fatalf` with explicit format verbs.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
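The change is the same throughout the diff below: `t.Log` formats its arguments like `fmt.Println`, inserting a space between operands, so a call such as `t.Log("test case: ", tc.name)` prints a doubled space and hides the argument's type, while `t.Logf` takes an explicit format verb. A minimal, runnable sketch of the before/after pattern; the test name and value here are illustrative, not taken from the PR:

```go
// log_style_test.go — illustrative only, not part of the PR.
package main

import "testing"

func TestLogStyle(t *testing.T) {
	name := "attach-ok" // stands in for tc.name

	// Before: t.Log formats like fmt.Println, inserting a space between
	// operands, so the trailing space in the literal is doubled:
	//   test case:  attach-ok
	t.Log("test case: ", name)

	// After: t.Logf formats like fmt.Printf; the spacing is explicit and
	// the verb documents the argument's type:
	//   test case: attach-ok
	t.Logf("test case: %s", name)
}
```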
commit 5f6e12793b
```diff
@@ -110,13 +110,13 @@ func TestAttacherAttach(t *testing.T) {
 	// attacher loop
 	for i, tc := range testCases {
-		t.Log("test case: ", tc.name)
+		t.Logf("test case: %s", tc.name)
 		spec := volume.NewSpecFromPersistentVolume(makeTestPV(fmt.Sprintf("test-pv%d", i), 10, tc.driverName, tc.volumeName), false)
 
 		go func(id, nodename string, fail bool) {
 			attachID, err := csiAttacher.Attach(spec, types.NodeName(nodename))
 			if !fail && err != nil {
-				t.Error("was not expecting failure, but got err: ", err)
+				t.Errorf("expecting no failure, but got err: %v", err)
 			}
 			if attachID != id && !fail {
 				t.Errorf("expecting attachID %v, got %v", id, attachID)
 
@@ -178,7 +178,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) {
 	}
 
 	for i, tc := range testCases {
-		t.Logf("running test: %v", tc.name)
+		t.Logf("running test: %s", tc.name)
 		pvName := fmt.Sprintf("test-pv-%d", i)
 		volID := fmt.Sprintf("test-vol-%d", i)
 		attachID := getAttachmentName(volID, testDriver, nodeName)
```
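The second hunk above swaps `%v` for `%s` on `tc.name`. For a plain string the two verbs print identically; `%s` simply states the intended type, which the `printf` check in `go vet` can then verify. A small sketch under that assumption (the value is illustrative):

```go
package main

import "fmt"

func main() {
	name := "wait-success" // stands in for tc.name

	// %v applies default formatting to any value; %s is the string verb.
	// The output is identical for a string, but %s lets `go vet` flag a
	// mismatched argument type.
	fmt.Printf("running test: %v\n", name)
	fmt.Printf("running test: %s\n", name)
}
```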
```diff
@@ -52,12 +52,12 @@ func TestClientAssertSupportedVersion(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-		t.Log("case: ", tc.testName)
+		t.Logf("test case: %s", tc.testName)
 		client := setupClient(t)
 		client.idClient.(*fake.IdentityClient).SetNextError(tc.err)
 		err := client.AssertSupportedVersion(grpctx.Background(), tc.ver)
 		if tc.mustFail && err == nil {
-			t.Error("must fail, but err = nil")
+			t.Error("test must fail, but err = nil")
 		}
 	}
 }
@@ -74,12 +74,12 @@ func TestClientNodeProbe(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-		t.Log("case: ", tc.testName)
+		t.Logf("test case: %s", tc.testName)
 		client := setupClient(t)
 		client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
 		err := client.NodeProbe(grpctx.Background(), tc.ver)
 		if tc.mustFail && err == nil {
-			t.Error("must fail, but err = nil")
+			t.Error("test must fail, but err = nil")
 		}
 	}
 }
@@ -103,7 +103,7 @@ func TestClientNodePublishVolume(t *testing.T) {
 	client := setupClient(t)
 
 	for _, tc := range testCases {
-		t.Log("case: ", tc.name)
+		t.Logf("test case: %s", tc.name)
 		client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
 		err := client.NodePublishVolume(
 			grpctx.Background(),
@@ -117,7 +117,7 @@ func TestClientNodePublishVolume(t *testing.T) {
 		)
 
 		if tc.mustFail && err == nil {
-			t.Error("must fail, but err is nil: ", err)
+			t.Error("test must fail, but err is nil")
 		}
 	}
 }
@@ -139,11 +139,11 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
 	client := setupClient(t)
 
 	for _, tc := range testCases {
-		t.Log("case: ", tc.name)
+		t.Logf("test case: %s", tc.name)
 		client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
 		err := client.NodeUnpublishVolume(grpctx.Background(), tc.volID, tc.targetPath)
 		if tc.mustFail && err == nil {
-			t.Error("must fail, but err is nil: ", err)
+			t.Error("test must fail, but err is nil")
 		}
 	}
 }
```
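In the `mustFail` branches above, the PR also drops `err` from the message: the branch is only entered when `err == nil`, so the old call could never print anything but `<nil>`. A minimal sketch of why (illustrative, outside the test harness):

```go
package main

import "fmt"

func main() {
	var err error // nil by construction, as in the `tc.mustFail && err == nil` branch

	// Before: err is guaranteed nil here, so the trailing operand only
	// ever contributes "<nil>" to the output:
	//   must fail, but err is nil:  <nil>
	fmt.Println("must fail, but err is nil: ", err)

	// After: the message stands on its own.
	fmt.Println("test must fail, but err is nil")
}
```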
```diff
@@ -64,7 +64,7 @@ func TestMounterGetPath(t *testing.T) {
 		},
 	}
 	for _, tc := range testCases {
-		t.Log("test case:", tc.name)
+		t.Logf("test case: %s", tc.name)
 		pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
 		spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
 		mounter, err := plug.NewMounter(
@@ -78,7 +78,7 @@ func TestMounterGetPath(t *testing.T) {
 		csiMounter := mounter.(*csiMountMgr)
 
 		path := csiMounter.GetPath()
-		t.Log("*** GetPath: ", path)
+		t.Logf("*** GetPath: %s", path)
 
 		if tc.path != path {
 			t.Errorf("expecting path %s, got %s", tc.path, path)
@@ -186,7 +186,7 @@ func TestUnmounterTeardown(t *testing.T) {
 		"test-pv",
 		map[string]string{volDataKey.specVolID: "test-pv", volDataKey.driverName: "driver", volDataKey.volHandle: "vol-handle"},
 	); err != nil {
-		t.Fatal("failed to save volume data:", err)
+		t.Fatalf("failed to save volume data: %v", err)
 	}
 
 	err = csiUnmounter.TearDownAt(dir)
@@ -231,11 +231,11 @@ func TestGetVolAttribsFromSpec(t *testing.T) {
 	}
 	spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, testDriver, testVol), false)
 	for _, tc := range testCases {
-		t.Log("test case:", tc.name)
+		t.Logf("test case: %s", tc.name)
 		spec.PersistentVolume.Annotations = tc.annotations
 		attribs, err := getVolAttribsFromSpec(spec)
 		if !tc.shouldFail && err != nil {
-			t.Error("test case should not fail, but err != nil", err)
+			t.Errorf("test case should not fail, but err != nil: %v", err)
 		}
 		eq := true
 		for k, v := range attribs {
@@ -262,7 +262,7 @@ func TestSaveVolumeData(t *testing.T) {
 	}
 
 	for i, tc := range testCases {
-		t.Log("test case:", tc.name)
+		t.Logf("test case: %s", tc.name)
 		specVolID := fmt.Sprintf("spec-volid-%d", i)
 		mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
 		if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
@@ -272,24 +272,24 @@ func TestSaveVolumeData(t *testing.T) {
 		err := saveVolumeData(plug, testPodUID, specVolID, tc.data)
 
 		if !tc.shouldFail && err != nil {
-			t.Error("unexpected failure: ", err)
+			t.Errorf("unexpected failure: %v", err)
 		}
 		// did file get created
 		dataDir := getTargetPath(testPodUID, specVolID, plug.host)
 		file := path.Join(dataDir, volDataFileName)
 		if _, err := os.Stat(file); err != nil {
-			t.Error("failed to create data dir:", err)
+			t.Errorf("failed to create data dir: %v", err)
 		}
 
 		// validate content
 		data, err := ioutil.ReadFile(file)
 		if !tc.shouldFail && err != nil {
-			t.Error("failed to read data file:", err)
+			t.Errorf("failed to read data file: %v", err)
 		}
 
 		jsonData := new(bytes.Buffer)
 		if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
-			t.Error("failed to encode json:", err)
+			t.Errorf("failed to encode json: %v", err)
 		}
 		if string(data) != jsonData.String() {
 			t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
```
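Where a real error is printed, the hunks standardize on `%v`, which for an `error` value renders `err.Error()`; and the setup failure in `TestUnmounterTeardown` keeps its `t.Fatal` semantics via `t.Fatalf`, which, unlike `t.Errorf`, stops the test immediately, since nothing after a failed save is worth running. A short sketch of the formatting side (the error text is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("saving volume data failed") // illustrative error value

	// %v on a non-nil error prints err.Error(), matching the
	// t.Fatalf/t.Errorf calls in the diff:
	//   failed to save volume data: saving volume data failed
	fmt.Printf("failed to save volume data: %v\n", err)
}
```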