Merge pull request #59232 from liubin/fix4

Automatic merge from submit-queue (batch tested with PRs 59607, 59232). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Make log content more informative, and change some `glog.Errorf` calls to `glog.Error` where there is nothing to format (and the reverse where a runtime value belongs in the message).
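The whole change rests on one distinction: glog's Print-style loggers (`glog.Error`) take plain arguments, while the Printf-style ones (`glog.Errorf`) interpret a format string. A minimal runnable sketch of both patterns as the hunks below apply them, assuming the standard github.com/golang/glog package; `parseGid` is a hypothetical stand-in for the plugin's `convertGid` helper:

```go
package main

import (
	"flag"
	"strconv"

	"github.com/golang/glog"
)

// parseGid is a hypothetical stand-in for the plugin's convertGid helper.
func parseGid(s string) (int, error) {
	return strconv.Atoi(s)
}

func main() {
	flag.Parse() // glog registers its flags (-v, -logtostderr, ...) on the standard flag set
	defer glog.Flush()

	// A fixed message has nothing to interpolate, so the Print-style
	// glog.Error is the right call: no format verb can misfire.
	glog.Error("failed to get existing persistent volumes")

	// Once the message carries runtime values, switch to the Printf-style
	// glog.Errorf and name the identifier being processed.
	gidStr := "not-a-gid"
	if _, err := parseGid(gidStr); err != nil {
		glog.Errorf("failed to parse gid[%s]: %v", gidStr, err)
	}
}
```

Each hunk below follows that reasoning: constant-string `glog.Errorf` calls become `glog.Error`, and messages gain the gid, volume name, or volume ID they refer to.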
commit 977d03bd8b
pkg/volume/glusterfs/glusterfs.go

@@ -490,7 +490,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
     }
     pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
     if err != nil {
-        glog.Errorf("failed to get existing persistent volumes")
+        glog.Error("failed to get existing persistent volumes")
         return err
     }

@@ -510,7 +510,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll

         gid, err := convertGid(gidStr)
         if err != nil {
-            glog.Error(err)
+            glog.Errorf("failed to parse gid[%s]: %v", gidStr, err)
             continue
         }

@@ -638,12 +638,12 @@ func (d *glusterfsVolumeDeleter) Delete() error {
     }
     err = cli.VolumeDelete(volumeID)
     if err != nil {
-        glog.Errorf("error when deleting the volume :%v", err)
+        glog.Errorf("error when deleting the volume[%s]: %v", volumeName, err)
         return err
     }
     glog.V(2).Infof("volume %s deleted successfully", volumeName)

-    //Deleter takes endpoint and endpointnamespace from pv spec.
+    //Deleter takes endpoint and namespace from pv spec.
     pvSpec := d.spec.Spec
     var dynamicEndpoint, dynamicNamespace string
     if pvSpec.ClaimRef == nil {
@@ -891,7 +891,6 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
 }

 // getClusterNodes() returns the cluster nodes of a given cluster
-
 func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
     clusterinfo, err := cli.ClusterInfo(cluster)
     if err != nil {
@@ -905,7 +904,7 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
     for _, node := range clusterinfo.Nodes {
         nodei, err := cli.NodeInfo(string(node))
         if err != nil {
-            glog.Errorf(" failed to get hostip: %v", err)
+            glog.Errorf("failed to get hostip: %v", err)
             return nil, fmt.Errorf("failed to get hostip: %v", err)
         }
         ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
@@ -952,7 +951,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
         case "gidmin":
             parseGidMin, err := convertGid(v)
             if err != nil {
-                return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
+                return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName)
             }
             if parseGidMin < absoluteGidMin {
                 return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin)
@@ -964,7 +963,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
         case "gidmax":
             parseGidMax, err := convertGid(v)
             if err != nil {
-                return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
+                return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName)
             }
             if parseGidMax < absoluteGidMin {
                 return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin)
@@ -1095,12 +1094,12 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {

 func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
     pvSpec := spec.PersistentVolume.Spec
-    glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path)
+    volumeName := pvSpec.Glusterfs.Path
+    glog.V(2).Infof("Request to expand volume: %s ", volumeName)
     volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)

     if err != nil {
-        return oldSize, fmt.Errorf("failed to get volumeID, err: %v", err)
+        return oldSize, fmt.Errorf("failed to get volumeID for volume %s, err: %v", volumeName, err)
     }

     //Get details of SC.
@@ -1127,13 +1126,12 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
     expansionSizeGiB := int(volume.RoundUpSize(expansionSize, volume.GIB))

     // Find out requested Size
-
     requestGiB := volume.RoundUpToGiB(newSize)

     //Check the existing volume size
     currentVolumeInfo, err := cli.VolumeInfo(volumeID)
     if err != nil {
-        glog.Errorf("error when fetching details of volume :%v", err)
+        glog.Errorf("error when fetching details of volume[%s]: %v", volumeName, err)
         return oldSize, err
     }

@@ -1147,7 +1145,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
     // Expand the volume
     volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
     if err != nil {
-        glog.Errorf("error when expanding the volume :%v", err)
+        glog.Errorf("error when expanding the volume[%s]: %v", volumeName, err)
         return oldSize, err
     }

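The returned errors follow the same principle as the log lines: name the object in the message so a failure among many volumes is attributable. A small sketch of the before/after difference; `findVolumeID` and the volume name are hypothetical stand-ins for the plugin's `getVolumeID`:

```go
package main

import (
	"errors"
	"fmt"
)

// findVolumeID is a hypothetical lookup that always fails, standing in
// for getVolumeID in the plugin.
func findVolumeID(volumeName string) (string, error) {
	return "", errors.New("volume annotation missing")
}

func main() {
	volumeName := "vol_demo" // hypothetical volume name

	if _, err := findVolumeID(volumeName); err != nil {
		// Before: no hint which of many volumes failed.
		before := fmt.Errorf("failed to get volumeID, err: %v", err)
		// After: the object is named in the error itself.
		after := fmt.Errorf("failed to get volumeID for volume %s, err: %v", volumeName, err)
		fmt.Println(before)
		fmt.Println(after)
	}
}
```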
pkg/volume/glusterfs/glusterfs_util.go
@@ -27,7 +27,7 @@ import (
 // readGlusterLog will take the last 2 lines of the log file
 // on failure of gluster SetUp and return those so kubelet can
 // properly expose them
-// return nil on any failure
+// return error on any failure
 func readGlusterLog(path string, podName string) error {

     var line1 string
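The readGlusterLog comment above describes keeping the last two lines of a mount log so they can be folded into the error kubelet surfaces. A hedged sketch of that technique using a sliding two-line window; the function name and path are illustrative, not the plugin's actual implementation:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// lastTwoLines scans a log file once and keeps only the final two lines,
// so a caller can embed them in the error it reports upward.
func lastTwoLines(path string) (string, string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", "", fmt.Errorf("failed to open log file %s: %v", path, err)
	}
	defer f.Close()

	var line1, line2 string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line1, line2 = line2, scanner.Text() // slide the two-line window
	}
	if err := scanner.Err(); err != nil {
		return "", "", fmt.Errorf("failed to read log file %s: %v", path, err)
	}
	return line1, line2, nil
}

func main() {
	l1, l2, err := lastTwoLines("/tmp/glusterfs-demo.log") // hypothetical path
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(l1)
	fmt.Println(l2)
}
```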