Avoid deleted iSCSI LUNs in the kernel

This change ensures that iSCSI block devices are deleted after
unmounting, and scans individual LUNs rather than the whole iSCSI
bus.

When an iSCSI bus is in use by more than one attachment, detaching
used to leave behind phantom block devices, which could cause I/O
errors, long timeouts, or even data corruption if the underlying
LUN number was recycled. This change flushes references to the
block devices after unmounting, as sketched below.
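
Concretely, the cleanup path added here (see deleteDevices in the
diff below) first flushes any multipath map and then removes each
backing SCSI disk through sysfs. Roughly, with dm-N and sdX standing
in for whatever devices back the LUN:

    multipath -f /dev/dm-N                  # flush the multipath device map, if any
    echo 1 > /sys/block/sdX/device/delete   # delete each SCSI block device for the LUN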

The original iSCSI code scanned the whole target every time a LUN
was attached. On storage controllers that export multiple LUNs on
the same target IQN, this meant nodes would see SCSI disks they
were never meant to see -- possibly dozens or hundreds of extra
SCSI disks. This caused three significant problems:

1) The large number of disks wasted resources on the node and
caused a minor drag on performance.
2) Scanning all of the devices generated a huge number of uevents
from the kernel, which in some cases bogged udev down for multiple
minutes and triggered timeouts and other transient failures.
3) Because Kubernetes was not tracking the "extra" LUNs that got
discovered, they were not cleaned up until the last LUN on a
particular target was detached and triggered a logout. This led to
significant complications:

If a LUN was deleted on the backend in the window between when it
was unintentionally scanned and when it was removed by the logout,
a phantom reference remained on the node. In the best case, the
phantom LUN caused I/O errors and timeouts in the udev system. In
the worst case, the backend could reuse the LUN number for a new
volume, and if that new volume were then scheduled to a pod on a
node holding a phantom reference to the old LUN of the same number,
the initiator could get confused and possibly corrupt data on that
volume.

To avoid these problems, the new implementation scans only for the
specific LUN number it expects to see. Note that the default
behavior of iscsiadm is to automatically scan the whole bus on
login. That behavior can be disabled by setting
node.session.scan = manual
in iscsid.conf, and for the reasons above it is strongly
recommended to set that option (see the example below). This change
works regardless of the setting in iscsid.conf: automatic scanning
can still cause some of the problems described above, but this
change does not make them any worse and can make things better in
some cases.
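
A minimal illustration, assuming open-iscsi's usual config location
and placeholder host/LUN numbers; the targeted scan corresponds to
what scanOneLun in the diff below does:

    # /etc/iscsi/iscsid.conf -- disable automatic bus scans on login
    node.session.scan = manual

    # scan only LUN 3 on SCSI host 2 (channel and target are always 0 for iSCSI)
    echo "0 0 3" > /sys/class/scsi_host/host2/scan
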
Ben Swartzlander 2018-07-24 23:58:19 -04:00
parent 0ffee495ad
commit 6d23d8edbb
9 changed files with 568 additions and 67 deletions


@ -18,6 +18,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/volume/iscsi",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/keymutex:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",


@ -26,14 +26,16 @@ import (
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type iscsiAttacher struct {
host volume.VolumeHost
manager diskManager
host volume.VolumeHost
targetLocks keymutex.KeyMutex
manager diskManager
}
var _ volume.Attacher = &iscsiAttacher{}
@ -42,8 +44,9 @@ var _ volume.AttachableVolumePlugin = &iscsiPlugin{}
func (plugin *iscsiPlugin) NewAttacher() (volume.Attacher, error) {
return &iscsiAttacher{
host: plugin.host,
manager: &ISCSIUtil{},
host: plugin.host,
targetLocks: plugin.targetLocks,
manager: &ISCSIUtil{},
}, nil
}
@ -66,7 +69,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName
}
func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host, pod)
mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, pod)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
@ -76,7 +79,7 @@ func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath strin
func (attacher *iscsiAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host, nil)
mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, nil)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
@ -157,7 +160,7 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error {
return nil
}
func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) {
func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, targetLocks keymutex.KeyMutex, pod *v1.Pod) (*iscsiDiskMounter, error) {
var secret map[string]string
readOnly, fsType, err := getISCSIVolumeInfo(spec)
if err != nil {
@ -165,7 +168,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod)
}
var podUID types.UID
if pod != nil {
secret, err = createSecretMap(spec, &iscsiPlugin{host: host}, pod.Namespace)
secret, err = createSecretMap(spec, &iscsiPlugin{host: host, targetLocks: targetLocks}, pod.Namespace)
if err != nil {
return nil, err
}
@ -173,7 +176,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod)
}
iscsiDisk, err := createISCSIDisk(spec,
podUID,
&iscsiPlugin{host: host},
&iscsiPlugin{host: host, targetLocks: targetLocks},
&ISCSIUtil{},
secret,
)
@ -214,7 +217,8 @@ func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost) *isc
iscsiDisk: &iscsiDisk{
plugin: &iscsiPlugin{},
},
mounter: mounter,
exec: exec,
mounter: mounter,
exec: exec,
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
}
}


@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@ -36,11 +37,12 @@ import (
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&iscsiPlugin{nil}}
return []volume.VolumePlugin{&iscsiPlugin{}}
}
type iscsiPlugin struct {
host volume.VolumeHost
host volume.VolumeHost
targetLocks keymutex.KeyMutex
}
var _ volume.VolumePlugin = &iscsiPlugin{}
@ -53,6 +55,7 @@ const (
func (plugin *iscsiPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.targetLocks = keymutex.NewKeyMutex()
return nil
}
@ -176,8 +179,9 @@ func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, utilstrings.EscapeQualifiedNameForDisk(iscsiPluginName), volName)),
},
mounter: mounter,
exec: exec,
mounter: mounter,
exec: exec,
deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
}, nil
}
@ -281,7 +285,7 @@ func (iscsi *iscsiDisk) GetPath() string {
}
func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, nil /* pod */)
mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, iscsi.plugin.targetLocks, nil /* pod */)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
@ -337,8 +341,9 @@ func (b *iscsiDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
type iscsiDiskUnmounter struct {
*iscsiDisk
mounter mount.Interface
exec mount.Exec
mounter mount.Interface
exec mount.Exec
deviceUtil ioutil.DeviceUtil
}
var _ volume.Unmounter = &iscsiDiskUnmounter{}


@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"strconv"
)
var (
@ -212,6 +213,34 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error {
return nil
}
// scanOneLun scans a single LUN on one SCSI bus
// Use this to avoid scanning the whole SCSI bus for all of the LUNs, which
// would result in the kernel on this node discovering LUNs that it shouldn't
// know about. Extraneous LUNs cause problems because they may get deleted
// without us getting notified, since we were never supposed to know about
// them. When LUNs are deleted without proper cleanup in the kernel, I/O errors
// and timeouts result, which can noticeably degrade performance of future
// operations.
func scanOneLun(hostNumber int, lunNumber int) error {
filename := fmt.Sprintf("/sys/class/scsi_host/host%d/scan", hostNumber)
fd, err := os.OpenFile(filename, os.O_WRONLY, 0)
if err != nil {
return err
}
defer fd.Close()
// Channel/Target are always 0 for iSCSI
scanCmd := fmt.Sprintf("0 0 %d", lunNumber)
if written, err := fd.WriteString(scanCmd); err != nil {
return err
} else if 0 == written {
return fmt.Errorf("No data written to file: %s", filename)
}
glog.V(3).Infof("Scanned SCSI host %d LUN %d", hostNumber, lunNumber)
return nil
}
// AttachDisk returns devicePath of volume if attach succeeded otherwise returns error
func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
var devicePath string
@ -242,12 +271,84 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
b.Iface = newIface
}
// Lock the target while we login to avoid races between 2 volumes that share the same
// target both logging in or one logging out while another logs in.
b.plugin.targetLocks.LockKey(b.Iqn)
defer b.plugin.targetLocks.UnlockKey(b.Iqn)
// Build a map of SCSI hosts for each target portal. We will need this to
// issue the bus rescans.
portalHostMap, err := b.deviceUtil.GetISCSIPortalHostMapForTarget(b.Iqn)
if err != nil {
return "", err
}
glog.V(4).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap)
for _, tp := range bkpPortal {
// Rescan sessions to discover newly mapped LUNs. Do not specify the interface when rescanning
// to avoid establishing additional sessions to the same target.
out, err := b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-R")
hostNumber, loggedIn := portalHostMap[tp]
if !loggedIn {
glog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp)
// build discoverydb and discover iscsi target
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new")
// update discoverydb with CHAP secret
err = updateISCSIDiscoverydb(b, tp)
if err != nil {
lastErr = fmt.Errorf("iscsi: failed to update discoverydb to portal %s error: %v", tp, err)
continue
}
out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover")
if err != nil {
// delete discoverydb record
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err)
continue
}
err = updateISCSINode(b, tp)
if err != nil {
// failure to update node db is rare. But deleting record will likely impact those who already start using it.
lastErr = fmt.Errorf("iscsi: failed to update iscsi node to portal %s error: %v", tp, err)
continue
}
// login to iscsi target
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login")
if err != nil {
// delete the node record from database
b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err)
continue
}
// in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", "node.startup", "-v", "manual")
if err != nil {
// don't fail if we can't set startup mode, but log warning so there is a clue
glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err)
}
// Rebuild the host map after logging in
portalHostMap, err := b.deviceUtil.GetISCSIPortalHostMapForTarget(b.Iqn)
if err != nil {
return "", err
}
glog.V(6).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap)
hostNumber, loggedIn = portalHostMap[tp]
if !loggedIn {
glog.Warningf("Could not get SCSI host number for portal %s after logging in", tp)
continue
}
}
glog.V(5).Infof("AttachDisk: scanning SCSI host %d LUN %s", hostNumber, b.Lun)
lunNumber, err := strconv.Atoi(b.Lun)
if err != nil {
glog.Errorf("iscsi: failed to rescan session with error: %s (%v)", string(out), err)
return "", fmt.Errorf("AttachDisk: lun is not a number: %s\nError: %v", b.Lun, err)
}
// Scan the iSCSI bus for the LUN
err = scanOneLun(hostNumber, lunNumber)
if err != nil {
return "", err
}
if iscsiTransport == "" {
@ -260,46 +361,6 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-")
}
if exist := waitForPathToExist(&devicePath, 1, iscsiTransport); exist {
glog.V(4).Infof("iscsi: devicepath (%s) exists", devicePath)
devicePaths = append(devicePaths, devicePath)
continue
}
// build discoverydb and discover iscsi target
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new")
// update discoverydb with CHAP secret
err = updateISCSIDiscoverydb(b, tp)
if err != nil {
lastErr = fmt.Errorf("iscsi: failed to update discoverydb to portal %s error: %v", tp, err)
continue
}
out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover")
if err != nil {
// delete discoverydb record
b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err)
continue
}
err = updateISCSINode(b, tp)
if err != nil {
// failure to update node db is rare. But deleting record will likely impact those who already start using it.
lastErr = fmt.Errorf("iscsi: failed to update iscsi node to portal %s error: %v", tp, err)
continue
}
// login to iscsi target
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login")
if err != nil {
// delete the node record from database
b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete")
lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err)
continue
}
// in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot
out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "node.startup", "-v", "manual")
if err != nil {
// don't fail if we can't set startup mode, but log warning so there is a clue
glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err)
}
if exist := waitForPathToExist(&devicePath, 10, iscsiTransport); !exist {
glog.Errorf("Could not attach disk: Timeout after 10s")
// update last error
@ -334,6 +395,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
break
}
}
glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath)
// run global mount path related operations based on volumeMode
return globalPDPathOperation(b)(b, devicePath, util)
@ -394,6 +456,68 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I
}
}
// Delete 1 block device of the form "sd*"
func deleteDevice(deviceName string) error {
filename := fmt.Sprintf("/sys/block/%s/device/delete", deviceName)
fd, err := os.OpenFile(filename, os.O_WRONLY, 0)
if err != nil {
// The file was not present, so just return without error
return nil
}
defer fd.Close()
if written, err := fd.WriteString("1"); err != nil {
return err
} else if 0 == written {
return fmt.Errorf("No data written to file: %s", filename)
}
glog.V(4).Infof("Deleted block device: %s", deviceName)
return nil
}
// deleteDevices tries to remove all the block devices and multipath map devices
// associated with a given iscsi device
func deleteDevices(c iscsiDiskUnmounter) error {
lunNumber, err := strconv.Atoi(c.iscsiDisk.Lun)
if err != nil {
glog.Errorf("iscsi delete devices: lun is not a number: %s\nError: %v", c.iscsiDisk.Lun, err)
return err
}
// Enumerate the devices so we can delete them
deviceNames, err := c.deviceUtil.FindDevicesForISCSILun(c.iscsiDisk.Iqn, lunNumber)
if err != nil {
glog.Errorf("iscsi delete devices: could not get devices associated with LUN %d on target %s\nError: %v",
lunNumber, c.iscsiDisk.Iqn, err)
return err
}
// Find the multipath device path(s)
mpathDevices := make(map[string]bool)
for _, deviceName := range deviceNames {
path := "/dev/" + deviceName
// check if the dev is using mpio and if so mount it via the dm-XX device
if mappedDevicePath := c.deviceUtil.FindMultipathDeviceForDevice(path); mappedDevicePath != "" {
mpathDevices[mappedDevicePath] = true
}
}
// Flush any multipath device maps
for mpathDevice := range mpathDevices {
_, err = c.exec.Run("multipath", "-f", mpathDevice)
if err != nil {
glog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err)
// Fall through -- keep deleting the block devices
}
glog.V(4).Infof("Flushed multipath device: %s", mpathDevice)
}
for _, deviceName := range deviceNames {
err = deleteDevice(deviceName)
if err != nil {
glog.Warningf("Warning: Failed to delete block device: %s\nError: %v", deviceName, err)
// Fall through -- keep deleting other block devices
}
}
return nil
}
// DetachDisk unmounts and detaches a volume from node
func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil {
@ -411,10 +535,6 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
if err != nil {
return err
}
refCount, err := getDevicePrefixRefCount(c.mounter, prefix)
if err != nil || refCount != 0 {
return nil
}
var bkpPortal []string
var volName, iqn, iface, initiatorName string
@ -438,6 +558,23 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
// Logout may fail as no session may exist for the portal/IQN on the specified interface.
iface, found = extractIface(mntPath)
}
// Delete all the scsi devices and any multipath devices after unmounting
if err = deleteDevices(c); err != nil {
glog.Warningf("iscsi detach disk: failed to delete devices\nError: %v", err)
// Fall through -- even if deleting fails, a logout may fix problems
}
// Lock the target while we determine if we can safely log out or not
c.plugin.targetLocks.LockKey(iqn)
defer c.plugin.targetLocks.UnlockKey(iqn)
// if device is no longer used, see if need to logout the target
refCount, err := getDevicePrefixRefCount(c.mounter, prefix)
if err != nil || refCount != 0 {
return nil
}
portals := removeDuplicate(bkpPortal)
if len(portals) == 0 {
return fmt.Errorf("iscsi detach disk: failed to detach iscsi disk. Couldn't get connected portals from configurations")


@ -20,6 +20,8 @@ package util
type DeviceUtil interface {
FindMultipathDeviceForDevice(disk string) string
FindSlaveDevicesOnMultipath(disk string) []string
GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error)
FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error)
}
type deviceHandler struct {


@ -20,7 +20,10 @@ package util
import (
"errors"
"fmt"
"github.com/golang/glog"
"path"
"strconv"
"strings"
)
@ -80,3 +83,206 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
}
return devices
}
// GetISCSIPortalHostMapForTarget given a target iqn, find all the scsi hosts logged into
// that target. Returns a map of iSCSI portals (string) to SCSI host numbers (integers).
// For example: {
// "192.168.30.7:3260": 2,
// "192.168.30.8:3260": 3,
// }
func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) {
portalHostMap := make(map[string]int)
io := handler.get_io
// Iterate over all the iSCSI hosts in sysfs
sysPath := "/sys/class/iscsi_host"
hostDirs, err := io.ReadDir(sysPath)
if err != nil {
return nil, err
}
for _, hostDir := range hostDirs {
// iSCSI hosts are always of the format "host%d"
// See drivers/scsi/hosts.c in Linux
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
}
hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host"))
if err != nil {
glog.Errorf("Could not get number from iSCSI host: %s", hostName)
continue
}
// Iterate over the children of the iscsi_host device
// We are looking for the associated session
devicePath := sysPath + "/" + hostName + "/device"
deviceDirs, err := io.ReadDir(devicePath)
if err != nil {
return nil, err
}
for _, deviceDir := range deviceDirs {
// Skip over files that aren't the session
// Sessions are of the format "session%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
}
sessionPath := devicePath + "/" + sessionName
// Read the target name for the iSCSI session
targetNamePath := sessionPath + "/iscsi_session/" + sessionName + "/targetname"
targetName, err := io.ReadFile(targetNamePath)
if err != nil {
return nil, err
}
// Ignore hosts that don't match the target we were looking for.
if strings.TrimSpace(string(targetName)) != targetIqn {
continue
}
// Iterate over the children of the iSCSI session looking
// for the iSCSI connection.
dirs2, err := io.ReadDir(sessionPath)
if err != nil {
return nil, err
}
for _, dir2 := range dirs2 {
// Skip over files that aren't the connection
// Connections are of the format "connection%d:%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
dirName := dir2.Name()
if !strings.HasPrefix(dirName, "connection") {
continue
}
connectionPath := sessionPath + "/" + dirName + "/iscsi_connection/" + dirName
// Read the current and persistent portal information for the connection.
addrPath := connectionPath + "/address"
addr, err := io.ReadFile(addrPath)
if err != nil {
return nil, err
}
portPath := connectionPath + "/port"
port, err := io.ReadFile(portPath)
if err != nil {
return nil, err
}
persistentAddrPath := connectionPath + "/persistent_address"
persistentAddr, err := io.ReadFile(persistentAddrPath)
if err != nil {
return nil, err
}
persistentPortPath := connectionPath + "/persistent_port"
persistentPort, err := io.ReadFile(persistentPortPath)
if err != nil {
return nil, err
}
// Add entries to the map for both the current and persistent portals
// pointing to the SCSI host for those connections
portal := strings.TrimSpace(string(addr)) + ":" +
strings.TrimSpace(string(port))
portalHostMap[portal] = hostNumber
persistentPortal := strings.TrimSpace(string(persistentAddr)) + ":" +
strings.TrimSpace(string(persistentPort))
portalHostMap[persistentPortal] = hostNumber
}
}
}
return portalHostMap, nil
}
// FindDevicesForISCSILun given an iqn, and lun number, find all the devices
// corresponding to that LUN.
func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) {
devices := make([]string, 0)
io := handler.get_io
// Iterate over all the iSCSI hosts in sysfs
sysPath := "/sys/class/iscsi_host"
hostDirs, err := io.ReadDir(sysPath)
if err != nil {
return nil, err
}
for _, hostDir := range hostDirs {
// iSCSI hosts are always of the format "host%d"
// See drivers/scsi/hosts.c in Linux
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
}
hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host"))
if err != nil {
glog.Errorf("Could not get number from iSCSI host: %s", hostName)
continue
}
// Iterate over the children of the iscsi_host device
// We are looking for the associated session
devicePath := sysPath + "/" + hostName + "/device"
deviceDirs, err := io.ReadDir(devicePath)
if err != nil {
return nil, err
}
for _, deviceDir := range deviceDirs {
// Skip over files that aren't the session
// Sessions are of the format "session%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
}
// Read the target name for the iSCSI session
targetNamePath := devicePath + "/" + sessionName + "/iscsi_session/" + sessionName + "/targetname"
targetName, err := io.ReadFile(targetNamePath)
if err != nil {
return nil, err
}
// Only if the session matches the target we were looking for,
// add it to the map
if strings.TrimSpace(string(targetName)) != targetIqn {
continue
}
// The list of block devices on the scsi bus will be in a
// directory called "target%d:%d:%d".
// See drivers/scsi/scsi_scan.c in Linux
// We assume the channel/bus and device/controller are always zero for iSCSI
targetPath := devicePath + "/" + sessionName + fmt.Sprintf("/target%d:0:0", hostNumber)
// The block device for a given lun will be "%d:%d:%d:%d" --
// host:channel:bus:LUN
blockDevicePath := targetPath + fmt.Sprintf("/%d:0:0:%d", hostNumber, lun)
// If the LUN doesn't exist on this bus, continue on
_, err = io.Lstat(blockDevicePath)
if err != nil {
continue
}
// Read the block directory, there should only be one child --
// the block device "sd*"
path := blockDevicePath + "/block"
dirs, err := io.ReadDir(path)
if err != nil {
return nil, err
}
if 0 < len(dirs) {
devices = append(devices, dirs[0].Name())
}
}
}
return devices, nil
}


@ -22,12 +22,41 @@ import (
"errors"
"os"
"reflect"
"regexp"
"testing"
"time"
)
type mockOsIOHandler struct{}
func (handler *mockOsIOHandler) ReadFile(filename string) ([]byte, error) {
portPattern := regexp.MustCompile("^/sys/class/iscsi_host/(host\\d)/device/session\\d/connection\\d:0/iscsi_connection/connection\\d:0/(?:persistent_)?port$")
if portPattern.MatchString(filename) {
return []byte("3260"), nil
}
addressPattern := regexp.MustCompile("^/sys/class/iscsi_host/(host\\d)/device/session\\d/connection\\d:0/iscsi_connection/connection\\d:0/(?:persistent_)?address$")
matches := addressPattern.FindStringSubmatch(filename)
if nil != matches {
switch matches[1] {
case "host2":
return []byte("10.0.0.1"), nil
case "host3":
return []byte("10.0.0.2"), nil
}
}
targetNamePattern := regexp.MustCompile("^/sys/class/iscsi_host/(host\\d)/device/session\\d/iscsi_session/session\\d/targetname$")
matches = targetNamePattern.FindStringSubmatch(filename)
if nil != matches {
switch matches[1] {
case "host2":
return []byte("target1"), nil
case "host3":
return []byte("target2"), nil
}
}
return nil, errors.New("Not Implemented for Mock")
}
func (handler *mockOsIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/sys/block/dm-1/slaves":
@ -46,14 +75,81 @@ func (handler *mockOsIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
name: "dm-1",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host":
f1 := &fakeFileInfo{
name: "host2",
}
f2 := &fakeFileInfo{
name: "host3",
}
f3 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2, f3}, nil
case "/sys/class/iscsi_host/host2/device":
f1 := &fakeFileInfo{
name: "session1",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host3/device":
f1 := &fakeFileInfo{
name: "session2",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host2/device/session1":
f1 := &fakeFileInfo{
name: "connection1:0",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host3/device/session2":
f1 := &fakeFileInfo{
name: "connection2:0",
}
f2 := &fakeFileInfo{
name: "ignore",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:1/block":
f1 := &fakeFileInfo{
name: "sda",
}
return []os.FileInfo{f1}, nil
case "/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:2/block":
f1 := &fakeFileInfo{
name: "sdc",
}
return []os.FileInfo{f1}, nil
case "/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:1/block":
f1 := &fakeFileInfo{
name: "sdb",
}
return []os.FileInfo{f1}, nil
case "/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:2/block":
f1 := &fakeFileInfo{
name: "sdd",
}
return []os.FileInfo{f1}, nil
}
return nil, nil
return nil, errors.New("Not Implemented for Mock")
}
func (handler *mockOsIOHandler) Lstat(name string) (os.FileInfo, error) {
links := map[string]string{
"/sys/block/dm-1/slaves/sda": "sda",
"/dev/sda": "sda",
"/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:1": "2:0:0:1",
"/sys/class/iscsi_host/host2/device/session1/target2:0:0/2:0:0:2": "2:0:0:2",
"/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:1": "3:0:0:1",
"/sys/class/iscsi_host/host3/device/session2/target3:0:0/3:0:0:2": "3:0:0:2",
}
if dev, ok := links[name]; ok {
return &fakeFileInfo{name: dev}, nil
@ -158,3 +254,37 @@ func TestFindSlaveDevicesOnMultipath(t *testing.T) {
t.Fatalf("mpio device not found '' expected got [%s]", dev)
}
}
func TestGetISCSIPortalHostMapForTarget(t *testing.T) {
mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
portalHostMap, err := mockDeviceUtil.GetISCSIPortalHostMapForTarget("target1")
if nil != err {
t.Fatalf("error getting scsi hosts for target: %v", err)
}
if nil == portalHostMap {
t.Fatal("no portal host map returned")
}
if 1 != len(portalHostMap) {
t.Fatalf("wrong number of map entries in portal host map: %d", len(portalHostMap))
}
if 2 != portalHostMap["10.0.0.1:3260"] {
t.Fatalf("incorrect entry in portal host map: %v", portalHostMap)
}
}
func TestFindDevicesForISCSILun(t *testing.T) {
mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
devices, err := mockDeviceUtil.FindDevicesForISCSILun("target1", 1)
if nil != err {
t.Fatalf("error getting devices for lun: %v", err)
}
if nil == devices {
t.Fatal("no devices returned")
}
if 1 != len(devices) {
t.Fatalf("wrong number of devices: %d", len(devices))
}
if "sda" != devices[0] {
t.Fatalf("incorrect device %v", devices)
}
}


@ -28,3 +28,15 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(disk string) []string
out := []string{}
return out
}
// GetISCSIPortalHostMapForTarget unsupported returns nil
func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) {
portalHostMap := make(map[string]int)
return portalHostMap, nil
}
// FindDevicesForISCSILun unsupported returns nil
func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) {
devices := []string{}
return devices, nil
}


@ -24,6 +24,7 @@ import (
// IoUtil is a mockable util for common IO operations
type IoUtil interface {
ReadFile(filename string) ([]byte, error)
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
@ -36,6 +37,9 @@ func NewIOHandler() IoUtil {
return &osIOHandler{}
}
func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}