Make iSCSI PV claims aware of the NoDiskConflict feature.

Since iSCSI volumes are RWO/ROX, they should inherit the NoDiskConflict feature.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Humble Chirammal 2016-12-09 21:47:13 +05:30
parent f5f109ca11
commit 28088159c3
2 changed files with 82 additions and 1 deletion


@@ -20,6 +20,7 @@ import (
"fmt"
"math/rand"
"strconv"
"strings"
"sync"
"time"
@@ -109,7 +110,7 @@ type predicateMetadata struct {
func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
// fast path if there are no conflict-checking targets.
- if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil {
+ if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil {
return false
}
@@ -128,6 +129,26 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
}
}
if volume.ISCSI != nil && existingVolume.ISCSI != nil {
iqn, lun, target := volume.ISCSI.IQN, volume.ISCSI.Lun, volume.ISCSI.TargetPortal
eiqn, elun, etarget := existingVolume.ISCSI.IQN, existingVolume.ISCSI.Lun, existingVolume.ISCSI.TargetPortal
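// iSCSI target portals default to port 3260 when the address omits an explicit
// port, so normalize both portals before comparing them.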
if !strings.Contains(target, ":") {
target = target + ":3260"
}
if !strings.Contains(etarget, ":") {
etarget = etarget + ":3260"
}
lun1 := strconv.Itoa(int(lun))
elun1 := strconv.Itoa(int(elun))
// Two iSCSI volumes are the same if they share the same IQN, LUN and target portal.
// Since iSCSI volumes are RWO or ROX, only a single read-write mount is permitted:
// the same iSCSI volume mounted by multiple pods conflicts unless every pod mounts it read-only.
if iqn == eiqn && lun1 == elun1 && target == etarget && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
return true
}
}
if volume.RBD != nil && existingVolume.RBD != nil {
mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
@@ -150,6 +171,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
// - GCE PD allows multiple mounts as long as they're all read-only
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least one monitor, and match pool and image.
// - ISCSI forbids if any two pods share the same IQN, LUN and target portal, unless all mounts are read-only
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
for _, v := range pod.Spec.Volumes {

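For context, here is a minimal sketch of what the new check means for scheduling: two read-write pods pointing at the same iSCSI LUN cannot land on the same node. The mkISCSIPod helper below is hypothetical; NoDiskConflict, PredicateMetadata and schedulercache.NewNodeInfo are the functions exercised by this change.

// Hypothetical helper: builds a pod with a single iSCSI volume.
func mkISCSIPod(portal, iqn string, lun int32, readOnly bool) *v1.Pod {
	return &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{{
		VolumeSource: v1.VolumeSource{ISCSI: &v1.ISCSIVolumeSource{
			TargetPortal: portal, IQN: iqn, Lun: lun, ReadOnly: readOnly,
		}},
	}}}}
}

// Illustrative sketch only, not part of this commit.
func iscsiConflictSketch() (bool, []algorithm.PredicateFailureReason, error) {
	// One read-write pod already on the node; its portal omits the port, so the
	// predicate normalizes it to "127.0.0.1:3260" before comparing.
	nodeInfo := schedulercache.NewNodeInfo(mkISCSIPod("127.0.0.1", "iqn.2014-12.server:storage.target01", 0, false))

	// An incoming read-write pod with the same IQN, LUN and (normalized) portal:
	// NoDiskConflict returns false, with ErrDiskConflict as the failure reason.
	pod := mkISCSIPod("127.0.0.1:3260", "iqn.2014-12.server:storage.target01", 0, false)
	return NoDiskConflict(pod, PredicateMetadata(pod, nil), nodeInfo)
}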

@@ -738,6 +738,65 @@ func TestRBDDiskConflicts(t *testing.T) {
}
}
func TestISCSIDiskConflicts(t *testing.T) {
volState := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
Lun: 0,
},
},
},
},
}
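// volState2 points at a different target portal and LUN, so it must not
// conflict with volState.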
volState2 := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.2:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
Lun: 1,
},
},
},
},
}
tests := []struct {
pod *v1.Pod
nodeInfo *schedulercache.NodeInfo
isOk bool
test string
}{
{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"},
{&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"},
{&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
for _, test := range tests {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("%s: unexpected error: %v", test.test, err)
}
if !ok && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
}
if test.isOk && !ok {
t.Errorf("%s: expected predicate to pass, but it failed. pod: %v, nodeInfo: %s", test.test, test.pod, test.nodeInfo)
}
if !test.isOk && ok {
t.Errorf("%s: expected predicate to fail, but it passed. pod: %v, nodeInfo: %s", test.test, test.pod, test.nodeInfo)
}
}
}
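As a usage note, running go test -run TestISCSIDiskConflicts from the predicates package directory (the exact path depends on the source tree) exercises just this table.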
func TestPodFitsSelector(t *testing.T) {
tests := []struct {
pod *v1.Pod