Fix compatibility tests for scheduler

pull/8/head
Hemant Kumar 2018-08-22 19:31:24 -04:00
parent 8e4b33d1a8
commit fc61620db5
7 changed files with 411 additions and 89 deletions

View File

@@ -406,7 +406,7 @@ pkg/volume/azure_file
pkg/volume/cephfs
pkg/volume/configmap
pkg/volume/csi/fake
pkg/volume/csi/labelmanager
pkg/volume/csi/nodeupdater
pkg/volume/empty_dir
pkg/volume/fc
pkg/volume/flexvolume

View File

@@ -47,6 +47,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"csi_volume_predicate_test.go",
"max_attachable_volume_predicate_test.go",
"metadata_test.go",
"predicates_test.go",

View File

@@ -114,7 +114,9 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
for _, vol := range volumes {
// CSI volumes can only be used as persistent volumes
if vol.PersistentVolumeClaim != nil {
if vol.PersistentVolumeClaim == nil {
continue
}
pvcName := vol.PersistentVolumeClaim.ClaimName
if pvcName == "" {
@@ -124,19 +126,20 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
if err != nil {
glog.Errorf("Unable to look up PVC info for %s/%s", namespace, pvcName)
glog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
continue
}
pvName := pvc.Spec.VolumeName
// TODO - the actual handling of unbound PVCs will be fixed by late binding design.
if pvName == "" {
glog.Errorf("Persistent volume had no name for claim %s/%s", namespace, pvcName)
glog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
continue
}
pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
if err != nil {
glog.Errorf("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
glog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
continue
}
@@ -148,7 +151,7 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
driverName := csiSource.Driver
volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
result[csiSource.VolumeHandle] = volumeLimitKey
}
}
return nil
}
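
The change above inverts the PVC nil check into a guard clause and demotes the lookup failures from glog.Errorf to glog.V(4).Infof, since a missing PVC or an unbound PV is an expected state until the late-binding design lands, not a scheduler error. A minimal, self-contained sketch of the resulting control flow, using stand-in types rather than the real pvcInfo/pvInfo lookups:

package main

import "fmt"

type claimRef struct{ ClaimName string }
type volume struct{ PersistentVolumeClaim *claimRef }

// filterClaimNames mirrors the loop shape of filterAttachableVolumes after
// this commit: non-PVC volumes and empty claim names are skipped early
// instead of nesting the whole body inside an if-block.
func filterClaimNames(volumes []volume) []string {
	var names []string
	for _, vol := range volumes {
		// CSI volumes can only be used as persistent volumes.
		if vol.PersistentVolumeClaim == nil {
			continue
		}
		pvcName := vol.PersistentVolumeClaim.ClaimName
		if pvcName == "" {
			continue
		}
		names = append(names, pvcName)
	}
	return names
}

func main() {
	vols := []volume{{nil}, {&claimRef{"csi-ebs"}}, {&claimRef{""}}}
	fmt.Println(filterClaimNames(vols)) // prints [csi-ebs]
}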

View File

@@ -0,0 +1,179 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package predicates
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
)
func TestCSIVolumeCountPredicate(t *testing.T) {
// Test pods with CSI PVCs.
oneVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs",
},
},
},
},
},
}
twoVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "cs-ebs-1",
},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs-2",
},
},
},
},
},
}
runningPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "csi-ebs-3",
},
},
},
},
},
}
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
maxVols int
fits bool
test string
}{
{
newPod: oneVolPod,
existingPods: []*v1.Pod{runningPod, twoVolPod},
filterName: "csi-ebs",
maxVols: 4,
fits: true,
test: "fits when node capacity >= new pods CSI volume",
},
{
newPod: oneVolPod,
existingPods: []*v1.Pod{runningPod, twoVolPod},
filterName: "csi-ebs",
maxVols: 2,
fits: false,
test: "doesn't when node capacity <= pods CSI volume",
},
}
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
}
}
func getFakeCSIPVInfo(volumeName, driverName string) FakePersistentVolumeInfo {
return FakePersistentVolumeInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeName,
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeName + "-2",
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeName + "-3",
},
},
},
},
}
}
func getFakeCSIPVCInfo(volumeName string) FakePersistentVolumeClaimInfo {
return FakePersistentVolumeClaimInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-2"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-2"},
},
{
ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-3"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: volumeName + "-3"},
},
}
}
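
Note how the fakes chain together: a pod volume's ClaimName resolves through getFakeCSIPVCInfo to a Spec.VolumeName, which getFakeCSIPVInfo maps to a PV carrying a CSI source, and only volumes that complete that chain are counted against the driver's limit. Assuming the file sits with the other predicate tests (the BUILD change above adds it to the same go_test target), the new test can be exercised in isolation with something like:

go test ./pkg/scheduler/algorithm/predicates/ -run TestCSIVolumeCountPredicate -v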

View File

@@ -742,7 +742,44 @@ func TestVolumeCountConflicts(t *testing.T) {
},
}
pvInfo := func(filterName string) FakePersistentVolumeInfo {
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
// running attachable predicate tests without feature gate and no limit present on nodes
for _, test := range tests {
os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
if err != nil {
t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("[%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("[%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
}
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
}
}
func getFakePVInfo(filterName string) FakePersistentVolumeInfo {
return FakePersistentVolumeInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
@@ -761,7 +798,7 @@ func TestVolumeCountConflicts(t *testing.T) {
}
}
pvcInfo := func(filterName string) FakePersistentVolumeClaimInfo {
func getFakePVCInfo(filterName string) FakePersistentVolumeClaimInfo {
return FakePersistentVolumeClaimInfo{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
@@ -790,43 +827,6 @@ func TestVolumeCountConflicts(t *testing.T) {
}
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
// running attachable predicate tests without feature gate and no limit present on nodes
for _, test := range tests {
os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
pred := NewMaxPDVolumeCountPredicate(test.filterName, pvInfo(test.filterName), pvcInfo(test.filterName))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
if err != nil {
t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("[%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("[%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
}
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
pred := NewMaxPDVolumeCountPredicate(test.filterName, pvInfo(test.filterName), pvcInfo(test.filterName))
fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
}
}
func TestMaxVolumeFunc(t *testing.T) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
@@ -867,6 +867,6 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
case AzureDiskVolumeFilterType:
return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
default:
return ""
return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
}
}
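
With the new default case, an unrecognized filter type is treated as a CSI driver name instead of yielding an empty ResourceName, so CSI test nodes get a usable allocatable key. A hypothetical helper sketching the kind of node getNodeWithPodAndVolumeLimits presumably builds for the CSI case (that helper is not shown in this diff; imports assume the predicates test package):

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// csiTestNode is illustrative only: the allocatable key for "csi-ebs" now
// comes from volumeutil.GetCSIAttachLimitKey via the default case above.
func csiTestNode(limit int64) *v1.Node {
	return &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-for-csi-pod"},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
				getVolumeLimitKey("csi-ebs"): *resource.NewQuantity(limit, resource.DecimalSI),
			},
		},
	}
}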

View File

@@ -814,6 +814,130 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
"1.12": {
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MaxCSIVolumeCountPred"},
{"name": "MatchInterPodAffinity"},
{"name": "GeneralPredicates"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2},
{
"name": "RequestedToCapacityRatioPriority",
"weight": 2,
"argument": {
"requestedToCapacityRatioArguments": {
"shape": [
{"utilization": 0, "score": 0},
{"utilization": 50, "score": 7}
]
}
}}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
ExpectedPolicy: schedulerapi.Policy{
Predicates: []schedulerapi.PredicatePolicy{
{Name: "MatchNodeSelector"},
{Name: "PodFitsResources"},
{Name: "PodFitsHostPorts"},
{Name: "HostName"},
{Name: "NoDiskConflict"},
{Name: "NoVolumeZoneConflict"},
{Name: "PodToleratesNodeTaints"},
{Name: "CheckNodeMemoryPressure"},
{Name: "CheckNodeDiskPressure"},
{Name: "CheckNodePIDPressure"},
{Name: "CheckNodeCondition"},
{Name: "MaxEBSVolumeCount"},
{Name: "MaxGCEPDVolumeCount"},
{Name: "MaxAzureDiskVolumeCount"},
{Name: "MaxCSIVolumeCountPred"},
{Name: "MatchInterPodAffinity"},
{Name: "GeneralPredicates"},
{Name: "CheckVolumeBinding"},
{Name: "TestServiceAffinity", Argument: &schedulerapi.PredicateArgument{ServiceAffinity: &schedulerapi.ServiceAffinity{Labels: []string{"region"}}}},
{Name: "TestLabelsPresence", Argument: &schedulerapi.PredicateArgument{LabelsPresence: &schedulerapi.LabelsPresence{Labels: []string{"foo"}, Presence: true}}},
},
Priorities: []schedulerapi.PriorityPolicy{
{Name: "EqualPriority", Weight: 2},
{Name: "ImageLocalityPriority", Weight: 2},
{Name: "LeastRequestedPriority", Weight: 2},
{Name: "BalancedResourceAllocation", Weight: 2},
{Name: "SelectorSpreadPriority", Weight: 2},
{Name: "NodePreferAvoidPodsPriority", Weight: 2},
{Name: "NodeAffinityPriority", Weight: 2},
{Name: "TaintTolerationPriority", Weight: 2},
{Name: "InterPodAffinityPriority", Weight: 2},
{Name: "MostRequestedPriority", Weight: 2},
{
Name: "RequestedToCapacityRatioPriority",
Weight: 2,
Argument: &schedulerapi.PriorityArgument{
RequestedToCapacityRatioArguments: &schedulerapi.RequestedToCapacityRatioArguments{
UtilizationShape: []schedulerapi.UtilizationShapePoint{
{Utilization: 0, Score: 0},
{Utilization: 50, Score: 7},
}},
},
},
},
ExtenderConfigs: []schedulerapi.ExtenderConfig{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
EnableHTTPS: true,
TLSConfig: &restclient.TLSClientConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []schedulerapi.ExtenderManagedResource{{Name: v1.ResourceName("example.com/foo"), IgnoredByScheduler: true}},
Ignorable: true,
}},
},
},
}
registeredPredicates := sets.NewString(factory.ListRegisteredFitPredicates()...)
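
registeredPredicates collects every fit predicate known to the scheduler factory so the test can cross-check the versioned policy fixtures against what is actually registered; this is how a renamed or unregistered predicate, such as the new MaxCSIVolumeCountPred, would surface. A hypothetical helper sketching one direction of that cross-check (the actual loop in the test may differ):

// checkRegistered verifies that every predicate named in a compatibility
// policy is still a registered fit predicate.
func checkRegistered(t *testing.T, version string, policy schedulerapi.Policy, registered sets.String) {
	for _, p := range policy.Predicates {
		if !registered.Has(p.Name) {
			t.Errorf("predicate %q in the %q policy is not registered", p.Name, version)
		}
	}
}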

View File

@@ -17,7 +17,8 @@ limitations under the License.
package util
import (
"fmt"
"crypto/sha1"
"encoding/hex"
"testing"
"k8s.io/api/core/v1"
@@ -32,9 +33,23 @@ func TestGetCSIAttachLimitKey(t *testing.T) {
}
// When driver is longer than 39 chars
csiLimitKeyLonger := GetCSIAttachLimitKey("com.amazon.kubernetes.eks.ec2.ebs/csi-driver")
fmt.Println(csiLimitKeyLonger)
longDriverName := "com.amazon.kubernetes.eks.ec2.ebs/csi-driver"
csiLimitKeyLonger := GetCSIAttachLimitKey(longDriverName)
if !v1helper.IsAttachableVolumeResourceName(v1.ResourceName(csiLimitKeyLonger)) {
t.Errorf("Expected %s to have attachable prefix", csiLimitKeyLonger)
}
expectedCSIKey := getDriverHash(longDriverName)
if csiLimitKeyLonger != expectedCSIKey {
t.Errorf("Expected limit to be %s got %s", expectedCSIKey, csiLimitKeyLonger)
}
}
func getDriverHash(driverName string) string {
charsFromDriverName := driverName[:23]
hash := sha1.New()
hash.Write([]byte(driverName))
hashed := hex.EncodeToString(hash.Sum(nil))
hashed = hashed[:16]
return CSIAttachLimitPrefix + charsFromDriverName + hashed
}
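
The helper documents the key scheme for long driver names: the attachable prefix, then the first 23 characters of the driver name, then the first 16 hex characters of the name's SHA-1 hash, which keeps the resource name within the 63-character limit. A standalone sketch of the same derivation; the prefix constant is copied here only on the assumption that it matches util.CSIAttachLimitPrefix:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// Assumed to match util.CSIAttachLimitPrefix; defined locally so the
// sketch is self-contained.
const csiAttachLimitPrefix = "attachable-volumes-csi-"

// attachLimitKey mirrors getDriverHash from the test above for driver
// names longer than 39 characters.
func attachLimitKey(driverName string) string {
	hash := sha1.New()
	hash.Write([]byte(driverName))
	hashed := hex.EncodeToString(hash.Sum(nil))[:16]
	return csiAttachLimitPrefix + driverName[:23] + hashed
}

func main() {
	// A 44-character driver name, so it takes the hashed form.
	fmt.Println(attachLimitKey("com.amazon.kubernetes.eks.ec2.ebs/csi-driver"))
}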