From 575f79e03e6083bf1a3c17af4fe175f9ce53bb0e Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Fri, 28 Sep 2018 11:13:59 -0400 Subject: [PATCH 1/2] Enable volume limit feature by default Also add tests for it. --- pkg/features/kube_features.go | 2 +- pkg/volume/csi/nodeinfomanager/BUILD | 3 + .../csi/nodeinfomanager/nodeinfomanager.go | 7 +- .../nodeinfomanager/nodeinfomanager_test.go | 142 ++++++++++++++---- 4 files changed, 125 insertions(+), 29 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index bb5cfd587e..c03e93c6c8 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -408,7 +408,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta}, ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, - AttachVolumeLimit: {Default: false, PreRelease: utilfeature.Beta}, + AttachVolumeLimit: {Default: true, PreRelease: utilfeature.Beta}, CPUManager: {Default: true, PreRelease: utilfeature.Beta}, CPUCFSQuotaPeriod: {Default: false, PreRelease: utilfeature.Alpha}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, diff --git a/pkg/volume/csi/nodeinfomanager/BUILD b/pkg/volume/csi/nodeinfomanager/BUILD index 86ac0efcd2..588871e29e 100644 --- a/pkg/volume/csi/nodeinfomanager/BUILD +++ b/pkg/volume/csi/nodeinfomanager/BUILD @@ -43,10 +43,13 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 462914f968..bea909d6a5 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -89,11 +89,16 @@ func (nim *nodeInfoManager) AddNodeInfo(driverName string, driverNodeID string, nodeUpdateFuncs := []nodeUpdateFunc{ updateNodeIDInNode(driverName, driverNodeID), - updateMaxAttachLimit(driverName, maxAttachLimit), } + if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { nodeUpdateFuncs = append(nodeUpdateFuncs, updateTopologyLabels(topology)) } + + if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { + nodeUpdateFuncs = append(nodeUpdateFuncs, updateMaxAttachLimit(driverName, maxAttachLimit)) + } + err := nim.updateNode(nodeUpdateFuncs...) 
if err != nil { return fmt.Errorf("error updating Node object with CSI driver node info: %v", err) diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go index 122463bdde..6d5c299b39 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go @@ -18,9 +18,12 @@ package nodeinfomanager import ( "encoding/json" + "testing" + "github.com/container-storage-interface/spec/lib/go/csi/v0" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" @@ -31,9 +34,10 @@ import ( csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" csifake "k8s.io/csi-api/pkg/client/clientset/versioned/fake" "k8s.io/kubernetes/pkg/apis/core/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "testing" + "k8s.io/kubernetes/pkg/volume/util" ) type testcase struct { @@ -43,10 +47,12 @@ type testcase struct { existingNodeInfo *csiv1alpha1.CSINodeInfo inputNodeID string inputTopology *csi.Topology + inputVolumeLimit int64 expectedNodeIDMap map[string]string expectedTopologyMap map[string]sets.String expectedLabels map[string]string expectNoNodeInfo bool + expectedVolumeLimit int64 expectFail bool } @@ -61,7 +67,7 @@ func TestAddNodeInfo(t *testing.T) { { name: "empty node", driverName: "com.example.csi/driver1", - existingNode: generateNode(nil /* nodeIDs */, nil /* labels */), + existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), inputNodeID: "com.example.csi/csi-node1", inputTopology: &csi.Topology{ Segments: map[string]string{ @@ -85,7 +91,8 @@ func TestAddNodeInfo(t *testing.T) { }, labelMap{ "com.example.csi/zone": "zoneA", - }), + }, + nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -117,7 +124,7 @@ func TestAddNodeInfo(t *testing.T) { nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -149,7 +156,7 @@ func TestAddNodeInfo(t *testing.T) { }, labelMap{ "net.example.storage/rack": "rack1", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/test-node", @@ -186,7 +193,7 @@ func TestAddNodeInfo(t *testing.T) { }, labelMap{ "com.example.csi/zone": "zoneA", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -212,7 +219,7 @@ func TestAddNodeInfo(t *testing.T) { }, labelMap{ "com.example.csi/zone": "zoneA", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -241,7 +248,7 @@ func TestAddNodeInfo(t *testing.T) { { name: "nil topology, empty node", driverName: "com.example.csi/driver1", - existingNode: generateNode(nil /* nodeIDs */, nil /* labels */), + existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), inputNodeID: "com.example.csi/csi-node1", inputTopology: nil, expectedNodeIDMap: map[string]string{ @@ -261,7 +268,7 @@ func TestAddNodeInfo(t *testing.T) { }, labelMap{ 
"com.example.csi/zone": "zoneA", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -291,7 +298,7 @@ func TestAddNodeInfo(t *testing.T) { }, labelMap{ "net.example.storage/rack": "rack1", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/test-node", @@ -317,10 +324,48 @@ func TestAddNodeInfo(t *testing.T) { { name: "empty node ID", driverName: "com.example.csi/driver1", - existingNode: generateNode(nil /* nodeIDs */, nil /* labels */), + existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), inputNodeID: "", expectFail: true, }, + { + name: "new node with valid max limit", + driverName: "com.example.csi/driver1", + existingNode: generateNode(nil /*nodeIDs*/, nil /*labels*/, nil /*capacity*/), + inputVolumeLimit: 10, + inputTopology: nil, + inputNodeID: "com.example.csi/csi-node1", + expectedVolumeLimit: 10, + expectedNodeIDMap: map[string]string{ + "com.example.csi/driver1": "com.example.csi/csi-node1", + }, + expectedTopologyMap: map[string]sets.String{ + "com.example.csi/driver1": nil, + }, + expectedLabels: nil, + }, + { + name: "node with existing valid max limit", + driverName: "com.example.csi/driver1", + existingNode: generateNode( + nil, /*nodeIDs*/ + nil, /*labels*/ + map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(4, -3), + v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI), + }), + inputVolumeLimit: 20, + inputTopology: nil, + inputNodeID: "com.example.csi/csi-node1", + expectedVolumeLimit: 20, + expectedNodeIDMap: map[string]string{ + "com.example.csi/driver1": "com.example.csi/csi-node1", + }, + expectedTopologyMap: map[string]sets.String{ + "com.example.csi/driver1": nil, + }, + expectedLabels: nil, + }, } test(t, true /* addNodeInfo */, true /* csiNodeInfoEnabled */, testcases) @@ -333,7 +378,7 @@ func TestAddNodeInfo_CSINodeInfoDisabled(t *testing.T) { { name: "empty node", driverName: "com.example.csi/driver1", - existingNode: generateNode(nil /* nodeIDs */, nil /* labels */), + existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), inputNodeID: "com.example.csi/csi-node1", expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -346,7 +391,7 @@ func TestAddNodeInfo_CSINodeInfoDisabled(t *testing.T) { nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), inputNodeID: "com.example.csi/csi-node1", expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -359,7 +404,7 @@ func TestAddNodeInfo_CSINodeInfoDisabled(t *testing.T) { nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/test-node", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), inputNodeID: "com.example.csi/csi-node1", expectedNodeIDMap: map[string]string{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -377,7 +422,7 @@ func TestRemoveNodeInfo(t *testing.T) { { name: "empty node and no CSINodeInfo", driverName: "com.example.csi/driver1", - existingNode: generateNode(nil /* nodeIDs */, nil /* labels */), + existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), expectedNodeIDMap: nil, expectedLabels: nil, expectNoNodeInfo: true, @@ -391,7 +436,7 @@ func TestRemoveNodeInfo(t 
*testing.T) { }, labelMap{ "com.example.csi/zone": "zoneA", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", @@ -413,7 +458,7 @@ func TestRemoveNodeInfo(t *testing.T) { }, labelMap{ "net.example.storage/zone": "zoneA", - }), + }, nil /*capacity*/), existingNodeInfo: generateNodeInfo( nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/csi-node1", @@ -437,7 +482,7 @@ func TestRemoveNodeInfo(t *testing.T) { nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), expectedNodeIDMap: nil, expectedLabels: nil, expectNoNodeInfo: true, @@ -448,13 +493,29 @@ func TestRemoveNodeInfo(t *testing.T) { nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), expectedNodeIDMap: map[string]string{ "net.example.storage/other-driver": "net.example.storage/csi-node1", }, expectedLabels: nil, expectNoNodeInfo: true, }, + { + name: "new node with valid max limit", + driverName: "com.example.csi/driver1", + existingNode: generateNode( + nil, /*nodeIDs*/ + nil, /*labels*/ + map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(4, -3), + v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI), + }, + ), + inputTopology: nil, + inputNodeID: "com.example.csi/csi-node1", + expectNoNodeInfo: true, + expectedVolumeLimit: 0, + }, } test(t, false /* addNodeInfo */, true /* csiNodeInfoEnabled */, testcases) @@ -467,7 +528,7 @@ func TestRemoveNodeInfo_CSINodeInfoDisabled(t *testing.T) { { name: "empty node", driverName: "com.example.csi/driver1", - existingNode: generateNode(nil /* nodeIDs */, nil /* labels */), + existingNode: generateNode(nil /* nodeIDs */, nil /* labels */, nil /*capacity*/), expectedNodeIDMap: nil, }, { @@ -477,7 +538,7 @@ func TestRemoveNodeInfo_CSINodeInfoDisabled(t *testing.T) { nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), expectedNodeIDMap: nil, }, { @@ -487,7 +548,7 @@ func TestRemoveNodeInfo_CSINodeInfoDisabled(t *testing.T) { nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), expectedNodeIDMap: map[string]string{ "net.example.storage/other-driver": "net.example.storage/csi-node1", }, @@ -513,7 +574,7 @@ func TestAddNodeInfoExistingAnnotation(t *testing.T) { nodeIDMap{ "com.example.csi/driver1": "com.example.csi/csi-node1", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), }, { name: "pre-existing info about a different driver in node, but no CSINodeInfo", @@ -521,7 +582,7 @@ func TestAddNodeInfoExistingAnnotation(t *testing.T) { nodeIDMap{ "net.example.storage/other-driver": "net.example.storage/test-node", }, - nil /* labels */), + nil /* labels */, nil /*capacity*/), }, } @@ -575,6 +636,7 @@ func TestAddNodeInfoExistingAnnotation(t *testing.T) { func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []testcase) { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSINodeInfo, csiNodeInfoEnabled)() + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)() for _, tc := range testcases { t.Logf("test case: %q", tc.name) @@ -604,7 +666,7 
@@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t //// Act if addNodeInfo { - err = nim.AddNodeInfo(tc.driverName, tc.inputNodeID, 0 /* maxVolumeLimit */, tc.inputTopology) // TODO test maxVolumeLimit + err = nim.AddNodeInfo(tc.driverName, tc.inputNodeID, tc.inputVolumeLimit, tc.inputTopology) } else { err = nim.RemoveNodeInfo(tc.driverName) } @@ -627,6 +689,13 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t continue } + // We are testing max volume limits + attachLimit := getVolumeLimit(node, tc.driverName) + if attachLimit != tc.expectedVolumeLimit { + t.Errorf("expected volume limit to be %d got %d", tc.expectedVolumeLimit, attachLimit) + continue + } + // Node ID annotation annNodeID, ok := node.Annotations[annotationKeyNodeID] if ok { @@ -685,19 +754,38 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t } } -func generateNode(nodeIDs, labels map[string]string) *v1.Node { +func getVolumeLimit(node *v1.Node, driverName string) int64 { + volumeLimits := map[v1.ResourceName]int64{} + nodeAllocatables := node.Status.Allocatable + for k, v := range nodeAllocatables { + if v1helper.IsAttachableVolumeResourceName(k) { + volumeLimits[k] = v.Value() + } + } + attachKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName)) + attachLimit := volumeLimits[attachKey] + return attachLimit +} + +func generateNode(nodeIDs, labels map[string]string, capacity map[v1.ResourceName]resource.Quantity) *v1.Node { var annotations map[string]string if len(nodeIDs) > 0 { b, _ := json.Marshal(nodeIDs) annotations = map[string]string{annotationKeyNodeID: string(b)} } - return &v1.Node{ + node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", Annotations: annotations, Labels: labels, }, } + + if len(capacity) > 0 { + node.Status.Capacity = v1.ResourceList(capacity) + node.Status.Allocatable = v1.ResourceList(capacity) + } + return node } func generateNodeInfo(nodeIDs map[string]string, topologyKeys map[string][]string) *csiv1alpha1.CSINodeInfo { From e38cfb61be5ebed71407149aae468b769a3ade4e Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 1 Oct 2018 14:13:14 -0400 Subject: [PATCH 2/2] Add a e2e test to check node limits --- test/e2e/storage/BUILD | 2 + test/e2e/storage/volume_limits.go | 63 +++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 test/e2e/storage/volume_limits.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 395a2a7ac5..2e45fee4ed 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -21,6 +21,7 @@ go_library( "regional_pd.go", "subpath.go", "volume_expand.go", + "volume_limits.go", "volume_metrics.go", "volume_provisioning.go", "volumes.go", @@ -29,6 +30,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/storage/v1/util:go_default_library", "//pkg/client/conditions:go_default_library", "//pkg/kubelet/apis:go_default_library", diff --git a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go new file mode 100644 index 0000000000..9c3a179449 --- /dev/null +++ b/test/e2e/storage/volume_limits.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + . "github.com/onsi/ginkgo" + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +var _ = utils.SIGDescribe("Volume limits", func() { + var ( + c clientset.Interface + ) + f := framework.NewDefaultFramework("volume-limits-on-node") + BeforeEach(func() { + framework.SkipUnlessProviderIs("aws", "gce", "gke") + c = f.ClientSet + framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + }) + + It("should verify that all nodes have volume limits", func() { + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + if len(nodeList.Items) == 0 { + framework.Failf("Unable to find ready and schedulable Node") + } + for _, node := range nodeList.Items { + volumeLimits := getVolumeLimit(&node) + if len(volumeLimits) == 0 { + framework.Failf("Expected volume limits to be set") + } + } + + }) +}) + +func getVolumeLimit(node *v1.Node) map[v1.ResourceName]int64 { + volumeLimits := map[v1.ResourceName]int64{} + nodeAllocatables := node.Status.Allocatable + for k, v := range nodeAllocatables { + if v1helper.IsAttachableVolumeResourceName(k) { + volumeLimits[k] = v.Value() + } + } + return volumeLimits +}
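
For reviewers who want to spot-check the advertised limits on a live cluster outside the e2e suite, the same allocatable resources can be read straight from the API. Below is a minimal client-go sketch, separate from the patch itself: it assumes a 1.12-era client-go (Nodes().List without a request context), a placeholder kubeconfig path, and that matching on the "attachable-volumes-" name prefix is equivalent to the v1helper.IsAttachableVolumeResourceName check used in the tests above.

    package main

    import (
        "fmt"
        "strings"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Placeholder kubeconfig path; point this at the cluster under test.
        config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(config)

        nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        for _, node := range nodes.Items {
            for name, quantity := range node.Status.Allocatable {
                // Attach limits surface as allocatable resources whose names carry
                // the attachable-volumes prefix, e.g. the per-driver CSI key built
                // by util.GetCSIAttachLimitKey in the kubelet.
                if strings.HasPrefix(string(name), "attachable-volumes-") {
                    fmt.Printf("%s: %s = %d\n", node.Name, name, quantity.Value())
                }
            }
        }
    }

The same values are also visible with kubectl, e.g. kubectl get node <node-name> -o jsonpath='{.status.allocatable}'.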