Replace hard-coded "cpu" and "memory" with consts

pull/6/head
xiangpengzhao 2017-08-15 15:56:18 +08:00
parent ba110e9f08
commit 1c4dbcf5ca
20 changed files with 116 additions and 119 deletions
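
The change is mechanical: every `api.ResourceList`/`v1.ResourceList` literal keyed by the raw strings "cpu" and "memory" now uses the typed constants api.ResourceCPU/v1.ResourceCPU and api.ResourceMemory/v1.ResourceMemory instead. A minimal standalone sketch of the pattern, assuming only the standard k8s.io/api and k8s.io/apimachinery packages (the surrounding test scaffolding is omitted):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Before this commit: v1.ResourceList{"cpu": ..., "memory": ...}.
	// The constants are of type v1.ResourceName and hold the same
	// strings, so behavior is unchanged; a mistyped constant name now
	// fails to compile, whereas a mistyped string literal would
	// silently create an unknown resource key.
	limits := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("100m"),
		v1.ResourceMemory: resource.MustParse("100Mi"),
	}
	cpu := limits[v1.ResourceCPU]
	mem := limits[v1.ResourceMemory]
	fmt.Println(cpu.String(), mem.String()) // 100m 100Mi
}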

View File

@@ -30,9 +30,8 @@ func TestResourceHelpers(t *testing.T) {
 	memoryLimit := resource.MustParse("10G")
 	resourceSpec := api.ResourceRequirements{
 		Limits: api.ResourceList{
-			"cpu":             cpuLimit,
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			api.ResourceCPU:    cpuLimit,
+			api.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
@@ -43,8 +42,7 @@ func TestResourceHelpers(t *testing.T) {
 	}
 	resourceSpec = api.ResourceRequirements{
 		Limits: api.ResourceList{
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			api.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {

View File

@@ -30,9 +30,8 @@ func TestResourceHelpers(t *testing.T) {
 	memoryLimit := resource.MustParse("10G")
 	resourceSpec := v1.ResourceRequirements{
 		Limits: v1.ResourceList{
-			"cpu":             cpuLimit,
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			v1.ResourceCPU:    cpuLimit,
+			v1.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
@@ -43,8 +42,7 @@ func TestResourceHelpers(t *testing.T) {
 	}
 	resourceSpec = v1.ResourceRequirements{
 		Limits: v1.ResourceList{
-			"memory":          memoryLimit,
-			"kube.io/storage": memoryLimit,
+			v1.ResourceMemory: memoryLimit,
 		},
 	}
 	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {

View File

@@ -563,7 +563,7 @@ func TestHandleMemExceeded(t *testing.T) {
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"memory": resource.MustParse("90"),
+				v1.ResourceMemory: resource.MustParse("90"),
 			},
 		}}},
 	}

View File

@@ -338,47 +338,47 @@ func getTestPods() map[string]*v1.Pod {
 	allPods := map[string]*v1.Pod{
 		tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("1m"),
-				"memory": resource.MustParse("1Mi"),
+				v1.ResourceCPU:    resource.MustParse("1m"),
+				v1.ResourceMemory: resource.MustParse("1Mi"),
 			},
 		}),
 		bestEffort: getPodWithResources(bestEffort, v1.ResourceRequirements{}),
 		critical: getPodWithResources(critical, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		burstable: getPodWithResources(burstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		guaranteed: getPodWithResources(guaranteed, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 			Limits: v1.ResourceList{
-				"cpu":    resource.MustParse("100m"),
-				"memory": resource.MustParse("100Mi"),
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100Mi"),
 			},
 		}),
 		highRequestBurstable: getPodWithResources(highRequestBurstable, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 		}),
 		highRequestGuaranteed: getPodWithResources(highRequestGuaranteed, v1.ResourceRequirements{
 			Requests: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 			Limits: v1.ResourceList{
-				"cpu":    resource.MustParse("300m"),
-				"memory": resource.MustParse("300Mi"),
+				v1.ResourceCPU:    resource.MustParse("300m"),
+				v1.ResourceMemory: resource.MustParse("300Mi"),
 			},
 		}),
 	}

View File

@@ -1073,8 +1073,8 @@ func TestSetApp(t *testing.T) {
 				Command:    []string{"/bin/bar", "$(env-bar)"},
 				WorkingDir: tmpDir,
 				Resources: v1.ResourceRequirements{
-					Limits:   v1.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("50M")},
-					Requests: v1.ResourceList{"cpu": resource.MustParse("5m"), "memory": resource.MustParse("5M")},
+					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m"), v1.ResourceMemory: resource.MustParse("50M")},
+					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5m"), v1.ResourceMemory: resource.MustParse("5M")},
 				},
 			},
 			mountPoints: []appctypes.MountPoint{
@@ -1137,8 +1137,8 @@ func TestSetApp(t *testing.T) {
 				Args:       []string{"hello", "world", "$(env-bar)"},
 				WorkingDir: tmpDir,
 				Resources: v1.ResourceRequirements{
-					Limits:   v1.ResourceList{"cpu": resource.MustParse("50m")},
-					Requests: v1.ResourceList{"memory": resource.MustParse("5M")},
+					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
+					Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("5M")},
 				},
 			},
 			mountPoints: []appctypes.MountPoint{

View File

@@ -51,16 +51,16 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
@@ -74,16 +74,16 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("3000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("3000"),
 				},
 			},
 		},

View File

@@ -51,16 +51,16 @@ func TestLeastRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
@@ -74,16 +74,16 @@ func TestLeastRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("3000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("3000"),
 				},
 			},
 		},

View File

@@ -85,8 +85,8 @@ func TestPriorityMetadata(t *testing.T) {
 			ImagePullPolicy: "Always",
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("200m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("200m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},

View File

@@ -45,16 +45,16 @@ func TestMostRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("0"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("0"),
 				},
 			},
 		},
@@ -68,16 +68,16 @@ func TestMostRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("1000m"),
-					"memory": resource.MustParse("2000"),
+					v1.ResourceCPU:    resource.MustParse("1000m"),
+					v1.ResourceMemory: resource.MustParse("2000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("3000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("3000"),
 				},
 			},
 		},
@@ -89,16 +89,16 @@ func TestMostRequested(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("2000m"),
-					"memory": resource.MustParse("4000"),
+					v1.ResourceCPU:    resource.MustParse("2000m"),
+					v1.ResourceMemory: resource.MustParse("4000"),
 				},
 			},
 		},
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("3000m"),
-					"memory": resource.MustParse("5000"),
+					v1.ResourceCPU:    resource.MustParse("3000m"),
+					v1.ResourceMemory: resource.MustParse("5000"),
 				},
 			},
 		},

View File

@@ -30,12 +30,12 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 		ObjectMeta: metav1.ObjectMeta{Name: node},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}

View File

@@ -390,12 +390,12 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 		ObjectMeta: metav1.ObjectMeta{Name: node},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
@@ -438,9 +438,9 @@ func TestZeroRequest(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu": resource.MustParse(
+					v1.ResourceCPU: resource.MustParse(
 						strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
-					"memory": resource.MustParse(
+					v1.ResourceMemory: resource.MustParse(
 						strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
 				},
 			},
@@ -455,9 +455,9 @@ func TestZeroRequest(t *testing.T) {
 		{
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu": resource.MustParse(
+					v1.ResourceCPU: resource.MustParse(
 						strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
-					"memory": resource.MustParse(
+					v1.ResourceMemory: resource.MustParse(
 						strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
 				},
 			},

View File

@@ -215,12 +215,12 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
 					Image: "gcr.io/google_containers/nginx-slim:0.7",
 					Resources: v1.ResourceRequirements{
 						Limits: v1.ResourceList{
-							"cpu":    resource.MustParse("100m"),
-							"memory": resource.MustParse("100Mi"),
+							v1.ResourceCPU:    resource.MustParse("100m"),
+							v1.ResourceMemory: resource.MustParse("100Mi"),
 						},
 						Requests: v1.ResourceList{
-							"cpu":    resource.MustParse("100m"),
-							"memory": resource.MustParse("100Mi"),
+							v1.ResourceCPU:    resource.MustParse("100m"),
+							v1.ResourceMemory: resource.MustParse("100Mi"),
 						},
 					},
 				},

View File

@@ -157,7 +157,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
-			allocatable, found := node.Status.Allocatable["cpu"]
+			allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 			Expect(found).To(Equal(true))
 			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
 			if nodeMaxAllocatable < allocatable.MilliValue() {
@@ -201,10 +201,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 				Labels: map[string]string{"name": ""},
 				Resources: &v1.ResourceRequirements{
 					Limits: v1.ResourceList{
-						"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+						v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 					},
 					Requests: v1.ResourceList{
-						"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+						v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 					},
 				},
 			}), true, framework.Logf))
@@ -215,7 +215,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			Labels: map[string]string{"name": "additional"},
 			Resources: &v1.ResourceRequirements{
 				Limits: v1.ResourceList{
-					"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+					v1.ResourceCPU: *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 				},
 			},
 		}
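
The read side of the same cleanup: Allocatable and Capacity are v1.ResourceList maps, so lookups take the same typed keys. A small self-contained sketch (the node literal here is illustrative; in the e2e test the node comes from the cluster's node list):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	node := v1.Node{
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
				// 2000m of CPU, as a milli-quantity.
				v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
			},
		},
	}
	// The typed key replaces the former Allocatable["cpu"] lookup.
	if allocatable, found := node.Status.Allocatable[v1.ResourceCPU]; found {
		fmt.Println(allocatable.MilliValue()) // 2000
	}
}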

View File

@@ -278,20 +278,20 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	// we need the max one to keep the same cpu/mem use rate
 	ratio = math.Max(maxCPUFraction, maxMemFraction)
 	for _, node := range nodes {
-		memAllocatable, found := node.Status.Allocatable["memory"]
+		memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
 		Expect(found).To(Equal(true))
 		memAllocatableVal := memAllocatable.Value()

-		cpuAllocatable, found := node.Status.Allocatable["cpu"]
+		cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 		Expect(found).To(Equal(true))
 		cpuAllocatableMil := cpuAllocatable.MilliValue()

 		needCreateResource := v1.ResourceList{}
 		cpuFraction := cpuFractionMap[node.Name]
 		memFraction := memFractionMap[node.Name]
-		needCreateResource["cpu"] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
+		needCreateResource[v1.ResourceCPU] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)

-		needCreateResource["memory"] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
+		needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)

 		err := testutils.StartPods(cs, 1, ns, string(uuid.NewUUID()),
 			*initPausePod(f, pausePodConfig{
@@ -332,12 +332,12 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 			totalRequestedMemResource += getNonZeroRequests(&pod).Memory
 		}
 	}
-	cpuAllocatable, found := node.Status.Allocatable["cpu"]
+	cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
 	Expect(found).To(Equal(true))
 	cpuAllocatableMil := cpuAllocatable.MilliValue()
 	cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
-	memAllocatable, found := node.Status.Allocatable["memory"]
+	memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
 	Expect(found).To(Equal(true))
 	memAllocatableVal := memAllocatable.Value()
 	memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)

View File

@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() {
 		// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
 		// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
 		kubeReserved.Sub(resource.MustParse("300Mi"))
-		initialConfig.KubeReserved = kubeletconfig.ConfigurationMap(map[string]string{"memory": kubeReserved.String()})
+		initialConfig.KubeReserved = kubeletconfig.ConfigurationMap(map[string]string{string(v1.ResourceMemory): kubeReserved.String()})
 		initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
 		initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
 		initialConfig.CgroupsPerQOS = true
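
The arithmetic in the comment above, restated as a standalone sketch; the 4Gi capacity is an invented example value, while the 300Mi subtraction mirrors the test:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Allocatable = Capacity - Reserved - 250Mi (default hard eviction
	// threshold). To pin Allocatable at 50Mi, the test sets
	// Reserved = Capacity - 50Mi - 250Mi = Capacity - 300Mi.
	capacity := resource.MustParse("4Gi") // illustrative node capacity
	kubeReserved := capacity.DeepCopy()
	kubeReserved.Sub(resource.MustParse("300Mi"))
	fmt.Println(kubeReserved.String()) // 3796Mi
}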

View File

@@ -26,6 +26,7 @@ import (
 	"strconv"
 	"time"

+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/perftype"
@@ -156,12 +157,12 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[stri
 	node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())

-	cpu, ok := node.Status.Capacity["cpu"]
+	cpu, ok := node.Status.Capacity[v1.ResourceCPU]
 	if !ok {
 		framework.Failf("Fail to fetch CPU capacity value of test node.")
 	}

-	memory, ok := node.Status.Capacity["memory"]
+	memory, ok := node.Status.Capacity[v1.ResourceMemory]
 	if !ok {
 		framework.Failf("Fail to fetch Memory capacity value of test node.")
 	}

View File

@@ -172,8 +172,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 						Name: podName,
 						Resources: v1.ResourceRequirements{
 							Limits: v1.ResourceList{
-								"cpu":    resource.MustParse("100m"),
-								"memory": resource.MustParse("50Mi"),
+								v1.ResourceCPU:    resource.MustParse("100m"),
+								v1.ResourceMemory: resource.MustParse("50Mi"),
 							},
 						},
 					},
@@ -213,8 +213,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 						Name: podName,
 						Resources: v1.ResourceRequirements{
 							Requests: v1.ResourceList{
-								"cpu":    resource.MustParse("100m"),
-								"memory": resource.MustParse("50Mi"),
+								v1.ResourceCPU:    resource.MustParse("100m"),
+								v1.ResourceMemory: resource.MustParse("50Mi"),
 							},
 						},
 					},

View File

@@ -56,18 +56,18 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
 			// Define test pods
 			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 				Limits: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			})
 			nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 			})
 			nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{})

View File

@@ -139,12 +139,12 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", func() {
 			// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 			guaranteed := getMemhogPod("guaranteed-pod", "guaranteed", v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				},
 				Limits: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				}})
 			guaranteed = f.PodClient().CreateSync(guaranteed)
 			glog.Infof("pod created with name: %s", guaranteed.Name)
@@ -152,8 +152,8 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", func() {
 			// A pod is burstable if limits and requests do not match across all containers.
 			burstable := getMemhogPod("burstable-pod", "burstable", v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"cpu":    resource.MustParse("100m"),
-					"memory": resource.MustParse("100Mi"),
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("100Mi"),
 				}})
 			burstable = f.PodClient().CreateSync(burstable)
 			glog.Infof("pod created with name: %s", burstable.Name)
@@ -256,7 +256,7 @@ func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *
 	// This helps prevent a guaranteed pod from triggering an OOM kill due to it's low memory limit,
 	// which will cause the test to fail inappropriately.
 	var memLimit string
-	if limit, ok := res.Limits["memory"]; ok {
+	if limit, ok := res.Limits[v1.ResourceMemory]; ok {
 		memLimit = strconv.Itoa(int(
 			float64(limit.Value()) * 0.8))
 	} else {

View File

@@ -40,12 +40,12 @@ import (
 func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) {
 	initialConfig.EnforceNodeAllocatable = []string{"pods", "kube-reserved", "system-reserved"}
 	initialConfig.SystemReserved = kubeletconfig.ConfigurationMap{
-		"cpu":    "100m",
-		"memory": "100Mi",
+		string(v1.ResourceCPU):    "100m",
+		string(v1.ResourceMemory): "100Mi",
 	}
 	initialConfig.KubeReserved = kubeletconfig.ConfigurationMap{
-		"cpu":    "100m",
-		"memory": "100Mi",
+		string(v1.ResourceCPU):    "100m",
+		string(v1.ResourceMemory): "100Mi",
 	}
 	initialConfig.EvictionHard = "memory.available<100Mi"
 	// Necessary for allocatable cgroup creation.
@@ -210,23 +210,23 @@ func runTest(f *framework.Framework) error {
 		return fmt.Errorf("Expected all resources in capacity to be found in allocatable")
 	}

 	// CPU based evictions are not supported.
-	if allocatableCPU.Cmp(schedulerAllocatable["cpu"]) != 0 {
-		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable["cpu"], capacity["cpu"])
+	if allocatableCPU.Cmp(schedulerAllocatable[v1.ResourceCPU]) != 0 {
+		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceCPU])
 	}
-	if allocatableMemory.Cmp(schedulerAllocatable["memory"]) != 0 {
-		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable["cpu"], capacity["memory"])
+	if allocatableMemory.Cmp(schedulerAllocatable[v1.ResourceMemory]) != 0 {
+		return fmt.Errorf("Unexpected cpu allocatable value exposed by the node. Expected: %v, got: %v, capacity: %v", allocatableCPU, schedulerAllocatable[v1.ResourceCPU], capacity[v1.ResourceMemory])
 	}
 	if !cgroupManager.Exists(cm.CgroupName(kubeReservedCgroup)) {
 		return fmt.Errorf("Expected kube reserved cgroup Does not exist")
 	}
 	// Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`.
-	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved["cpu"])
+	kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], kubeReservedCgroup, "cpu.shares"), cm.MilliCPUToShares(kubeReservedCPU.MilliValue()), 10); err != nil {
 		return err
 	}
 	// Expect Memory limit kube reserved cgroup to equal configured value `100Mi`.
-	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved["memory"])
+	kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], kubeReservedCgroup, "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil {
 		return err
 	}
@@ -234,12 +234,12 @@ func runTest(f *framework.Framework) error {
 		return fmt.Errorf("Expected system reserved cgroup Does not exist")
 	}
 	// Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`.
-	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved["cpu"])
+	systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], systemReservedCgroup, "cpu.shares"), cm.MilliCPUToShares(systemReservedCPU.MilliValue()), 10); err != nil {
 		return err
 	}
 	// Expect Memory limit on node allocatable cgroup to equal allocatable.
-	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved["memory"])
+	systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)])
 	if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], systemReservedCgroup, "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil {
 		return err
 	}
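
Where the destination map is keyed by plain strings rather than v1.ResourceName, as with kubeletconfig.ConfigurationMap above, the constants are converted explicitly with string(...) instead of reintroducing the literals. A sketch of that conversion, assuming only a map[string]string-shaped config:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	// ConfigurationMap in the kubelet config is a map[string]string,
	// so the v1.ResourceName constants are converted at the boundary
	// rather than hard-coding "cpu" and "memory" again.
	systemReserved := map[string]string{
		string(v1.ResourceCPU):    "100m",
		string(v1.ResourceMemory): "100Mi",
	}
	fmt.Println(systemReserved[string(v1.ResourceCPU)]) // 100m
}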