mirror of https://github.com/k3s-io/k3s
Promote LocalStorageCapacityIsolation feature to beta
The LocalStorageCapacityIsolation feature added a new resource type, ResourceEphemeralStorage ("ephemeral-storage"), so that this resource can be allocated, limited, and consumed in the same way as CPU/memory. All the features related to resource management (resource request/limit, quota, limitrange) are available for local ephemeral storage. This local ephemeral storage represents the storage for the root file system, which is consumed by containers' writable layers and logs. Some volumes, such as emptyDir, may also consume this storage.
parent 07240b7166
commit b2e744c620
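For readers skimming the diff, the commit message above is the only prose, so a small, self-contained sketch of what the promoted resource looks like from the API side may help. The program, the quantities, and the variable names are illustrative assumptions, not code from this commit; only the "ephemeral-storage" resource name and the request/limit semantics come from the message above.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Illustrative only: a container that requests and limits local
	// ephemeral storage (root-filesystem writable layer and logs) the
	// same way it would CPU or memory.
	req := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceEphemeralStorage: resource.MustParse("2Gi"),
		},
		Limits: v1.ResourceList{
			v1.ResourceEphemeralStorage: resource.MustParse("4Gi"),
		},
	}
	limit := req.Limits[v1.ResourceEphemeralStorage]
	fmt.Println("ephemeral-storage limit:", limit.String())
}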
@@ -98,7 +98,7 @@ const (
 	PersistentLocalVolumes utilfeature.Feature = "PersistentLocalVolumes"
 
 	// owner: @jinxu
-	// alpha: v1.7
+	// beta: v1.10
 	//
 	// New local storage types to support local storage capacity isolation
 	LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation"

@@ -281,7 +281,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 	RotateKubeletServerCertificate: {Default: false, PreRelease: utilfeature.Alpha},
 	RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta},
 	PersistentLocalVolumes:         {Default: true, PreRelease: utilfeature.Beta},
-	LocalStorageCapacityIsolation:  {Default: false, PreRelease: utilfeature.Alpha},
+	LocalStorageCapacityIsolation:  {Default: true, PreRelease: utilfeature.Beta},
 	HugePages:                      {Default: true, PreRelease: utilfeature.Beta},
 	DebugContainers:                {Default: false, PreRelease: utilfeature.Alpha},
 	PodShareProcessNamespace:       {Default: false, PreRelease: utilfeature.Alpha},
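With the gate now defaulting to true, components that guard ephemeral-storage accounting behind this flag pick it up automatically. A minimal sketch of that check, assuming the pkg/features and apiserver util/feature packages used throughout this tree; the wrapper function and main are hypothetical scaffolding, not code from this commit.

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// localEphemeralStorageEnabled reports whether local ephemeral-storage
// isolation should be applied; after this commit the gate defaults to true.
func localEphemeralStorageEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation)
}

func main() {
	fmt.Println("LocalStorageCapacityIsolation enabled:", localEphemeralStorageEnabled())
}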
@@ -94,6 +94,7 @@ go_library(
        "//pkg/scheduler/schedulercache:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
+       "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",

@@ -140,7 +141,6 @@ go_library(
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library",
        "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
-       "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
@@ -20,6 +20,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 
+	"k8s.io/apimachinery/pkg/api/resource"
 	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/config"

@@ -67,7 +68,12 @@ func (cm *containerManagerStub) GetNodeAllocatableReservation() v1.ResourceList
 }
 
 func (cm *containerManagerStub) GetCapacity() v1.ResourceList {
-	return nil
+	c := v1.ResourceList{
+		v1.ResourceEphemeralStorage: *resource.NewQuantity(
+			int64(0),
+			resource.BinarySI),
+	}
+	return c
 }
 
 func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
@@ -21,6 +21,7 @@ go_library(
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/events:go_default_library",
+       "//pkg/kubelet/util/sliceutils:go_default_library",
        "//pkg/util/parsers:go_default_library",
        "//vendor/github.com/docker/distribution/reference:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
@@ -34,6 +34,7 @@ import (
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/events"
+	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 )
 
 // StatsProvider is an interface for fetching stats used during image garbage
@@ -120,11 +121,15 @@ func (i *imageCache) set(images []container.Image) {
 	i.images = images
 }
 
-// get gets image list from image cache.
+// get gets a sorted (by image size) image list from image cache.
+// There is a potential data race in this function. See PR #60448
+// Because there is no deepcopy function available currently, move the sort
+// into this function.
 func (i *imageCache) get() []container.Image {
-	i.RLock()
-	defer i.RUnlock()
-	return append([]container.Image{}, i.images...)
+	i.Lock()
+	defer i.Unlock()
+	sort.Sort(sliceutils.ByImageSize(i.images))
+	return i.images
 }
 
 // Information about the images we track.
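The rewritten get() above sorts the cached slice in place, which mutates shared state, so it swaps the read lock for the full write lock instead of returning a copy. A standalone sketch of that idea, with a stand-in image type and a largest-first comparator; these are assumptions mirroring the "sorted from max to min" expectation in the node-status test later in this diff, not the actual sliceutils.ByImageSize code.

package main

import (
	"fmt"
	"sort"
	"sync"
)

// image is a stand-in for the kubelet's container.Image type.
type image struct {
	Name string
	Size int64
}

// bySize orders images from largest to smallest, mirroring the
// "sorted from max to min" expectation in the node-status tests.
type bySize []image

func (s bySize) Len() int           { return len(s) }
func (s bySize) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s bySize) Less(i, j int) bool { return s[i].Size > s[j].Size }

type cache struct {
	sync.Mutex
	images []image
}

// get sorts under the full lock because sort.Sort mutates the shared slice;
// a read lock would let two goroutines reorder it concurrently.
func (c *cache) get() []image {
	c.Lock()
	defer c.Unlock()
	sort.Sort(bySize(c.images))
	return c.images
}

func main() {
	c := &cache{images: []image{{"v1", 123}, {"v3", 456}}}
	fmt.Println(c.get()) // [{v3 456} {v1 123}]
}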
@@ -560,11 +560,11 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		return nil, err
 	}
 	klet.networkPlugin = plug
 
-	machineInfo, err := klet.GetCachedMachineInfo()
+	machineInfo, err := klet.cadvisor.MachineInfo()
 	if err != nil {
 		return nil, err
 	}
 	klet.machineInfo = machineInfo
 
 	imageBackOff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)

@@ -1338,8 +1338,6 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
 		// TODO(random-liu): Add backoff logic in the babysitter
 		glog.Fatalf("Failed to start cAdvisor %v", err)
 	}
-	// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
-	kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)
 
 	// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
 	// ignore any errors, since if stats collection is not successful, the container manager will fail to start below.

@@ -1355,6 +1353,9 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
 		// Fail kubelet and rely on the babysitter to retry starting kubelet.
 		glog.Fatalf("Failed to start ContainerManager %v", err)
 	}
+	// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
+	kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)
+
 	// container log manager must start after container runtime is up to retrieve information from container runtime
 	// and inform container to reopen log file after log rotation.
 	kl.containerLogManager.Start()
@@ -281,12 +281,5 @@ func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
 
 // GetCachedMachineInfo assumes that the machine info can't change without a reboot
 func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapiv1.MachineInfo, error) {
-	if kl.machineInfo == nil {
-		info, err := kl.cadvisor.MachineInfo()
-		if err != nil {
-			return nil, err
-		}
-		kl.machineInfo = info
-	}
 	return kl.machineInfo, nil
 }
@@ -22,8 +22,8 @@ import (
 	"math"
 	"net"
 	goruntime "runtime"
-	"sort"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/golang/glog"

@@ -42,7 +42,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/util"
-	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	nodeutil "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/version"
@@ -702,7 +701,6 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
 		return
 	}
 	// sort the images from max to min, and only set top N images into the node status.
-	sort.Sort(sliceutils.ByImageSize(containerImages))
 	if maxImagesInNodeStatus < len(containerImages) {
 		containerImages = containerImages[0:maxImagesInNodeStatus]
 	}
@@ -773,7 +771,6 @@ func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) {
 			LastHeartbeatTime: currentTime,
 		}
 	}
-
 	// Append AppArmor status if it's enabled.
 	// TODO(tallclair): This is a temporary message until node feature reporting is added.
 	if newNodeReadyCondition.Status == v1.ConditionTrue &&
@@ -1019,10 +1016,15 @@ func (kl *Kubelet) setNodeOODCondition(node *v1.Node) {
 
 // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 // TODO: why is this a package var?
-var oldNodeUnschedulable bool
+var (
+	oldNodeUnschedulable     bool
+	oldNodeUnschedulableLock sync.Mutex
+)
 
 // record if node schedulable change.
 func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) {
+	oldNodeUnschedulableLock.Lock()
+	defer oldNodeUnschedulableLock.Unlock()
 	if oldNodeUnschedulable != node.Spec.Unschedulable {
 		if node.Spec.Unschedulable {
 			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
@@ -120,12 +120,12 @@ func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error)
 
 type localCM struct {
 	cm.ContainerManager
-	allocatable v1.ResourceList
-	capacity    v1.ResourceList
+	allocatableReservation v1.ResourceList
+	capacity               v1.ResourceList
 }
 
 func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
-	return lcm.allocatable
+	return lcm.allocatableReservation
 }
 
 func (lcm *localCM) GetCapacity() v1.ResourceList {
@@ -222,13 +222,15 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
-		allocatable: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+		allocatableReservation: v1.ResourceList{
+			v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(100E6, resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
 		},
 		capacity: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
 		},
 	}
 	kubeClient := testKubelet.fakeKubeClient
@@ -248,10 +250,21 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 		KernelVersion:      "3.16.0-0.bpo.4-amd64",
 		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
 	}
+	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:     400,
+		Capacity:  5000,
+		Available: 600,
+	}, nil)
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:     400,
+		Capacity:  5000,
+		Available: 600,
+	}, nil)
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 	maxAge := 0 * time.Second
 	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
 	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
+	kubelet.machineInfo = machineInfo
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -312,14 +325,16 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 				KubeProxyVersion: version.Get().String(),
 			},
 			Capacity: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(9900E6, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
 			},
 			Addresses: []v1.NodeAddress{
 				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
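The expected values above are consistent with the usual node-allocatable bookkeeping: ephemeral-storage capacity comes from the mocked RootFsInfo (5000) and allocatable is capacity minus the kubelet's reservation (2000) in localCM. That relationship is an inference from the test values, not something this diff states explicitly; a trivial restatement of the arithmetic:

package main

import "fmt"

func main() {
	// Values taken from TestUpdateNewNodeStatus above.
	capacity := int64(5000)    // RootFsInfo.Capacity returned by the mocked cadvisor
	reservation := int64(2000) // ephemeral-storage entry in allocatableReservation
	allocatable := capacity - reservation
	fmt.Println(allocatable) // 3000, the expected Allocatable ephemeral-storage
}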
@@ -359,13 +374,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
-		allocatable: v1.ResourceList{
+		allocatableReservation: v1.ResourceList{
 			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
 		},
 		capacity: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
+			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
 		},
 	}
 
@@ -443,10 +459,21 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 		KernelVersion:      "3.16.0-0.bpo.4-amd64",
 		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
 	}
+	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:     400,
+		Capacity:  5000,
+		Available: 600,
+	}, nil)
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:     400,
+		Capacity:  5000,
+		Available: 600,
+	}, nil)
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 	maxAge := 0 * time.Second
 	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
 	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
+	kubelet.machineInfo = machineInfo
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -507,14 +534,16 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 				KubeProxyVersion: version.Get().String(),
 			},
 			Capacity: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(19900E6, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
 			},
 			Addresses: []v1.NodeAddress{
 				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
@@ -522,14 +551,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			},
 			// images will be sorted from max to min in node status.
 			Images: []v1.ContainerImage{
 				{
 					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
 					SizeBytes: 456,
 				},
 				{
 					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
 					SizeBytes: 123,
 				},
 				{
 					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
 					SizeBytes: 456,
 				},
 			},
 		},
 	}
@@ -596,7 +625,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
 	kubelet.heartbeatClient, err = v1core.NewForConfig(config)
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
-		allocatable: v1.ResourceList{
+		allocatableReservation: v1.ResourceList{
 			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
 		},
@@ -622,13 +651,15 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
-		allocatable: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
+		allocatableReservation: v1.ResourceList{
+			v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(100E6, resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
 		},
 		capacity: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
 		},
 	}
 
@@ -650,10 +681,21 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 		KernelVersion:      "3.16.0-0.bpo.4-amd64",
 		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
 	}
 
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 	maxAge := 0 * time.Second
 	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
 	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
+	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:    400,
+		Capacity: 10E9,
+	}, nil)
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:    400,
+		Capacity: 20E9,
+	}, nil)
 
+	kubelet.machineInfo = machineInfo
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -707,28 +749,30 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 				KubeProxyVersion: version.Get().String(),
 			},
 			Capacity: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(1800, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(9900E6, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
 			},
 			Addresses: []v1.NodeAddress{
 				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
 				{Type: v1.NodeHostName, Address: testKubeletHostname},
 			},
 			Images: []v1.ContainerImage{
 				{
 					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
 					SizeBytes: 456,
 				},
 				{
 					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
 					SizeBytes: 123,
 				},
 				{
 					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
 					SizeBytes: 456,
 				},
 			},
 		},
 	}
@@ -898,6 +942,7 @@ func TestRegisterWithApiServer(t *testing.T) {
 		Usage:    9,
 		Capacity: 10,
 	}, nil)
+	kubelet.machineInfo = machineInfo
 
 	done := make(chan struct{})
 	go func() {
@@ -1105,12 +1150,14 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
 	kubelet.kubeClient = nil // ensure only the heartbeat client is used
 	kubelet.containerManager = &localCM{
 		ContainerManager: cm.NewStubContainerManager(),
-		allocatable: v1.ResourceList{
-			v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI),
+		allocatableReservation: v1.ResourceList{
+			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
 		},
 		capacity: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
+			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
 		},
 	}
 	kubeClient := testKubelet.fakeKubeClient
@@ -1134,20 +1181,33 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
 	maxAge := 0 * time.Second
 	options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
 	mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
+	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:     400,
+		Capacity:  3000,
+		Available: 600,
+	}, nil)
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:     400,
+		Capacity:  3000,
+		Available: 600,
+	}, nil)
+	kubelet.machineInfo = machineInfo
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
 		Spec:       v1.NodeSpec{},
 		Status: v1.NodeStatus{
 			Capacity: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
 			},
 			Allocatable: v1.ResourceList{
-				v1.ResourceCPU:    *resource.NewMilliQuantity(0, resource.DecimalSI),
-				v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
-				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
+				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
+				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
 			},
 		},
 	}
@@ -264,6 +264,15 @@ func newTestKubeletWithImageList(
 		ImageGCManager: imageGCManager,
 	}
 	kubelet.containerLogManager = logs.NewStubContainerLogManager()
+	containerGCPolicy := kubecontainer.ContainerGCPolicy{
+		MinAge:             time.Duration(0),
+		MaxPerPodContainer: 1,
+		MaxContainers:      -1,
+	}
+	containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
+	assert.NoError(t, err)
+	kubelet.containerGC = containerGC
 
 	fakeClock := clock.NewFakeClock(time.Now())
 	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
 	kubelet.backOff.Clock = fakeClock
@@ -42,10 +42,10 @@ var _ = SIGDescribe("LimitRange", func() {
 	It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
 		By("Creating a LimitRange")
 
-		min := getResourceList("50m", "100Mi")
-		max := getResourceList("500m", "500Mi")
-		defaultLimit := getResourceList("500m", "500Mi")
-		defaultRequest := getResourceList("100m", "200Mi")
+		min := getResourceList("50m", "100Mi", "100Gi")
+		max := getResourceList("500m", "500Mi", "500Gi")
+		defaultLimit := getResourceList("500m", "500Mi", "500Gi")
+		defaultRequest := getResourceList("100m", "200Mi", "200Gi")
 		maxLimitRequestRatio := v1.ResourceList{}
 		limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
 			min, max,
@@ -104,7 +104,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		}
 
 		By("Creating a Pod with partial resource requirements")
-		pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
+		pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		Expect(err).NotTo(HaveOccurred())
 

@@ -114,7 +114,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		// This is an interesting case, so it's worth a comment
 		// If you specify a Limit, and no Request, the Limit will default to the Request
 		// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
-		expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")}
+		expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi", "150Gi"), Limits: getResourceList("300m", "500Mi", "500Gi")}
 		for i := range pod.Spec.Containers {
 			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
 			if err != nil {
@@ -125,28 +125,28 @@ var _ = SIGDescribe("LimitRange", func() {
 		}
 
 		By("Failing to create a Pod with less than min resources")
-		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
+		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		Expect(err).To(HaveOccurred())
 
 		By("Failing to create a Pod with more than max resources")
-		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
+		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		Expect(err).To(HaveOccurred())
 
 		By("Updating a LimitRange")
-		newMin := getResourceList("9m", "49Mi")
+		newMin := getResourceList("9m", "49Mi", "49Gi")
 		limitRange.Spec.Limits[0].Min = newMin
 		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Creating a Pod with less than former min resources")
-		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
+		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Failing to create a Pod with more than max resources")
-		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
+		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		Expect(err).To(HaveOccurred())
 
@@ -183,144 +183,13 @@ var _ = SIGDescribe("LimitRange", func() {
 		})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
 
 		By("Creating a Pod with more than former max resources")
-		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi"), v1.ResourceList{})
+		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		Expect(err).NotTo(HaveOccurred())
 	})
 
 })
 
-var _ = framework.KubeDescribe("LimitRange", func() {
-	f := framework.NewDefaultFramework("limitrange")
-
-	BeforeEach(func() {
-		// only run the tests when LocalStorageCapacityIsolation feature is enabled
-		framework.SkipUnlessLocalEphemeralStorageEnabled()
-	})
-
-	It("should create a LimitRange with default ephemeral storage and ensure pod has the default applied.", func() {
-		By("Creating a LimitRange")
-
-		min := getEphemeralStorageResourceList("100Mi")
-		max := getEphemeralStorageResourceList("500Mi")
-		defaultLimit := getEphemeralStorageResourceList("500Mi")
-		defaultRequest := getEphemeralStorageResourceList("200Mi")
-		maxLimitRequestRatio := v1.ResourceList{}
-		limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
-			min, max,
-			defaultLimit, defaultRequest,
-			maxLimitRequestRatio)
-		limitRange, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
-		Expect(err).NotTo(HaveOccurred())
-
-		defer func() {
-			By("Removing limitrange")
-			err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(limitRange.Name, nil)
-			Expect(err).NotTo(HaveOccurred())
-		}()
-
-		By("Fetching the LimitRange to ensure it has proper values")
-		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
-		expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
-		actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
-		err = equalResourceRequirement(expected, actual)
-		Expect(err).NotTo(HaveOccurred())
-
-		By("Creating a Pod with no resource requirements")
-		pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
-
-		defer func() {
-			By("Removing pod")
-			err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, nil)
-			Expect(err).NotTo(HaveOccurred())
-		}()
-
-		By("Ensuring Pod has resource requirements applied from LimitRange")
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		for i := range pod.Spec.Containers {
-			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
-			if err != nil {
-				// Print the pod to help in debugging.
-				framework.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
-			}
-		}
-
-		By("Creating a Pod with request")
-		pod = f.NewTestPod("pod-partial-resources", getEphemeralStorageResourceList("150m"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
-
-		defer func() {
-			By("Removing pod")
-			err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, nil)
-			Expect(err).NotTo(HaveOccurred())
-		}()
-
-		By("Ensuring Pod has merged resource requirements applied from LimitRange")
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		// If you specify a Request, and no Limit, the Limit will be set to default limit
-		expected = v1.ResourceRequirements{Requests: getEphemeralStorageResourceList("150Mi"), Limits: defaultLimit}
-		for i := range pod.Spec.Containers {
-			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
-			if err != nil {
-				// Print the pod to help in debugging.
-				framework.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
-			}
-		}
-
-		By("Creating a Pod with limit")
-		pod = f.NewTestPod("pod-partial-resources", v1.ResourceList{}, getEphemeralStorageResourceList("300m"))
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
-
-		defer func() {
-			By("Removing pod")
-			err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, nil)
-			Expect(err).NotTo(HaveOccurred())
-		}()
-
-		By("Ensuring Pod has merged resource requirements applied from LimitRange")
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		// If you specify a Limit, and no Request, the Limit will default to the Request
-		// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
-		expected = v1.ResourceRequirements{Requests: getEphemeralStorageResourceList("300Mi"), Limits: getEphemeralStorageResourceList("300Mi")}
-		for i := range pod.Spec.Containers {
-			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
-			if err != nil {
-				// Print the pod to help in debugging.
-				framework.Logf("Pod %+v does not have the expected requirements", pod)
-				Expect(err).NotTo(HaveOccurred())
-			}
-		}
-
-		By("Failing to create a Pod with less than min resources")
-		pod = f.NewTestPod(podName, getEphemeralStorageResourceList("50Mi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
-
-		By("Failing to create a Pod with more than max resources")
-		pod = f.NewTestPod(podName, getEphemeralStorageResourceList("600Mi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
-	})
-
-})
-
-func getEphemeralStorageResourceList(ephemeralStorage string) v1.ResourceList {
-	res := v1.ResourceList{}
-	if ephemeralStorage != "" {
-		res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
-	}
-	return res
-}
-
 func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
 	framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
 	err := equalResourceList(expected.Requests, actual.Requests)
@@ -346,7 +215,7 @@ func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
 	return nil
 }
 
-func getResourceList(cpu, memory string) v1.ResourceList {
+func getResourceList(cpu, memory string, ephemeralStorage string) v1.ResourceList {
 	res := v1.ResourceList{}
 	if cpu != "" {
 		res[v1.ResourceCPU] = resource.MustParse(cpu)

@@ -354,6 +223,9 @@ func getResourceList(cpu, memory string) v1.ResourceList {
 	if memory != "" {
 		res[v1.ResourceMemory] = resource.MustParse(memory)
 	}
+	if ephemeralStorage != "" {
+		res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
+	}
 	return res
 }
 
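With the extra parameter, every LimitRange call site above passes an ephemeral-storage quantity alongside CPU and memory. A self-contained restatement of the updated helper together with one call taken from this diff; the main function is only illustrative scaffolding.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// getResourceList mirrors the e2e helper after this change: an empty string
// means "leave that resource unset".
func getResourceList(cpu, memory, ephemeralStorage string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	if ephemeralStorage != "" {
		res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
	}
	return res
}

func main() {
	min := getResourceList("50m", "100Mi", "100Gi") // as in the LimitRange test above
	fmt.Println(len(min))                           // 3
}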
@@ -43,83 +43,6 @@ const (
 var classGold string = "gold"
 var extendedResourceName string = "example.com/dongle"
 
-var _ = SIGDescribe("ResourceQuota", func() {
-	f := framework.NewDefaultFramework("resourcequota")
-
-	BeforeEach(func() {
-		// only run the tests when LocalStorageCapacityIsolation feature is enabled
-		framework.SkipUnlessLocalEphemeralStorageEnabled()
-	})
-
-	It("should create a ResourceQuota and capture the life of a pod.", func() {
-		By("Creating a ResourceQuota")
-		quotaName := "test-quota"
-		resourceQuota := newTestResourceQuotaForEphemeralStorage(quotaName)
-		resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
-		Expect(err).NotTo(HaveOccurred())
-
-		defer func() {
-			By("Removing resourceQuota")
-			err = deleteResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota.Name)
-			Expect(err).NotTo(HaveOccurred())
-		}()
-
-		By("Ensuring resource quota status is calculated")
-		usedResources := v1.ResourceList{}
-		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
-		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
-		Expect(err).NotTo(HaveOccurred())
-
-		By("Creating a Pod that fits quota")
-		podName := "test-pod"
-		requests := v1.ResourceList{}
-		requests[v1.ResourceEphemeralStorage] = resource.MustParse("300Mi")
-		pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
-		podToUpdate := pod
-
-		defer func() {
-			By("Deleting the pod")
-			err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
-			Expect(err).NotTo(HaveOccurred())
-		}()
-
-		By("Ensuring ResourceQuota status captures the pod usage")
-		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
-		usedResources[v1.ResourcePods] = resource.MustParse("1")
-		usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
-		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
-		Expect(err).NotTo(HaveOccurred())
-
-		By("Not allowing a pod to be created that exceeds remaining quota")
-		requests = v1.ResourceList{}
-		requests[v1.ResourceEphemeralStorage] = resource.MustParse("300Mi")
-		pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-		Expect(err).To(HaveOccurred())
-
-		By("Ensuring a pod cannot update its resource requirements")
-		// a pod cannot dynamically update its resource requirements.
-		requests = v1.ResourceList{}
-		requests[v1.ResourceEphemeralStorage] = resource.MustParse("100Mi")
-		podToUpdate.Spec.Containers[0].Resources.Requests = requests
-		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
-		Expect(err).To(HaveOccurred())
-
-		By("Ensuring attempts to update pod resource requirements did not change quota usage")
-		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
-		Expect(err).NotTo(HaveOccurred())
-
-		By("Ensuring resource quota status released the pod usage")
-		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
-		usedResources[v1.ResourcePods] = resource.MustParse("0")
-		usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
-		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
-		Expect(err).NotTo(HaveOccurred())
-	})
-})
-
 var _ = SIGDescribe("ResourceQuota", func() {
 	f := framework.NewDefaultFramework("resourcequota")
 
@@ -372,6 +295,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		limits := v1.ResourceList{}
 		requests[v1.ResourceCPU] = resource.MustParse("500m")
 		requests[v1.ResourceMemory] = resource.MustParse("252Mi")
+		requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
 		requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
 		limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
 		pod := newTestPodForQuota(f, podName, requests, limits)

@@ -384,6 +308,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		usedResources[v1.ResourcePods] = resource.MustParse("1")
 		usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
 		usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
+		usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
 		usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)]
 		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
 		Expect(err).NotTo(HaveOccurred())

@@ -401,6 +326,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		limits = v1.ResourceList{}
 		requests[v1.ResourceCPU] = resource.MustParse("500m")
 		requests[v1.ResourceMemory] = resource.MustParse("100Mi")
+		requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
 		requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
 		limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
 		pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)

@@ -412,6 +338,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		requests = v1.ResourceList{}
 		requests[v1.ResourceCPU] = resource.MustParse("100m")
 		requests[v1.ResourceMemory] = resource.MustParse("100Mi")
+		requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
 		podToUpdate.Spec.Containers[0].Resources.Requests = requests
 		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
 		Expect(err).To(HaveOccurred())

@@ -429,6 +356,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		usedResources[v1.ResourcePods] = resource.MustParse("0")
 		usedResources[v1.ResourceCPU] = resource.MustParse("0")
 		usedResources[v1.ResourceMemory] = resource.MustParse("0")
+		usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
 		usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0")
 		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
 		Expect(err).NotTo(HaveOccurred())

@@ -821,10 +749,11 @@ func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1
 	}
 }
 
-// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing alpha feature LocalStorageCapacityIsolation
+// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing feature LocalStorageCapacityIsolation
 func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
 	hard := v1.ResourceList{}
 	hard[v1.ResourceEphemeralStorage] = resource.MustParse("500Mi")
 	hard[v1.ResourceQuotas] = resource.MustParse("1")
 	return &v1.ResourceQuota{
 		ObjectMeta: metav1.ObjectMeta{Name: name},
 		Spec:       v1.ResourceQuotaSpec{Hard: hard},

@@ -846,6 +775,7 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
 	hard[v1.ResourceSecrets] = resource.MustParse("10")
 	hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
 	hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
+	hard[v1.ResourceEphemeralStorage] = resource.MustParse("50Gi")
 	hard[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
 	hard[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
 	// test quota on discovered resource type