/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"fmt"
	"time"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/runtime"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/util/workqueue"
	"k8s.io/kubernetes/pkg/watch"
)

// ResourceQuotaController is responsible for tracking quota usage status in the system
type ResourceQuotaController struct {
	// Must have authority to list all resources in the system, and update quota status
	kubeClient clientset.Interface
	// An index of resource quota objects by namespace
	rqIndexer cache.Indexer
	// Watches changes to all resource quota
	rqController *framework.Controller
	// A store of pods, populated by the podController
	podStore cache.StoreToPodLister
	// Watches changes to all pods (so we can optimize release of compute resources)
	podController *framework.Controller
	// ResourceQuota objects that need to be synchronized
	queue *workqueue.Type
	// To allow injection of syncUsage for testing.
	syncHandler func(key string) error
	// function that controls full recalculation of quota usage
	resyncPeriod controller.ResyncPeriodFunc
}

// NewResourceQuotaController creates a new ResourceQuotaController
func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *ResourceQuotaController {
	rq := &ResourceQuotaController{
		kubeClient:   kubeClient,
		queue:        workqueue.New(),
		resyncPeriod: resyncPeriod,
	}

	rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
			},
		},
		&api.ResourceQuota{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    rq.enqueueResourceQuota,
			UpdateFunc: func(old, cur interface{}) {
				// We are only interested in observing updates to quota.spec to drive updates to quota.status.
				// We ignore all updates to quota.Status because they are all driven by this controller.
				// IMPORTANT:
				// We do not use this function to queue up a full quota recalculation. To do so would require
				// us to enqueue all quota.Status updates, and since quota.Status updates involve additional queries
				// that cannot be backed by a cache and result in a full query of a namespace's content, we do not
				// want to pay the price on spurious status updates. As a result, we have a separate routine that is
				// responsible for enqueueing all resource quotas when doing a full resync (enqueueAll).
				oldResourceQuota := old.(*api.ResourceQuota)
				curResourceQuota := cur.(*api.ResourceQuota)
				if api.Semantic.DeepEqual(oldResourceQuota.Spec.Hard, curResourceQuota.Spec.Hard) {
					return
				}
				glog.V(4).Infof("Observed updated quota spec for %v/%v", curResourceQuota.Namespace, curResourceQuota.Name)
				rq.enqueueResourceQuota(curResourceQuota)
			},
			// This will enter the sync loop and no-op, because the resource quota has been deleted from the store.
			DeleteFunc: rq.enqueueResourceQuota,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	// We use this pod controller to rapidly observe when a pod deletion occurs in order to
	// release compute resources from any associated quota.
	rq.podStore.Store, rq.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rq.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rq.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: rq.deletePod,
		},
	)

	// set the synchronization handler
	rq.syncHandler = rq.syncResourceQuotaFromKey
	return rq
}
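
// A minimal wiring sketch (for illustration only; the surrounding manager code is an
// assumption, not part of this file): a caller such as a controller manager constructs
// the controller with a client and a resync period, then runs it with a worker count
// and a stop channel.
//
//	rq := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(5*time.Minute))
//	go rq.Run(2, wait.NeverStop)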

// enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
func (rq *ResourceQuotaController) enqueueAll() {
	defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
	for _, k := range rq.rqIndexer.ListKeys() {
		rq.queue.Add(k)
	}
}

// obj could be an *api.ResourceQuota, or a DeletionFinalStateUnknown marker item.
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}
	rq.queue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (rq *ResourceQuotaController) worker() {
	for {
		func() {
			key, quit := rq.queue.Get()
			if quit {
				return
			}
			// the anonymous function scopes the defer so Done is called once per dequeued key
			defer rq.queue.Done(key)
			err := rq.syncHandler(key.(string))
			if err != nil {
				utilruntime.HandleError(err)
			}
		}()
	}
}

// Run begins quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rq.rqController.Run(stopCh)
	go rq.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(rq.worker, time.Second, stopCh)
	}
	go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
	glog.Infof("Shutting down ResourceQuotaController")
	rq.queue.ShutDown()
}

// FilterQuotaPods eliminates pods that no longer have a cost against the quota:
// - pods with a restart policy of Always are always returned
// - failed pods with a restart policy of OnFailure are always returned
// - pods that are neither succeeded nor failed are included in quota
func FilterQuotaPods(pods []api.Pod) []*api.Pod {
	var result []*api.Pod
	for i := range pods {
		value := &pods[i]
		// a pod that has a restart policy of Always counts against usage no matter its state
		if value.Spec.RestartPolicy == api.RestartPolicyAlways {
			result = append(result, value)
			continue
		}
		// a failed pod with a restart policy of OnFailure will count against usage
		if api.PodFailed == value.Status.Phase &&
			value.Spec.RestartPolicy == api.RestartPolicyOnFailure {
			result = append(result, value)
			continue
		}
		// if the pod is not succeeded or failed, then we count it against quota
		if api.PodSucceeded != value.Status.Phase &&
			api.PodFailed != value.Status.Phase {
			result = append(result, value)
			continue
		}
	}
	return result
}

// syncResourceQuotaFromKey syncs a quota key
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
	}()

	obj, exists, err := rq.rqIndexer.GetByKey(key)
	if !exists {
		glog.Infof("Resource quota %v has been deleted", key)
		return nil
	}
	if err != nil {
		glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
		rq.queue.Add(key)
		return err
	}
	quota := *obj.(*api.ResourceQuota)
	return rq.syncResourceQuota(quota)
}

// syncResourceQuota runs a complete sync of current status
func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (err error) {
	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)

	// dirty tracks if the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status.
	// if this is our first sync, it will be dirty by default, since we need to track usage
	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)

	// Create a usage object that is based on the quota resource version
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            quota.Name,
			Namespace:       quota.Namespace,
			ResourceVersion: quota.ResourceVersion,
			Labels:          quota.Labels,
			Annotations:     quota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		},
	}

	// set the hard values supported on the quota
	for k, v := range quota.Spec.Hard {
		usage.Status.Hard[k] = *v.Copy()
	}
	// set any last known observed status values for usage
	for k, v := range quota.Status.Used {
		usage.Status.Used[k] = *v.Copy()
	}

	set := map[api.ResourceName]bool{}
	for k := range usage.Status.Hard {
		set[k] = true
	}

	pods := &api.PodList{}
	if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
		pods, err = rq.kubeClient.Core().Pods(usage.Namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
	}

	filteredPods := FilterQuotaPods(pods.Items)

	// iterate over each resource, and update observation
	for k := range usage.Status.Hard {

		// look if there is a used value, if none, we are definitely dirty
		prevQuantity, found := usage.Status.Used[k]
		if !found {
			dirty = true
		}

		var value *resource.Quantity

		switch k {
		case api.ResourcePods:
			value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
		case api.ResourceServices:
			items, err := rq.kubeClient.Core().Services(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceReplicationControllers:
			items, err := rq.kubeClient.Core().ReplicationControllers(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceQuotas:
			items, err := rq.kubeClient.Core().ResourceQuotas(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceSecrets:
			items, err := rq.kubeClient.Core().Secrets(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourcePersistentVolumeClaims:
			items, err := rq.kubeClient.Core().PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceMemory:
			value = PodsRequests(filteredPods, api.ResourceMemory)
		case api.ResourceCPU:
			value = PodsRequests(filteredPods, api.ResourceCPU)
		}

		// ignore fields we do not understand (assume another controller is tracking it)
		if value != nil {
			// see if the value has changed
			dirty = dirty || (value.Value() != prevQuantity.Value())
			// just update the value
			usage.Status.Used[k] = *value
		}
	}

	// update the usage only if it changed
	if dirty {
		_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
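
// Illustration (hypothetical numbers, not taken from this code): for a quota whose
// spec.hard declares pods=10 and memory=1Gi, a sync lists the namespace's pods once,
// filters out terminated pods that no longer charge the quota, and records
// status.hard{pods: 10, memory: 1Gi} together with status.used{pods: <filtered pod
// count>, memory: <sum of memory requests across filtered pods>}, calling UpdateStatus
// only when some value actually changed.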

// PodsRequests returns the sum of each resource request for each pod in the list.
// If a given pod in the list does not have a request for the named resource, we log it
// but still attempt to get the most representative count.
func PodsRequests(pods []*api.Pod, resourceName api.ResourceName) *resource.Quantity {
	var sum *resource.Quantity
	for i := range pods {
		pod := pods[i]
		podQuantity, err := PodRequests(pod, resourceName)
		if err != nil {
			// log the error, but try to keep the most accurate count possible
			// rationale here is that you may have had pods in a namespace that did not have
			// explicit requests prior to adding the quota
			glog.Infof("No explicit request for resource, pod %s/%s, %s", pod.Namespace, pod.Name, resourceName)
		} else {
			if sum == nil {
				sum = podQuantity
			} else {
				sum.Add(*podQuantity)
			}
		}
	}
	// if the list is empty, return a zero quantity
	if sum == nil {
		q := resource.MustParse("0")
		sum = &q
	}
	return sum
}
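
// Worked example (a hypothetical pod used only for illustration): a pod whose two
// containers request 100m and 200m of CPU yields a PodRequests value of 300m for
// api.ResourceCPU, and PodsRequests sums that per-pod total across the filtered
// list, logging and skipping any pod that lacks an explicit request in every
// container.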

// PodRequests returns the sum of each resource request across all containers in the pod
func PodRequests(pod *api.Pod, resourceName api.ResourceName) (*resource.Quantity, error) {
	if !PodHasRequests(pod, resourceName) {
		return nil, fmt.Errorf("not every container in pod %s/%s has an explicit request for resource %s", pod.Namespace, pod.Name, resourceName)
	}
	var sum *resource.Quantity
	for j := range pod.Spec.Containers {
		value := pod.Spec.Containers[j].Resources.Requests[resourceName]
		if sum == nil {
			sum = value.Copy()
		} else {
			err := sum.Add(value)
			if err != nil {
				return sum, err
			}
		}
	}
	// if the pod has no containers, return a zero quantity
	if sum == nil {
		q := resource.MustParse("0")
		sum = &q
	}
	return sum, nil
}

// PodHasRequests verifies that each container in the pod has an explicit, non-zero request for the named resource
func PodHasRequests(pod *api.Pod, resourceName api.ResourceName) bool {
	for j := range pod.Spec.Containers {
		value, valueSet := pod.Spec.Containers[j].Resources.Requests[resourceName]
		if !valueSet || value.Value() == int64(0) {
			return false
		}
	}
	return true
}

// When a pod is deleted, enqueue any quota in the pod's namespace so usage can be recalculated.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
func (rq *ResourceQuotaController) deletePod(obj interface{}) {
	pod, ok := obj.(*api.Pod)
	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a quota records the deletion", obj, rq.resyncPeriod())
			return
		}
		pod, ok = tombstone.Obj.(*api.Pod)
		if !ok {
			glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before quota records the deletion", obj, rq.resyncPeriod())
			return
		}
	}

	quotas, err := rq.rqIndexer.Index("namespace", pod)
	if err != nil {
		glog.Errorf("Couldn't find resource quota associated with pod %+v, could take up to %v before a quota records the deletion", obj, rq.resyncPeriod())
	}
	if len(quotas) == 0 {
		glog.V(4).Infof("No resource quota associated with namespace %q", pod.Namespace)
		return
	}
	for i := range quotas {
		quota := quotas[i].(*api.ResourceQuota)
		rq.enqueueResourceQuota(quota)
	}
}