mirror of https://github.com/k3s-io/k3s

Ability to quota storage by storage class

parent 7168fce59a
commit 459a7a05f1
@@ -197,6 +197,7 @@ pkg/kubelet/volumemanager/populator
 pkg/kubelet/volumemanager/reconciler
 pkg/proxy/config
 pkg/proxy/healthcheck
+pkg/quota
 pkg/quota/install
 pkg/registry
 pkg/registry/authorization/util
@@ -182,9 +182,8 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
 	for constraint := range resourceQuota.Status.Hard {
 		if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
 			matchedResources := []api.ResourceName{api.ResourceName(constraint)}
-
 			for _, evaluator := range rq.registry.Evaluators() {
-				if intersection := quota.Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) != 0 {
+				if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
 					rq.missingUsageQueue.Add(key)
 					return
 				}
@@ -348,7 +347,6 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, na
 	}
 
 	// only queue those quotas that are tracking a resource associated with this kind.
-	matchedResources := evaluator.MatchesResources()
 	for i := range resourceQuotas {
 		resourceQuota := resourceQuotas[i].(*v1.ResourceQuota)
 		internalResourceQuota := &api.ResourceQuota{}
@@ -357,7 +355,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, na
 			continue
 		}
 		resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
-		if len(quota.Intersection(matchedResources, resourceQuotaResources)) > 0 {
+		if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 {
 			// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
 			rq.enqueueResourceQuota(resourceQuota)
 		}
@@ -533,12 +533,18 @@ func memory(stats statsFunc) cmpFunc {
 
 		// adjust p1, p2 usage relative to the request (if any)
 		p1Memory := p1Usage[v1.ResourceMemory]
-		p1Spec := core.PodUsageFunc(p1)
+		p1Spec, err := core.PodUsageFunc(p1)
+		if err != nil {
+			return -1
+		}
 		p1Request := p1Spec[api.ResourceRequestsMemory]
 		p1Memory.Sub(p1Request)
 
 		p2Memory := p2Usage[v1.ResourceMemory]
-		p2Spec := core.PodUsageFunc(p2)
+		p2Spec, err := core.PodUsageFunc(p2)
+		if err != nil {
+			return 1
+		}
 		p2Request := p2Spec[api.ResourceRequestsMemory]
 		p2Memory.Sub(p2Request)
 
@@ -30,7 +30,7 @@ go_library(
         "//pkg/api/resource:go_default_library",
         "//pkg/api/v1:go_default_library",
        "//pkg/api/validation:go_default_library",
-        "//pkg/apis/meta/v1:go_default_library",
+        "//pkg/apis/storage/util:go_default_library",
         "//pkg/client/clientset_generated/release_1_5:go_default_library",
         "//pkg/controller/informers:go_default_library",
         "//pkg/kubelet/qos:go_default_library",
@@ -56,6 +56,7 @@ go_test(
         "//pkg/api:go_default_library",
         "//pkg/api/resource:go_default_library",
         "//pkg/apis/meta/v1:go_default_library",
+        "//pkg/apis/storage/util:go_default_library",
         "//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
         "//pkg/quota:go_default_library",
     ],
@@ -17,7 +17,6 @@ limitations under the License.
 package core
 
 import (
-	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@@ -28,17 +27,10 @@ import (
 
 // NewConfigMapEvaluator returns an evaluator that can evaluate configMaps
 func NewConfigMapEvaluator(kubeClient clientset.Interface) quota.Evaluator {
-	allResources := []api.ResourceName{api.ResourceConfigMaps}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.ConfigMap",
-		InternalGroupKind: api.Kind("ConfigMap"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-		},
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: generic.MatchesNoScopeFunc,
-		ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceConfigMaps),
-		UsageFunc: generic.ObjectCountUsageFunc(api.ResourceConfigMaps),
+	return &generic.ObjectCountEvaluator{
+		AllowCreateOnUpdate: false,
+		InternalGroupKind: api.Kind("ConfigMap"),
+		ResourceName: api.ResourceConfigMaps,
 		ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
 			itemList, err := kubeClient.Core().ConfigMaps(namespace).List(options)
 			if err != nil {
@@ -24,6 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/apis/storage/util"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/quota"
@@ -33,6 +34,31 @@ import (
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
+// pvcResources are the set of static resources managed by quota associated with pvcs.
+// for each resource in this list, it may be refined dynamically based on storage class.
+var pvcResources = []api.ResourceName{
+	api.ResourcePersistentVolumeClaims,
+	api.ResourceRequestsStorage,
+}
+
+// storageClassSuffix is the suffix to the qualified portion of storage class resource name.
+// For example, if you want to quota storage by storage class, you would have a declaration
+// that follows <storage-class>.storageclass.storage.k8s.io/<resource>.
+// For example:
+// * gold.storageclass.storage.k8s.io/: 500Gi
+// * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi
+const storageClassSuffix string = ".storageclass.storage.k8s.io/"
+
+// ResourceByStorageClass returns a quota resource name by storage class.
+func ResourceByStorageClass(storageClass string, resourceName api.ResourceName) api.ResourceName {
+	return api.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
+}
+
+// V1ResourceByStorageClass returns a quota resource name by storage class.
+func V1ResourceByStorageClass(storageClass string, resourceName v1.ResourceName) v1.ResourceName {
+	return v1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
+}
+
 // listPersistentVolumeClaimsByNamespaceFuncUsingClient returns a pvc listing function based on the provided client.
 func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace {
 	// TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this.
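Aside (illustrative, not part of the commit): a minimal sketch of how the storage-class-qualified quota resource names defined above are composed. The helper mirrors ResourceByStorageClass using plain strings; the class names and resource strings are just examples.

package main

import "fmt"

// storageClassSuffix mirrors the constant introduced in the hunk above.
const storageClassSuffix = ".storageclass.storage.k8s.io/"

// resourceByStorageClass mirrors ResourceByStorageClass, reduced to plain strings.
func resourceByStorageClass(storageClass, resourceName string) string {
	return storageClass + storageClassSuffix + resourceName
}

func main() {
	// Quota keys a "gold" claim is charged against, in addition to the global ones:
	fmt.Println(resourceByStorageClass("gold", "persistentvolumeclaims"))
	// gold.storageclass.storage.k8s.io/persistentvolumeclaims
	fmt.Println(resourceByStorageClass("gold", "requests.storage"))
	// gold.storageclass.storage.k8s.io/requests.storage
}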
@@ -54,59 +80,49 @@ func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.I
 // NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
 // if the specified shared informer factory is not nil, evaluator may use it to support listing functions.
 func NewPersistentVolumeClaimEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
-	allResources := []api.ResourceName{api.ResourcePersistentVolumeClaims, api.ResourceRequestsStorage}
 	listFuncByNamespace := listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient)
 	if f != nil {
 		listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, schema.GroupResource{Resource: "persistentvolumeclaims"})
 	}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.PersistentVolumeClaim",
-		InternalGroupKind: api.Kind("PersistentVolumeClaim"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-		},
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: generic.MatchesNoScopeFunc,
-		ConstraintsFunc: PersistentVolumeClaimConstraintsFunc,
-		UsageFunc: PersistentVolumeClaimUsageFunc,
-		ListFuncByNamespace: listFuncByNamespace,
+	return &pvcEvaluator{
+		listFuncByNamespace: listFuncByNamespace,
 	}
 }
 
-// PersistentVolumeClaimUsageFunc knows how to measure usage associated with persistent volume claims
-func PersistentVolumeClaimUsageFunc(object runtime.Object) api.ResourceList {
-	result := api.ResourceList{}
-	var found bool
-	var request resource.Quantity
-
-	switch t := object.(type) {
-	case *v1.PersistentVolumeClaim:
-		request, found = t.Spec.Resources.Requests[v1.ResourceStorage]
-	case *api.PersistentVolumeClaim:
-		request, found = t.Spec.Resources.Requests[api.ResourceStorage]
-	default:
-		panic(fmt.Sprintf("expect *api.PersistenVolumeClaim or *v1.PersistentVolumeClaim, got %v", t))
-	}
-
-	result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
-	if found {
-		result[api.ResourceRequestsStorage] = request
-	}
-	return result
+// pvcEvaluator knows how to evaluate quota usage for persistent volume claims
+type pvcEvaluator struct {
+	// listFuncByNamespace knows how to list pvc claims
+	listFuncByNamespace generic.ListFuncByNamespace
 }
 
-// PersistentVolumeClaimConstraintsFunc verifies that all required resources are present on the claim
-// In addition, it validates that the resources are valid (i.e. requests < limits)
-func PersistentVolumeClaimConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
-	pvc, ok := object.(*api.PersistentVolumeClaim)
+// Constraints verifies that all required resources are present on the item.
+func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
+	pvc, ok := item.(*api.PersistentVolumeClaim)
 	if !ok {
-		return fmt.Errorf("unexpected input object %v", object)
+		return fmt.Errorf("unexpected input object %v", item)
 	}
 
-	requiredSet := quota.ToSet(required)
+	// these are the items that we will be handling based on the object's actual storage class
+	pvcRequiredSet := append([]api.ResourceName{}, pvcResources...)
+	if storageClassRef := util.GetClaimStorageClass(pvc); len(storageClassRef) > 0 {
+		pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourcePersistentVolumeClaims))
+		pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourceRequestsStorage))
+	}
+
+	// in effect, this will remove things from the required set that are not tied to this pvc's storage class
+	// for example, if a quota has bronze and gold storage class items defined, we should not error a bronze pvc for not being gold.
+	// but we should error a bronze pvc if it doesn't make a storage request size...
+	requiredResources := quota.Intersection(required, pvcRequiredSet)
+	requiredSet := quota.ToSet(requiredResources)
+
+	// usage for this pvc will only include global pvc items + this storage class specific items
+	pvcUsage, err := p.Usage(item)
+	if err != nil {
+		return err
+	}
+
+	// determine what required resources were not tracked by usage.
 	missingSet := sets.NewString()
-	pvcUsage := PersistentVolumeClaimUsageFunc(pvc)
 	pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage))
 	if diff := requiredSet.Difference(pvcSet); len(diff) > 0 {
 		missingSet.Insert(diff.List()...)
@@ -116,3 +132,89 @@ func PersistentVolumeClaimConstraintsFunc(required []api.ResourceName, object ru
 	}
 	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
 }
+
+// GroupKind that this evaluator tracks
+func (p *pvcEvaluator) GroupKind() schema.GroupKind {
+	return api.Kind("PersistentVolumeClaim")
+}
+
+// Handles returns true if the evaluator should handle the specified operation.
+func (p *pvcEvaluator) Handles(operation admission.Operation) bool {
+	return admission.Create == operation
+}
+
+// Matches returns true if the evaluator matches the specified quota with the provided input item
+func (p *pvcEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
+	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
+}
+
+// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
+func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.ResourceName {
+	result := []api.ResourceName{}
+	for _, item := range items {
+		if quota.Contains(pvcResources, item) {
+			result = append(result, item)
+			continue
+		}
+		// match pvc resources scoped by storage class (<storage-class-name>.storage-class.kubernetes.io/<resource>)
+		for _, resource := range pvcResources {
+			byStorageClass := storageClassSuffix + string(resource)
+			if strings.HasSuffix(string(item), byStorageClass) {
+				result = append(result, item)
+				break
+			}
+		}
+	}
+	return result
+}
+
+// Usage knows how to measure usage associated with item.
+func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
+	result := api.ResourceList{}
+	pvc, err := toInternalPersistentVolumeClaimOrError(item)
+	if err != nil {
+		return result, err
+	}
+	storageClassRef := util.GetClaimStorageClass(pvc)
+
+	// charge for claim
+	result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
+	if len(storageClassRef) > 0 {
+		storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims))
+		result[storageClassClaim] = resource.MustParse("1")
+	}
+
+	// charge for storage
+	if request, found := pvc.Spec.Resources.Requests[api.ResourceStorage]; found {
+		result[api.ResourceRequestsStorage] = request
+		// charge usage to the storage class (if present)
+		if len(storageClassRef) > 0 {
+			storageClassStorage := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourceRequestsStorage))
+			result[storageClassStorage] = request
+		}
+	}
+	return result, nil
+}
+
+// UsageStats calculates aggregate usage for the object.
+func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
+	return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
+}
+
+// ensure we implement required interface
+var _ quota.Evaluator = &pvcEvaluator{}
+
+func toInternalPersistentVolumeClaimOrError(obj runtime.Object) (*api.PersistentVolumeClaim, error) {
+	pvc := &api.PersistentVolumeClaim{}
+	switch t := obj.(type) {
+	case *v1.PersistentVolumeClaim:
+		if err := v1.Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(t, pvc, nil); err != nil {
+			return nil, err
+		}
+	case *api.PersistentVolumeClaim:
+		pvc = t
+	default:
+		return nil, fmt.Errorf("expect *api.PersistentVolumeClaim or *v1.PersistentVolumeClaim, got %v", t)
+	}
+	return pvc, nil
+}
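Aside (illustrative, not part of the commit): the suffix matching used by MatchingResources above is what lets a quota name resources per storage class without the evaluator enumerating every class up front. A self-contained sketch of that idea, using plain strings and an invented quota spec:

package main

import (
	"fmt"
	"strings"
)

// pvcResources mirrors the static names tracked above, as plain strings.
var pvcResources = []string{"persistentvolumeclaims", "requests.storage"}

const storageClassSuffix = ".storageclass.storage.k8s.io/"

// matchingResources keeps quota resource names that are either a static pvc
// resource or a storage-class-qualified form of one, mirroring the logic above.
func matchingResources(items []string) []string {
	var result []string
	for _, item := range items {
		for _, r := range pvcResources {
			if item == r || strings.HasSuffix(item, storageClassSuffix+r) {
				result = append(result, item)
				break
			}
		}
	}
	return result
}

func main() {
	quotaHard := []string{
		"configmaps",
		"requests.storage",
		"gold.storageclass.storage.k8s.io/requests.storage",
	}
	fmt.Println(matchingResources(quotaHard))
	// [requests.storage gold.storageclass.storage.k8s.io/requests.storage]
}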
@@ -22,6 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/apis/storage/util"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
 	"k8s.io/kubernetes/pkg/quota"
 )
@@ -53,6 +54,52 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
 			},
 		},
 	})
+	validClaimGoldStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+		Selector: &metav1.LabelSelector{
+			MatchExpressions: []metav1.LabelSelectorRequirement{
+				{
+					Key: "key2",
+					Operator: "Exists",
+				},
+			},
+		},
+		AccessModes: []api.PersistentVolumeAccessMode{
+			api.ReadWriteOnce,
+			api.ReadOnlyMany,
+		},
+		Resources: api.ResourceRequirements{
+			Requests: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+		},
+	})
+	validClaimGoldStorageClass.Annotations = map[string]string{
+		util.StorageClassAnnotation: "gold",
+	}
+
+	validClaimBronzeStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+		Selector: &metav1.LabelSelector{
+			MatchExpressions: []metav1.LabelSelectorRequirement{
+				{
+					Key: "key2",
+					Operator: "Exists",
+				},
+			},
+		},
+		AccessModes: []api.PersistentVolumeAccessMode{
+			api.ReadWriteOnce,
+			api.ReadOnlyMany,
+		},
+		Resources: api.ResourceRequirements{
+			Requests: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+		},
+	})
+	validClaimBronzeStorageClass.Annotations = map[string]string{
+		util.StorageClassAnnotation: "bronze",
+	}
+
 	missingStorage := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
 		Selector: &metav1.LabelSelector{
 			MatchExpressions: []metav1.LabelSelectorRequirement{
@@ -71,6 +118,27 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
 		},
 	})
 
+	missingGoldStorage := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+		Selector: &metav1.LabelSelector{
+			MatchExpressions: []metav1.LabelSelectorRequirement{
+				{
+					Key: "key2",
+					Operator: "Exists",
+				},
+			},
+		},
+		AccessModes: []api.PersistentVolumeAccessMode{
+			api.ReadWriteOnce,
+			api.ReadOnlyMany,
+		},
+		Resources: api.ResourceRequirements{
+			Requests: api.ResourceList{},
+		},
+	})
+	missingGoldStorage.Annotations = map[string]string{
+		util.StorageClassAnnotation: "gold",
+	}
+
 	testCases := map[string]struct {
 		pvc *api.PersistentVolumeClaim
 		required []api.ResourceName
@@ -81,6 +149,11 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
 			required: []api.ResourceName{api.ResourceRequestsStorage},
 			err: `must specify requests.storage`,
 		},
+		"missing gold storage": {
+			pvc: missingGoldStorage,
+			required: []api.ResourceName{ResourceByStorageClass("gold", api.ResourceRequestsStorage)},
+			err: `must specify gold.storageclass.storage.k8s.io/requests.storage`,
+		},
 		"valid-claim-quota-storage": {
 			pvc: validClaim,
 			required: []api.ResourceName{api.ResourceRequestsStorage},
@@ -93,9 +166,30 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
 			pvc: validClaim,
 			required: []api.ResourceName{api.ResourceRequestsStorage, api.ResourcePersistentVolumeClaims},
 		},
+		"valid-claim-gold-quota-gold": {
+			pvc: validClaimGoldStorageClass,
+			required: []api.ResourceName{
+				api.ResourceRequestsStorage,
+				api.ResourcePersistentVolumeClaims,
+				ResourceByStorageClass("gold", api.ResourceRequestsStorage),
+				ResourceByStorageClass("gold", api.ResourcePersistentVolumeClaims),
+			},
+		},
+		"valid-claim-bronze-with-quota-gold": {
+			pvc: validClaimBronzeStorageClass,
+			required: []api.ResourceName{
+				api.ResourceRequestsStorage,
+				api.ResourcePersistentVolumeClaims,
+				ResourceByStorageClass("gold", api.ResourceRequestsStorage),
+				ResourceByStorageClass("gold", api.ResourcePersistentVolumeClaims),
+			},
+		},
 	}
+
+	kubeClient := fake.NewSimpleClientset()
+	evaluator := NewPersistentVolumeClaimEvaluator(kubeClient, nil)
 	for testName, test := range testCases {
-		err := PersistentVolumeClaimConstraintsFunc(test.required, test.pvc)
+		err := evaluator.Constraints(test.required, test.pvc)
 		switch {
 		case err != nil && len(test.err) == 0,
 			err == nil && len(test.err) != 0,
@@ -125,6 +219,29 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
 			},
 		},
 	})
+	validClaimByStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+		Selector: &metav1.LabelSelector{
+			MatchExpressions: []metav1.LabelSelectorRequirement{
+				{
+					Key: "key2",
+					Operator: "Exists",
+				},
+			},
+		},
+		AccessModes: []api.PersistentVolumeAccessMode{
+			api.ReadWriteOnce,
+			api.ReadOnlyMany,
+		},
+		Resources: api.ResourceRequirements{
+			Requests: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+			},
+		},
+	})
+	storageClassName := "gold"
+	validClaimByStorageClass.Annotations = map[string]string{
+		util.StorageClassAnnotation: storageClassName,
+	}
 
 	kubeClient := fake.NewSimpleClientset()
 	evaluator := NewPersistentVolumeClaimEvaluator(kubeClient, nil)
@@ -139,9 +256,21 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
 				api.ResourcePersistentVolumeClaims: resource.MustParse("1"),
 			},
 		},
+		"pvc-usage-by-class": {
+			pvc: validClaimByStorageClass,
+			usage: api.ResourceList{
+				api.ResourceRequestsStorage: resource.MustParse("10Gi"),
+				api.ResourcePersistentVolumeClaims: resource.MustParse("1"),
+				ResourceByStorageClass(storageClassName, api.ResourceRequestsStorage): resource.MustParse("10Gi"),
+				ResourceByStorageClass(storageClassName, api.ResourcePersistentVolumeClaims): resource.MustParse("1"),
+			},
+		},
 	}
 	for testName, testCase := range testCases {
-		actual := evaluator.Usage(testCase.pvc)
+		actual, err := evaluator.Usage(testCase.pvc)
+		if err != nil {
+			t.Errorf("%s unexpected error: %v", testName, err)
+		}
 		if !quota.Equals(testCase.usage, actual) {
 			t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual)
 		}
@@ -25,7 +25,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/api/validation"
-	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
@@ -37,6 +36,17 @@ import (
 	"k8s.io/kubernetes/pkg/util/validation/field"
 )
 
+// podResources are the set of resources managed by quota associated with pods.
+var podResources = []api.ResourceName{
+	api.ResourceCPU,
+	api.ResourceMemory,
+	api.ResourceRequestsCPU,
+	api.ResourceRequestsMemory,
+	api.ResourceLimitsCPU,
+	api.ResourceLimitsMemory,
+	api.ResourcePods,
+}
+
 // listPodsByNamespaceFuncUsingClient returns a pod listing function based on the provided client.
 func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace {
 	// TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this.
@@ -58,44 +68,27 @@ func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.
 // NewPodEvaluator returns an evaluator that can evaluate pods
 // if the specified shared informer factory is not nil, evaluator may use it to support listing functions.
 func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
-	computeResources := []api.ResourceName{
-		api.ResourceCPU,
-		api.ResourceMemory,
-		api.ResourceRequestsCPU,
-		api.ResourceRequestsMemory,
-		api.ResourceLimitsCPU,
-		api.ResourceLimitsMemory,
-	}
-	allResources := append(computeResources, api.ResourcePods)
 	listFuncByNamespace := listPodsByNamespaceFuncUsingClient(kubeClient)
 	if f != nil {
 		listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, schema.GroupResource{Resource: "pods"})
 	}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.Pod",
-		InternalGroupKind: api.Kind("Pod"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-			// TODO: the quota system can only charge for deltas on compute resources when pods support updates.
-			// admission.Update: computeResources,
-		},
-		GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
-			return kubeClient.Core().Pods(namespace).Get(name, metav1.GetOptions{})
-		},
-		ConstraintsFunc: PodConstraintsFunc,
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: PodMatchesScopeFunc,
-		UsageFunc: PodUsageFunc,
-		ListFuncByNamespace: listFuncByNamespace,
+	return &podEvaluator{
+		listFuncByNamespace: listFuncByNamespace,
 	}
 }
 
-// PodConstraintsFunc verifies that all required resources are present on the pod
+// podEvaluator knows how to measure usage of pods.
+type podEvaluator struct {
+	// knows how to list pods
+	listFuncByNamespace generic.ListFuncByNamespace
+}
+
+// Constraints verifies that all required resources are present on the pod
 // In addition, it validates that the resources are valid (i.e. requests < limits)
-func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
-	pod, ok := object.(*api.Pod)
+func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
+	pod, ok := item.(*api.Pod)
 	if !ok {
-		return fmt.Errorf("Unexpected input object %v", object)
+		return fmt.Errorf("Unexpected input object %v", item)
 	}
 
 	// Pod level resources are often set during admission control
@@ -114,7 +107,7 @@ func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) erro
 	return allErrs.ToAggregate()
 }
 
-	// TODO: fix this when we have pod level cgroups
+	// TODO: fix this when we have pod level resource requirements
 	// since we do not yet have pod level requests/limits, we need to ensure each
 	// container makes an explicit request or limit for a quota tracked resource
 	requiredSet := quota.ToSet(required)
@@ -131,6 +124,40 @@ func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) erro
 	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
 }
 
+// GroupKind that this evaluator tracks
+func (p *podEvaluator) GroupKind() schema.GroupKind {
+	return api.Kind("Pod")
+}
+
+// Handles returns true if the evaluator should handle the specified operation.
+func (p *podEvaluator) Handles(operation admission.Operation) bool {
+	// TODO: update this if/when pods support resizing resource requirements.
+	return admission.Create == operation
+}
+
+// Matches returns true if the evaluator matches the specified quota with the provided input item
+func (p *podEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
+	return generic.Matches(resourceQuota, item, p.MatchingResources, podMatchesScopeFunc)
+}
+
+// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
+func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
+	return quota.Intersection(input, podResources)
+}
+
+// Usage knows how to measure usage associated with pods
+func (p *podEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
+	return PodUsageFunc(item)
+}
+
+// UsageStats calculates aggregate usage for the object.
+func (p *podEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
+	return generic.CalculateUsageStats(options, p.listFuncByNamespace, podMatchesScopeFunc, p.Usage)
+}
+
+// verifies we implement the required interface.
+var _ quota.Evaluator = &podEvaluator{}
+
 // enforcePodContainerConstraints checks for required resources that are not set on this container and
 // adds them to missingSet.
 func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) {
@@ -165,27 +192,49 @@ func podUsageHelper(requests api.ResourceList, limits api.ResourceList) api.Reso
 	return result
 }
 
-func toInternalPodOrDie(obj runtime.Object) *api.Pod {
+func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) {
 	pod := &api.Pod{}
 	switch t := obj.(type) {
 	case *v1.Pod:
 		if err := v1.Convert_v1_Pod_To_api_Pod(t, pod, nil); err != nil {
-			panic(err)
+			return nil, err
 		}
 	case *api.Pod:
 		pod = t
 	default:
-		panic(fmt.Sprintf("expect *api.Pod or *v1.Pod, got %v", t))
+		return nil, fmt.Errorf("expect *api.Pod or *v1.Pod, got %v", t)
 	}
-	return pod
+	return pod, nil
+}
+
+// podMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope
+func podMatchesScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) (bool, error) {
+	pod, err := toInternalPodOrError(object)
+	if err != nil {
+		return false, err
+	}
+	switch scope {
+	case api.ResourceQuotaScopeTerminating:
+		return isTerminating(pod), nil
+	case api.ResourceQuotaScopeNotTerminating:
+		return !isTerminating(pod), nil
+	case api.ResourceQuotaScopeBestEffort:
+		return isBestEffort(pod), nil
+	case api.ResourceQuotaScopeNotBestEffort:
+		return !isBestEffort(pod), nil
+	}
+	return false, nil
 }
 
 // PodUsageFunc knows how to measure usage associated with pods
-func PodUsageFunc(obj runtime.Object) api.ResourceList {
-	pod := toInternalPodOrDie(obj)
+func PodUsageFunc(obj runtime.Object) (api.ResourceList, error) {
+	pod, err := toInternalPodOrError(obj)
+	if err != nil {
+		return api.ResourceList{}, err
+	}
 	// by convention, we do not quota pods that have reached an end-of-life state
 	if !QuotaPod(pod) {
-		return api.ResourceList{}
+		return api.ResourceList{}, nil
 	}
 	requests := api.ResourceList{}
 	limits := api.ResourceList{}
@@ -203,23 +252,7 @@ func PodUsageFunc(obj runtime.Object) api.ResourceList {
 		limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
 	}
 
-	return podUsageHelper(requests, limits)
-}
-
-// PodMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope
-func PodMatchesScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bool {
-	pod := toInternalPodOrDie(object)
-	switch scope {
-	case api.ResourceQuotaScopeTerminating:
-		return isTerminating(pod)
-	case api.ResourceQuotaScopeNotTerminating:
-		return !isTerminating(pod)
-	case api.ResourceQuotaScopeBestEffort:
-		return isBestEffort(pod)
-	case api.ResourceQuotaScopeNotBestEffort:
-		return !isBestEffort(pod)
-	}
-	return false
+	return podUsageHelper(requests, limits), nil
 }
 
 func isBestEffort(pod *api.Pod) bool {
@@ -234,7 +267,6 @@ func isTerminating(pod *api.Pod) bool {
 }
 
 // QuotaPod returns true if the pod is eligible to track against a quota
-// if it's not in a terminal state according to its phase.
 func QuotaPod(pod *api.Pod) bool {
 	return !(api.PodFailed == pod.Status.Phase || api.PodSucceeded == pod.Status.Phase)
 }
@@ -86,8 +86,10 @@ func TestPodConstraintsFunc(t *testing.T) {
 			err: `must specify memory`,
 		},
 	}
+	kubeClient := fake.NewSimpleClientset()
+	evaluator := NewPodEvaluator(kubeClient, nil)
 	for testName, test := range testCases {
-		err := PodConstraintsFunc(test.required, test.pod)
+		err := evaluator.Constraints(test.required, test.pod)
 		switch {
 		case err != nil && len(test.err) == 0,
 			err == nil && len(test.err) != 0,
@@ -245,7 +247,10 @@ func TestPodEvaluatorUsage(t *testing.T) {
 		},
 	}
 	for testName, testCase := range testCases {
-		actual := evaluator.Usage(testCase.pod)
+		actual, err := evaluator.Usage(testCase.pod)
+		if err != nil {
+			t.Errorf("%s unexpected error: %v", testName, err)
+		}
 		if !quota.Equals(testCase.usage, actual) {
 			t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual)
 		}
@@ -17,7 +17,6 @@ limitations under the License.
 package core
 
 import (
-	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@@ -28,17 +27,10 @@ import (
 
 // NewReplicationControllerEvaluator returns an evaluator that can evaluate replication controllers
 func NewReplicationControllerEvaluator(kubeClient clientset.Interface) quota.Evaluator {
-	allResources := []api.ResourceName{api.ResourceReplicationControllers}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.ReplicationController",
-		InternalGroupKind: api.Kind("ReplicationController"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-		},
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: generic.MatchesNoScopeFunc,
-		ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceReplicationControllers),
-		UsageFunc: generic.ObjectCountUsageFunc(api.ResourceReplicationControllers),
+	return &generic.ObjectCountEvaluator{
+		AllowCreateOnUpdate: false,
+		InternalGroupKind: api.Kind("ReplicationController"),
+		ResourceName: api.ResourceReplicationControllers,
 		ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
 			itemList, err := kubeClient.Core().ReplicationControllers(namespace).List(options)
 			if err != nil {
@@ -17,7 +17,6 @@ limitations under the License.
 package core
 
 import (
-	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@@ -28,17 +27,10 @@ import (
 
 // NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
 func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
-	allResources := []api.ResourceName{api.ResourceQuotas}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.ResourceQuota",
-		InternalGroupKind: api.Kind("ResourceQuota"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-		},
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: generic.MatchesNoScopeFunc,
-		ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
-		UsageFunc: generic.ObjectCountUsageFunc(api.ResourceQuotas),
+	return &generic.ObjectCountEvaluator{
+		AllowCreateOnUpdate: false,
+		InternalGroupKind: api.Kind("ResourceQuota"),
+		ResourceName: api.ResourceQuotas,
 		ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
 			itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
 			if err != nil {
@@ -17,7 +17,6 @@ limitations under the License.
 package core
 
 import (
-	"k8s.io/kubernetes/pkg/admission"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@@ -28,17 +27,10 @@ import (
 
 // NewSecretEvaluator returns an evaluator that can evaluate secrets
 func NewSecretEvaluator(kubeClient clientset.Interface) quota.Evaluator {
-	allResources := []api.ResourceName{api.ResourceSecrets}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.Secret",
-		InternalGroupKind: api.Kind("Secret"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-		},
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: generic.MatchesNoScopeFunc,
-		ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceSecrets),
-		UsageFunc: generic.ObjectCountUsageFunc(api.ResourceSecrets),
+	return &generic.ObjectCountEvaluator{
+		AllowCreateOnUpdate: false,
+		InternalGroupKind: api.Kind("Secret"),
+		ResourceName: api.ResourceSecrets,
 		ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
 			itemList, err := kubeClient.Core().Secrets(namespace).List(options)
 			if err != nil {
@@ -28,28 +28,21 @@ import (
 	"k8s.io/kubernetes/pkg/quota"
 	"k8s.io/kubernetes/pkg/quota/generic"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/runtime/schema"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
+// serviceResources are the set of resources managed by quota associated with services.
+var serviceResources = []api.ResourceName{
+	api.ResourceServices,
+	api.ResourceServicesNodePorts,
+	api.ResourceServicesLoadBalancers,
+}
+
 // NewServiceEvaluator returns an evaluator that can evaluate service quotas
 func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator {
-	allResources := []api.ResourceName{
-		api.ResourceServices,
-		api.ResourceServicesNodePorts,
-		api.ResourceServicesLoadBalancers,
-	}
-	return &generic.GenericEvaluator{
-		Name: "Evaluator.Service",
-		InternalGroupKind: api.Kind("Service"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: allResources,
-			admission.Update: allResources,
-		},
-		MatchedResourceNames: allResources,
-		MatchesScopeFunc: generic.MatchesNoScopeFunc,
-		ConstraintsFunc: ServiceConstraintsFunc,
-		UsageFunc: ServiceUsageFunc,
-		ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
+	return &serviceEvaluator{
+		listFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
 			itemList, err := kubeClient.Core().Services(namespace).List(options)
 			if err != nil {
 				return nil, err
@@ -63,39 +56,104 @@ func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator {
 	}
 }
 
-// ServiceUsageFunc knows how to measure usage associated with services
-func ServiceUsageFunc(object runtime.Object) api.ResourceList {
-	result := api.ResourceList{}
-	var serviceType api.ServiceType
-	var ports int
+// serviceEvaluator knows how to measure usage for services.
+type serviceEvaluator struct {
+	// knows how to list items by namespace
+	listFuncByNamespace generic.ListFuncByNamespace
+}
 
-	switch t := object.(type) {
-	case *v1.Service:
-		serviceType = api.ServiceType(t.Spec.Type)
-		ports = len(t.Spec.Ports)
-	case *api.Service:
-		serviceType = t.Spec.Type
-		ports = len(t.Spec.Ports)
-	default:
-		panic(fmt.Sprintf("expect *api.Service or *v1.Service, got %v", t))
+// Constraints verifies that all required resources are present on the item
+func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
+	service, ok := item.(*api.Service)
+	if !ok {
+		return fmt.Errorf("unexpected input object %v", item)
 	}
 
+	requiredSet := quota.ToSet(required)
+	missingSet := sets.NewString()
+	serviceUsage, err := p.Usage(service)
+	if err != nil {
+		return err
+	}
+	serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
+	if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
+		missingSet.Insert(diff.List()...)
+	}
+
+	if len(missingSet) == 0 {
+		return nil
+	}
+	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
+}
+
+// GroupKind that this evaluator tracks
+func (p *serviceEvaluator) GroupKind() schema.GroupKind {
+	return api.Kind("Service")
+}
+
+// Handles returns true if the evaluator should handle the specified operation.
+func (p *serviceEvaluator) Handles(operation admission.Operation) bool {
+	// We handle create and update because a service type can change.
+	return admission.Create == operation || admission.Update == operation
+}
+
+// Matches returns true if the evaluator matches the specified quota with the provided input item
+func (p *serviceEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
+	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
+}
+
+// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
+func (p *serviceEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
+	return quota.Intersection(input, serviceResources)
+}
+
+// convert the input object to an internal service object or error.
+func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) {
+	svc := &api.Service{}
+	switch t := obj.(type) {
+	case *v1.Service:
+		if err := v1.Convert_v1_Service_To_api_Service(t, svc, nil); err != nil {
+			return nil, err
+		}
+	case *api.Service:
+		svc = t
+	default:
+		return nil, fmt.Errorf("expect *api.Service or *v1.Service, got %v", t)
+	}
+	return svc, nil
+}
+
+// Usage knows how to measure usage associated with services
+func (p *serviceEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
+	result := api.ResourceList{}
+	svc, err := toInternalServiceOrError(item)
+	if err != nil {
+		return result, err
+	}
+	ports := len(svc.Spec.Ports)
 	// default service usage
-	result[api.ResourceServices] = resource.MustParse("1")
-	result[api.ResourceServicesLoadBalancers] = resource.MustParse("0")
-	result[api.ResourceServicesNodePorts] = resource.MustParse("0")
-	switch serviceType {
+	result[api.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI))
+	result[api.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI}
+	result[api.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI}
+	switch svc.Spec.Type {
 	case api.ServiceTypeNodePort:
 		// node port services need to count node ports
 		value := resource.NewQuantity(int64(ports), resource.DecimalSI)
 		result[api.ResourceServicesNodePorts] = *value
 	case api.ServiceTypeLoadBalancer:
 		// load balancer services need to count load balancers
-		result[api.ResourceServicesLoadBalancers] = resource.MustParse("1")
+		result[api.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI))
 	}
-	return result
+	return result, nil
 }
 
+// UsageStats calculates aggregate usage for the object.
+func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
+	return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
+}
+
+var _ quota.Evaluator = &serviceEvaluator{}
+
 // QuotaServiceType returns true if the service type is eligible to track against a quota
 func QuotaServiceType(service *v1.Service) bool {
 	switch service.Spec.Type {
@ -115,24 +173,3 @@ func GetQuotaServiceType(service *v1.Service) v1.ServiceType {
|
||||||
}
|
}
|
||||||
return v1.ServiceType("")
|
return v1.ServiceType("")
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServiceConstraintsFunc verifies that all required resources are captured in service usage.
|
|
||||||
func ServiceConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
|
|
||||||
service, ok := object.(*api.Service)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("unexpected input object %v", object)
|
|
||||||
}
|
|
||||||
|
|
||||||
requiredSet := quota.ToSet(required)
|
|
||||||
missingSet := sets.NewString()
|
|
||||||
serviceUsage := ServiceUsageFunc(service)
|
|
||||||
serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
|
|
||||||
if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
|
|
||||||
missingSet.Insert(diff.List()...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(missingSet) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
|
|
||||||
}
|
|
||||||
|
|
|
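The charging rule in the Usage method above is easier to see in isolation. The following standalone Go sketch (simplified types and resource names, not the Kubernetes API and not part of this commit) mirrors the same rule: every service charges one "services" unit, NodePort services additionally charge one "services.nodeports" unit per port, and LoadBalancer services charge one "services.loadbalancers" unit.

package main

import "fmt"

// serviceSpec is a simplified stand-in for a service's spec.
type serviceSpec struct {
    Type  string // "ClusterIP", "NodePort", or "LoadBalancer"
    Ports int
}

// serviceUsage applies the same counting rule as the evaluator's Usage method.
func serviceUsage(s serviceSpec) map[string]int64 {
    usage := map[string]int64{
        "services":               1, // every service counts once
        "services.nodeports":     0,
        "services.loadbalancers": 0,
    }
    switch s.Type {
    case "NodePort":
        usage["services.nodeports"] = int64(s.Ports) // one node port per exposed port
    case "LoadBalancer":
        usage["services.loadbalancers"] = 1
    }
    return usage
}

func main() {
    fmt.Println(serviceUsage(serviceSpec{Type: "NodePort", Ports: 3}))
}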
@@ -28,12 +28,21 @@ import (
 func TestServiceEvaluatorMatchesResources(t *testing.T) {
 	kubeClient := fake.NewSimpleClientset()
 	evaluator := NewServiceEvaluator(kubeClient)
+	// we give a lot of resources
+	input := []api.ResourceName{
+		api.ResourceConfigMaps,
+		api.ResourceCPU,
+		api.ResourceServices,
+		api.ResourceServicesNodePorts,
+		api.ResourceServicesLoadBalancers,
+	}
+	// but we only match these...
 	expected := quota.ToSet([]api.ResourceName{
 		api.ResourceServices,
 		api.ResourceServicesNodePorts,
 		api.ResourceServicesLoadBalancers,
 	})
-	actual := quota.ToSet(evaluator.MatchesResources())
+	actual := quota.ToSet(evaluator.MatchingResources(input))
 	if !expected.Equal(actual) {
 		t.Errorf("expected: %v, actual: %v", expected, actual)
 	}
@@ -109,7 +118,10 @@ func TestServiceEvaluatorUsage(t *testing.T) {
 		},
 	}
 	for testName, testCase := range testCases {
-		actual := evaluator.Usage(testCase.service)
+		actual, err := evaluator.Usage(testCase.service)
+		if err != nil {
+			t.Errorf("%s unexpected error: %v", testName, err)
+		}
 		if !quota.Equals(testCase.usage, actual) {
 			t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual)
 		}
@@ -168,8 +180,11 @@ func TestServiceConstraintsFunc(t *testing.T) {
 			required: []api.ResourceName{api.ResourceServicesNodePorts},
 		},
 	}
+
+	kubeClient := fake.NewSimpleClientset()
+	evaluator := NewServiceEvaluator(kubeClient)
 	for testName, test := range testCases {
-		err := ServiceConstraintsFunc(test.required, test.service)
+		err := evaluator.Constraints(test.required, test.service)
 		switch {
 		case err != nil && len(test.err) == 0,
 			err == nil && len(test.err) != 0,
@@ -45,167 +45,138 @@ func ListResourceUsingInformerFunc(f informers.SharedInformerFactory, groupResou
 	}
 }
 
-// ConstraintsFunc takes a list of required resources that must match on the input item
-type ConstraintsFunc func(required []api.ResourceName, item runtime.Object) error
-
-// GetFuncByNamespace knows how to get a resource with specified namespace and name
-type GetFuncByNamespace func(namespace, name string) (runtime.Object, error)
-
 // ListFuncByNamespace knows how to list resources in a namespace
 type ListFuncByNamespace func(namespace string, options v1.ListOptions) ([]runtime.Object, error)
 
 // MatchesScopeFunc knows how to evaluate if an object matches a scope
-type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) bool
+type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) (bool, error)
 
 // UsageFunc knows how to measure usage associated with an object
-type UsageFunc func(object runtime.Object) api.ResourceList
+type UsageFunc func(object runtime.Object) (api.ResourceList, error)
+
+// MatchingResourceNamesFunc is a function that returns the list of resources matched
+type MatchingResourceNamesFunc func(input []api.ResourceName) []api.ResourceName
 
 // MatchesNoScopeFunc returns false on all match checks
-func MatchesNoScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bool {
-	return false
+func MatchesNoScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) (bool, error) {
+	return false, nil
 }
 
-// ObjectCountConstraintsFunc returns ConstraintsFunc that returns nil if the
-// specified resource name is in the required set of resource names
-func ObjectCountConstraintsFunc(resourceName api.ResourceName) ConstraintsFunc {
-	return func(required []api.ResourceName, item runtime.Object) error {
-		if !quota.Contains(required, resourceName) {
-			return fmt.Errorf("missing %s", resourceName)
-		}
-		return nil
-	}
-}
-
-// ObjectCountUsageFunc is useful if you are only counting your object
-// It always returns 1 as the usage for the named resource
-func ObjectCountUsageFunc(resourceName api.ResourceName) UsageFunc {
-	return func(object runtime.Object) api.ResourceList {
-		return api.ResourceList{
-			resourceName: resource.MustParse("1"),
-		}
-	}
-}
-
-// GenericEvaluator provides an implementation for quota.Evaluator
-type GenericEvaluator struct {
-	// Name used for logging
-	Name string
-	// The GroupKind that this evaluator tracks
-	InternalGroupKind schema.GroupKind
-	// The set of resources that are pertinent to the mapped operation
-	InternalOperationResources map[admission.Operation][]api.ResourceName
-	// The set of resource names this evaluator matches
-	MatchedResourceNames []api.ResourceName
-	// A function that knows how to evaluate a matches scope request
-	MatchesScopeFunc MatchesScopeFunc
-	// A function that knows how to return usage for an object
-	UsageFunc UsageFunc
-	// A function that knows how to list resources by namespace
-	ListFuncByNamespace ListFuncByNamespace
-	// A function that knows how to get resource in a namespace
-	// This function must be specified if the evaluator needs to handle UPDATE
-	GetFuncByNamespace GetFuncByNamespace
-	// A function that checks required constraints are satisfied
-	ConstraintsFunc ConstraintsFunc
-}
-
-// Ensure that GenericEvaluator implements quota.Evaluator
-var _ quota.Evaluator = &GenericEvaluator{}
-
-// Constraints checks required constraints are satisfied on the input object
-func (g *GenericEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
-	return g.ConstraintsFunc(required, item)
-}
-
-// Get returns the object by namespace and name
-func (g *GenericEvaluator) Get(namespace, name string) (runtime.Object, error) {
-	return g.GetFuncByNamespace(namespace, name)
-}
-
-// OperationResources returns the set of resources that could be updated for the
-// specified operation for this kind. If empty, admission control will ignore
-// quota processing for the operation.
-func (g *GenericEvaluator) OperationResources(operation admission.Operation) []api.ResourceName {
-	return g.InternalOperationResources[operation]
-}
-
-// GroupKind that this evaluator tracks
-func (g *GenericEvaluator) GroupKind() schema.GroupKind {
-	return g.InternalGroupKind
-}
-
-// MatchesResources is the list of resources that this evaluator matches
-func (g *GenericEvaluator) MatchesResources() []api.ResourceName {
-	return g.MatchedResourceNames
-}
-
-// Matches returns true if the evaluator matches the specified quota with the provided input item
-func (g *GenericEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) bool {
+// Matches returns true if the quota matches the specified item.
+func Matches(resourceQuota *api.ResourceQuota, item runtime.Object, matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) {
 	if resourceQuota == nil {
-		return false
-	}
-
-	// verify the quota matches on resource, by default its false
-	matchResource := false
-	for resourceName := range resourceQuota.Status.Hard {
-		if g.MatchesResource(resourceName) {
-			matchResource = true
-			break
-		}
+		return false, fmt.Errorf("expected non-nil quota")
 	}
+	// verify the quota matches on at least one resource
+	matchResource := len(matchFunc(quota.ResourceNames(resourceQuota.Status.Hard))) > 0
 	// by default, no scopes matches all
 	matchScope := true
 	for _, scope := range resourceQuota.Spec.Scopes {
-		matchScope = matchScope && g.MatchesScope(scope, item)
-	}
-	return matchResource && matchScope
-}
-
-// MatchesResource returns true if this evaluator can match on the specified resource
-func (g *GenericEvaluator) MatchesResource(resourceName api.ResourceName) bool {
-	for _, matchedResourceName := range g.MatchedResourceNames {
-		if resourceName == matchedResourceName {
-			return true
+		innerMatch, err := scopeFunc(scope, item)
+		if err != nil {
+			return false, err
 		}
+		matchScope = matchScope && innerMatch
 	}
-	return false
+	return matchResource && matchScope, nil
 }
 
-// MatchesScope returns true if the input object matches the specified scope
-func (g *GenericEvaluator) MatchesScope(scope api.ResourceQuotaScope, object runtime.Object) bool {
-	return g.MatchesScopeFunc(scope, object)
-}
-
-// Usage returns the resource usage for the specified object
-func (g *GenericEvaluator) Usage(object runtime.Object) api.ResourceList {
-	return g.UsageFunc(object)
-}
-
-// UsageStats calculates latest observed usage stats for all objects
-func (g *GenericEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
+// CalculateUsageStats is a utility function that knows how to calculate aggregate usage.
+func CalculateUsageStats(options quota.UsageStatsOptions,
+	listFunc ListFuncByNamespace,
+	scopeFunc MatchesScopeFunc,
+	usageFunc UsageFunc) (quota.UsageStats, error) {
 	// default each tracked resource to zero
 	result := quota.UsageStats{Used: api.ResourceList{}}
-	for _, resourceName := range g.MatchedResourceNames {
-		result.Used[resourceName] = resource.MustParse("0")
+	for _, resourceName := range options.Resources {
+		result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI}
 	}
-	items, err := g.ListFuncByNamespace(options.Namespace, v1.ListOptions{
+	items, err := listFunc(options.Namespace, v1.ListOptions{
 		LabelSelector: labels.Everything().String(),
 	})
 	if err != nil {
-		return result, fmt.Errorf("%s: Failed to list %v: %v", g.Name, g.GroupKind(), err)
+		return result, fmt.Errorf("failed to list content: %v", err)
 	}
 	for _, item := range items {
 		// need to verify that the item matches the set of scopes
 		matchesScopes := true
 		for _, scope := range options.Scopes {
-			if !g.MatchesScope(scope, item) {
+			innerMatch, err := scopeFunc(scope, item)
+			if err != nil {
+				return result, nil
+			}
+			if !innerMatch {
 				matchesScopes = false
 			}
 		}
 		// only count usage if there was a match
 		if matchesScopes {
-			result.Used = quota.Add(result.Used, g.Usage(item))
+			usage, err := usageFunc(item)
+			if err != nil {
+				return result, err
+			}
+			result.Used = quota.Add(result.Used, usage)
 		}
 	}
 	return result, nil
 }
 
+// ObjectCountEvaluator provides an implementation for quota.Evaluator
+// that associates usage of the specified resource based on the number of items
+// returned by the specified listing function.
+type ObjectCountEvaluator struct {
+	// AllowCreateOnUpdate if true will ensure the evaluator tracks create
+	// and update operations.
+	AllowCreateOnUpdate bool
+	// GroupKind that this evaluator tracks.
+	InternalGroupKind schema.GroupKind
+	// A function that knows how to list resources by namespace.
+	// TODO move to dynamic client in future
+	ListFuncByNamespace ListFuncByNamespace
+	// Name associated with this resource in the quota.
+	ResourceName api.ResourceName
+}
+
+// Constraints returns an error if the configured resource name is not in the required set.
+func (o *ObjectCountEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
+	if !quota.Contains(required, o.ResourceName) {
+		return fmt.Errorf("missing %s", o.ResourceName)
+	}
+	return nil
+}
+
+// GroupKind that this evaluator tracks
+func (o *ObjectCountEvaluator) GroupKind() schema.GroupKind {
+	return o.InternalGroupKind
+}
+
+// Handles returns true if the object count evaluator needs to track this operation.
+func (o *ObjectCountEvaluator) Handles(operation admission.Operation) bool {
+	return operation == admission.Create || (o.AllowCreateOnUpdate && operation == admission.Update)
+}
+
+// Matches returns true if the evaluator matches the specified quota with the provided input item
+func (o *ObjectCountEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
+	return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc)
+}
+
+// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
+func (o *ObjectCountEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
+	return quota.Intersection(input, []api.ResourceName{o.ResourceName})
+}
+
+// Usage returns the resource usage for the specified object
+func (o *ObjectCountEvaluator) Usage(object runtime.Object) (api.ResourceList, error) {
+	quantity := resource.NewQuantity(1, resource.DecimalSI)
+	return api.ResourceList{
+		o.ResourceName: *quantity,
+	}, nil
+}
+
+// UsageStats calculates aggregate usage for the object.
+func (o *ObjectCountEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
+	return CalculateUsageStats(options, o.ListFuncByNamespace, MatchesNoScopeFunc, o.Usage)
+}
+
+// Verify implementation of interface at compile time.
+var _ quota.Evaluator = &ObjectCountEvaluator{}
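The ObjectCountEvaluator introduced above reduces quota tracking to counting objects: it matches exactly one resource name and charges one unit per object. A rough standalone sketch of that idea in plain Go follows (simplified string-keyed types, not the real quota interfaces or this commit's code).

package main

import "fmt"

// objectCounter tracks a single resource name and counts objects against it.
type objectCounter struct {
    resourceName string
}

// matchingResources returns the configured name if it appears in the input, mirroring
// the intersection-based MatchingResources above.
func (o objectCounter) matchingResources(input []string) []string {
    for _, name := range input {
        if name == o.resourceName {
            return []string{o.resourceName}
        }
    }
    return nil
}

// usageStats charges one unit per listed object, so aggregate usage is just the count.
func (o objectCounter) usageStats(objects []string) map[string]int64 {
    return map[string]int64{o.resourceName: int64(len(objects))}
}

func main() {
    counter := objectCounter{resourceName: "pods"}
    fmt.Println(counter.matchingResources([]string{"cpu", "pods"})) // [pods]
    fmt.Println(counter.usageStats([]string{"pod-a", "pod-b"}))     // map[pods:2]
}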
@@ -29,6 +29,8 @@ type UsageStatsOptions struct {
 	Namespace string
 	// Scopes that must match counted objects
 	Scopes []api.ResourceQuotaScope
+	// Resources are the set of resources to include in the measurement
+	Resources []api.ResourceName
 }
 
 // UsageStats is result of measuring observed resource use in the system
@@ -41,20 +43,17 @@ type UsageStats struct {
 type Evaluator interface {
 	// Constraints ensures that each required resource is present on item
 	Constraints(required []api.ResourceName, item runtime.Object) error
-	// Get returns the object with specified namespace and name
-	Get(namespace, name string) (runtime.Object, error)
 	// GroupKind returns the groupKind that this object knows how to evaluate
 	GroupKind() schema.GroupKind
-	// MatchesResources is the list of resources that this evaluator matches
-	MatchesResources() []api.ResourceName
+	// Handles determines if quota could be impacted by the specified operation.
+	// If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota.
+	Handles(operation admission.Operation) bool
 	// Matches returns true if the specified quota matches the input item
-	Matches(resourceQuota *api.ResourceQuota, item runtime.Object) bool
+	Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error)
-	// OperationResources returns the set of resources that could be updated for the
-	// specified operation for this kind. If empty, admission control will ignore
-	// quota processing for the operation.
-	OperationResources(operation admission.Operation) []api.ResourceName
+	// MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches.
+	MatchingResources(input []api.ResourceName) []api.ResourceName
 	// Usage returns the resource usage for the specified object
-	Usage(object runtime.Object) api.ResourceList
+	Usage(item runtime.Object) (api.ResourceList, error)
 	// UsageStats calculates latest observed usage stats for all objects
 	UsageStats(options UsageStatsOptions) (UsageStats, error)
 }
@@ -69,6 +68,7 @@ type Registry interface {
 // is the "winner"
 type UnionRegistry []Registry
 
+// Evaluators returns a mapping of evaluators by group kind.
 func (r UnionRegistry) Evaluators() map[schema.GroupKind]Evaluator {
 	ret := map[schema.GroupKind]Evaluator{}
 
@@ -223,18 +223,21 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL
 	potentialResources := []api.ResourceName{}
 	evaluators := registry.Evaluators()
 	for _, evaluator := range evaluators {
-		potentialResources = append(potentialResources, evaluator.MatchesResources()...)
+		potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...)
 	}
+	// NOTE: the intersection just removes duplicates since the evaluator match intersects wtih hard
 	matchedResources := Intersection(hardResources, potentialResources)
 
 	// sum the observed usage from each evaluator
 	newUsage := api.ResourceList{}
-	usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes}
 	for _, evaluator := range evaluators {
 		// only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything
-		if intersection := Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) == 0 {
+		intersection := evaluator.MatchingResources(matchedResources)
+		if len(intersection) == 0 {
 			continue
 		}
 
+		usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection}
 		stats, err := evaluator.UsageStats(usageStatsOptions)
 		if err != nil {
 			return nil, err
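The CalculateUsage flow above asks each evaluator which of the quota's hard resources it matches, skips evaluators with an empty intersection, and otherwise accumulates the usage they report for the matched names. A standalone sketch of that flow, with evaluators modeled as plain functions (none of these identifiers come from the commit):

package main

import "fmt"

// evaluator is a simplified stand-in: it reports which hard resources it matches
// and computes usage for those matched names.
type evaluator struct {
    matches func(hard []string) []string
    usage   func(matched []string) map[string]int64
}

// calculateUsage sums usage across evaluators, skipping any evaluator whose
// matched set is empty for this quota.
func calculateUsage(hard []string, evaluators []evaluator) map[string]int64 {
    total := map[string]int64{}
    for _, e := range evaluators {
        matched := e.matches(hard)
        if len(matched) == 0 {
            continue // evaluator tracks nothing this quota cares about
        }
        for name, value := range e.usage(matched) {
            total[name] += value
        }
    }
    return total
}

func main() {
    podCounter := evaluator{
        matches: func(hard []string) []string {
            for _, h := range hard {
                if h == "pods" {
                    return []string{"pods"}
                }
            }
            return nil
        },
        usage: func([]string) map[string]int64 { return map[string]int64{"pods": 3} },
    }
    fmt.Println(calculateUsage([]string{"pods", "services"}, []evaluator{podCounter}))
}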
@@ -218,6 +218,7 @@ func TestIsQualifiedName(t *testing.T) {
 		"1.2.3.4/5678",
 		"Uppercase_Is_OK_123",
 		"example.com/Uppercase_Is_OK_123",
+		"requests.storage-foo",
 		strings.Repeat("a", 63),
 		strings.Repeat("a", 253) + "/" + strings.Repeat("b", 63),
 	}
@@ -54,10 +54,8 @@ go_test(
         "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
         "//pkg/client/testing/core:go_default_library",
         "//pkg/quota:go_default_library",
-        "//pkg/quota/evaluator/core:go_default_library",
         "//pkg/quota/generic:go_default_library",
         "//pkg/quota/install:go_default_library",
-        "//pkg/runtime:go_default_library",
         "//pkg/runtime/schema:go_default_library",
         "//pkg/util/sets:go_default_library",
         "//vendor:github.com/hashicorp/golang-lru",
@@ -31,10 +31,8 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	testcore "k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/quota"
-	"k8s.io/kubernetes/pkg/quota/evaluator/core"
 	"k8s.io/kubernetes/pkg/quota/generic"
 	"k8s.io/kubernetes/pkg/quota/install"
-	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/runtime/schema"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -896,44 +894,22 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) {
 		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"},
 		Status: api.ResourceQuotaStatus{
 			Hard: api.ResourceList{
-				api.ResourceCPU: resource.MustParse("3"),
+				api.ResourcePods: resource.MustParse("3"),
 			},
 			Used: api.ResourceList{
-				api.ResourceCPU: resource.MustParse("1"),
+				api.ResourcePods: resource.MustParse("1"),
 			},
 		},
 	}
 	kubeClient := fake.NewSimpleClientset(resourceQuota)
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
 
-	computeResources := []api.ResourceName{
-		api.ResourcePods,
-		api.ResourceCPU,
-	}
-
-	usageFunc := func(object runtime.Object) api.ResourceList {
-		pod, ok := object.(*api.Pod)
-		if !ok {
-			t.Fatalf("Expected pod, got %T", object)
-		}
-		if pod.Namespace != namespace {
-			t.Errorf("Expected pod with different namespace: %q != %q", pod.Namespace, namespace)
-		}
-		return core.PodUsageFunc(pod)
-	}
-
-	podEvaluator := &generic.GenericEvaluator{
-		Name:              "Test-Evaluator.Pod",
-		InternalGroupKind: api.Kind("Pod"),
-		InternalOperationResources: map[admission.Operation][]api.ResourceName{
-			admission.Create: computeResources,
-		},
-		ConstraintsFunc:      core.PodConstraintsFunc,
-		MatchedResourceNames: computeResources,
-		MatchesScopeFunc:     core.PodMatchesScopeFunc,
-		UsageFunc:            usageFunc,
-	}
+	// create a dummy evaluator so we can trigger quota
+	podEvaluator := &generic.ObjectCountEvaluator{
+		AllowCreateOnUpdate: false,
+		InternalGroupKind:   api.Kind("Pod"),
+		ResourceName:        api.ResourcePods,
+	}
 
 	registry := &generic.GenericRegistry{
 		InternalEvaluators: map[schema.GroupKind]quota.Evaluator{
 			podEvaluator.GroupKind(): podEvaluator,
|
||||||
}
|
}
|
||||||
|
|
||||||
op := a.GetOperation()
|
op := a.GetOperation()
|
||||||
operationResources := evaluator.OperationResources(op)
|
if !evaluator.Handles(op) {
|
||||||
if len(operationResources) == 0 {
|
|
||||||
return quotas, nil
|
return quotas, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -340,14 +339,16 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
|
||||||
interestingQuotaIndexes := []int{}
|
interestingQuotaIndexes := []int{}
|
||||||
for i := range quotas {
|
for i := range quotas {
|
||||||
resourceQuota := quotas[i]
|
resourceQuota := quotas[i]
|
||||||
match := evaluator.Matches(&resourceQuota, inputObject)
|
match, err := evaluator.Matches(&resourceQuota, inputObject)
|
||||||
|
if err != nil {
|
||||||
|
return quotas, err
|
||||||
|
}
|
||||||
if !match {
|
if !match {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
|
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
|
||||||
evaluatorResources := evaluator.MatchesResources()
|
requiredResources := evaluator.MatchingResources(hardResources)
|
||||||
requiredResources := quota.Intersection(hardResources, evaluatorResources)
|
|
||||||
if err := evaluator.Constraints(requiredResources, inputObject); err != nil {
|
if err := evaluator.Constraints(requiredResources, inputObject); err != nil {
|
||||||
return nil, admission.NewForbidden(a, fmt.Errorf("failed quota: %s: %v", resourceQuota.Name, err))
|
return nil, admission.NewForbidden(a, fmt.Errorf("failed quota: %s: %v", resourceQuota.Name, err))
|
||||||
}
|
}
|
||||||
|
@ -375,7 +376,10 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
|
||||||
// as a result, we need to measure the usage of this object for quota
|
// as a result, we need to measure the usage of this object for quota
|
||||||
// on updates, we need to subtract the previous measured usage
|
// on updates, we need to subtract the previous measured usage
|
||||||
// if usage shows no change, just return since it has no impact on quota
|
// if usage shows no change, just return since it has no impact on quota
|
||||||
deltaUsage := evaluator.Usage(inputObject)
|
deltaUsage, err := evaluator.Usage(inputObject)
|
||||||
|
if err != nil {
|
||||||
|
return quotas, err
|
||||||
|
}
|
||||||
|
|
||||||
// ensure that usage for input object is never negative (this would mean a resource made a negative resource requirement)
|
// ensure that usage for input object is never negative (this would mean a resource made a negative resource requirement)
|
||||||
if negativeUsage := quota.IsNegative(deltaUsage); len(negativeUsage) > 0 {
|
if negativeUsage := quota.IsNegative(deltaUsage); len(negativeUsage) > 0 {
|
||||||
|
@ -392,7 +396,10 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
|
||||||
// then charge based on the delta. Otherwise, bill the maximum
|
// then charge based on the delta. Otherwise, bill the maximum
|
||||||
metadata, err := meta.Accessor(prevItem)
|
metadata, err := meta.Accessor(prevItem)
|
||||||
if err == nil && len(metadata.GetResourceVersion()) > 0 {
|
if err == nil && len(metadata.GetResourceVersion()) > 0 {
|
||||||
prevUsage := evaluator.Usage(prevItem)
|
prevUsage, innerErr := evaluator.Usage(prevItem)
|
||||||
|
if innerErr != nil {
|
||||||
|
return quotas, innerErr
|
||||||
|
}
|
||||||
deltaUsage = quota.Subtract(deltaUsage, prevUsage)
|
deltaUsage = quota.Subtract(deltaUsage, prevUsage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -446,8 +453,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
|
||||||
// for this kind, check if the operation could mutate any quota resources
|
// for this kind, check if the operation could mutate any quota resources
|
||||||
// if no resources tracked by quota are impacted, then just return
|
// if no resources tracked by quota are impacted, then just return
|
||||||
op := a.GetOperation()
|
op := a.GetOperation()
|
||||||
operationResources := evaluator.OperationResources(op)
|
if !evaluator.Handles(op) {
|
||||||
if len(operationResources) == 0 {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
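checkRequest above charges quota by delta on updates: the new object's usage minus the previous object's usage, so an object whose usage did not change has no quota impact. A minimal standalone sketch of that subtraction, with resource lists simplified to map[string]int64 (not the real quota.Subtract helper):

package main

import "fmt"

// subtract returns a - b for every key in a; missing keys in b count as zero.
func subtract(a, b map[string]int64) map[string]int64 {
    out := map[string]int64{}
    for k, v := range a {
        out[k] = v - b[k]
    }
    return out
}

func main() {
    newUsage := map[string]int64{"services.nodeports": 2}
    oldUsage := map[string]int64{"services.nodeports": 2}
    // an unchanged object produces a zero delta and therefore no quota charge
    fmt.Println(subtract(newUsage, oldUsage)) // map[services.nodeports:0]
}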
@@ -130,6 +130,7 @@ go_library(
         "//pkg/apis/extensions:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/apis/meta/v1:go_default_library",
+        "//pkg/apis/storage/util:go_default_library",
         "//pkg/apis/storage/v1beta1:go_default_library",
         "//pkg/apis/storage/v1beta1/util:go_default_library",
         "//pkg/client/cache:go_default_library",
@@ -159,6 +160,7 @@ go_library(
         "//pkg/labels:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/metrics:go_default_library",
+        "//pkg/quota/evaluator/core:go_default_library",
         "//pkg/registry/generic/registry:go_default_library",
         "//pkg/runtime:go_default_library",
         "//pkg/runtime/schema:go_default_library",
@@ -23,7 +23,9 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/apis/storage/util"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	"k8s.io/kubernetes/pkg/quota/evaluator/core"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -288,7 +290,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
 		pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Ensuring resource quota status captures persistent volume claimcreation")
+		By("Ensuring resource quota status captures persistent volume claim creation")
 		usedResources = v1.ResourceList{}
 		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
 		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
@@ -306,6 +308,56 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
 		Expect(err).NotTo(HaveOccurred())
 	})
 
+	It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class.", func() {
+		By("Creating a ResourceQuota")
+		quotaName := "test-quota"
+		resourceQuota := newTestResourceQuota(quotaName)
+		resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Ensuring resource quota status is calculated")
+		usedResources := v1.ResourceList{}
+		usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
+		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
+		usedResources[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
+		usedResources[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("0")
+
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Creating a PersistentVolumeClaim with storage class")
+		pvc := newTestPersistentVolumeClaimForQuota("test-claim")
+		pvc.Annotations = map[string]string{
+			util.StorageClassAnnotation: "gold",
+		}
+		pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Ensuring resource quota status captures persistent volume claim creation")
+		usedResources = v1.ResourceList{}
+		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
+		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
+		usedResources[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1")
+		usedResources[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("1Gi")
+
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Deleting a PersistentVolumeClaim")
+		err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Ensuring resource quota status released usage")
+		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
+		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
+		usedResources[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
+		usedResources[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("0")
+
+		err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
+		Expect(err).NotTo(HaveOccurred())
+	})
+
 	It("should verify ResourceQuota with terminating scopes.", func() {
 		By("Creating a ResourceQuota with terminating scope")
 		quotaTerminatingName := "quota-terminating"
@@ -517,6 +569,8 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
 	hard[v1.ResourceSecrets] = resource.MustParse("10")
 	hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
 	hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
+	hard[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
+	hard[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
 	return &v1.ResourceQuota{
 		ObjectMeta: v1.ObjectMeta{Name: name},
 		Spec:       v1.ResourceQuotaSpec{Hard: hard},
@ -1,11 +1,11 @@
|
||||||
name,owner,auto-assigned
|
name,owner,auto-assigned
|
||||||
DEFAULT,rmmh/spxtr/ixdy/apelisse/fejta,0
|
DEFAULT,rmmh/spxtr/ixdy/apelisse/fejta,0
|
||||||
Addon update should propagate add-on file changes,eparis,1
|
Addon update should propagate add-on file changes,eparis,1
|
||||||
AppArmor should enforce an AppArmor profile,kevin-wangzefeng,1
|
AppArmor should enforce an AppArmor profile,derekwaynecarr,0
|
||||||
AppArmor when running with AppArmor should enforce a permissive profile,yujuhong,1
|
AppArmor when running with AppArmor should enforce a permissive profile,yujuhong,1
|
||||||
AppArmor when running with AppArmor should enforce a profile blocking writes,freehan,1
|
AppArmor when running with AppArmor should enforce a profile blocking writes,freehan,1
|
||||||
AppArmor when running with AppArmor should reject an unloaded profile,kargakis,1
|
AppArmor when running with AppArmor should reject an unloaded profile,kargakis,1
|
||||||
AppArmor when running without AppArmor should reject a pod with an AppArmor profile,vulpecula,1
|
AppArmor when running without AppArmor should reject a pod with an AppArmor profile,derekwaynecarr,0
|
||||||
Cadvisor should be healthy on every node.,vishh,0
|
Cadvisor should be healthy on every node.,vishh,0
|
||||||
Cassandra should create and scale cassandra,fabioy,1
|
Cassandra should create and scale cassandra,fabioy,1
|
||||||
CassandraStatefulSet should create statefulset,wojtek-t,1
|
CassandraStatefulSet should create statefulset,wojtek-t,1
|
||||||
|
@ -19,13 +19,14 @@ Cluster size autoscaling should increase cluster size if pending pods are small,
|
||||||
Cluster size autoscaling should increase cluster size if pending pods are small and there is another node pool that is not autoscaled,apelisse,1
|
Cluster size autoscaling should increase cluster size if pending pods are small and there is another node pool that is not autoscaled,apelisse,1
|
||||||
Cluster size autoscaling should increase cluster size if pods are pending due to host port conflict,brendandburns,1
|
Cluster size autoscaling should increase cluster size if pods are pending due to host port conflict,brendandburns,1
|
||||||
Cluster size autoscaling should scale up correct target pool,mikedanese,1
|
Cluster size autoscaling should scale up correct target pool,mikedanese,1
|
||||||
Cluster size autoscaling shouldn't increase cluster size if pending pod is too large,karlkfi,1
|
Cluster size autoscaling shouldn't increase cluster size if pending pod is too large,derekwaynecarr,0
|
||||||
ClusterDns should create pod that uses dns,sttts,0
|
ClusterDns should create pod that uses dns,sttts,0
|
||||||
ConfigMap should be consumable from pods in volume,alex-mohr,1
|
ConfigMap should be consumable from pods in volume,alex-mohr,1
|
||||||
ConfigMap should be consumable from pods in volume as non-root,hurf,1
|
ConfigMap should be consumable from pods in volume as non-root,derekwaynecarr,0
|
||||||
ConfigMap should be consumable from pods in volume as non-root with FSGroup,roberthbailey,1
|
ConfigMap should be consumable from pods in volume as non-root with FSGroup,roberthbailey,1
|
||||||
|
ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set,derekwaynecarr,0
|
||||||
ConfigMap should be consumable from pods in volume with defaultMode set,Random-Liu,1
|
ConfigMap should be consumable from pods in volume with defaultMode set,Random-Liu,1
|
||||||
ConfigMap should be consumable from pods in volume with mappings,karlkfi,1
|
ConfigMap should be consumable from pods in volume with mappings,derekwaynecarr,0
|
||||||
ConfigMap should be consumable from pods in volume with mappings and Item mode set,eparis,1
|
ConfigMap should be consumable from pods in volume with mappings and Item mode set,eparis,1
|
||||||
ConfigMap should be consumable from pods in volume with mappings as non-root,apelisse,1
|
ConfigMap should be consumable from pods in volume with mappings as non-root,apelisse,1
|
||||||
ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup,zmerlynn,1
|
ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup,zmerlynn,1
|
||||||
|
@ -33,7 +34,7 @@ ConfigMap should be consumable in multiple volumes in the same pod,caesarxuchao,
|
||||||
ConfigMap should be consumable via environment variable,ncdc,1
|
ConfigMap should be consumable via environment variable,ncdc,1
|
||||||
ConfigMap updates should be reflected in volume,kevin-wangzefeng,1
|
ConfigMap updates should be reflected in volume,kevin-wangzefeng,1
|
||||||
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute poststart exec hook properly,kargakis,1
|
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute poststart exec hook properly,kargakis,1
|
||||||
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute prestop exec hook properly,jdef,1
|
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute prestop exec hook properly,derekwaynecarr,0
|
||||||
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute poststart http hook properly,vishh,1
|
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute poststart http hook properly,vishh,1
|
||||||
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute prestop http hook properly,freehan,1
|
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute prestop http hook properly,freehan,1
|
||||||
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image *,Random-Liu,0
|
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image *,Random-Liu,0
|
||||||
|
@ -55,14 +56,14 @@ DNS should provide DNS for the cluster,roberthbailey,1
|
||||||
Daemon set should run and stop complex daemon,jlowdermilk,1
|
Daemon set should run and stop complex daemon,jlowdermilk,1
|
||||||
Daemon set should run and stop complex daemon with node affinity,erictune,1
|
Daemon set should run and stop complex daemon with node affinity,erictune,1
|
||||||
Daemon set should run and stop simple daemon,mtaufen,1
|
Daemon set should run and stop simple daemon,mtaufen,1
|
||||||
DaemonRestart Controller Manager should not create/delete replicas across restart,vulpecula,1
|
DaemonRestart Controller Manager should not create/delete replicas across restart,derekwaynecarr,0
|
||||||
DaemonRestart Kubelet should not restart containers across restart,madhusudancs,1
|
DaemonRestart Kubelet should not restart containers across restart,madhusudancs,1
|
||||||
DaemonRestart Scheduler should continue assigning pods to nodes across restart,lavalamp,1
|
DaemonRestart Scheduler should continue assigning pods to nodes across restart,lavalamp,1
|
||||||
Density create a batch of pods latency/resource should be within limit when create * pods with * interval,apelisse,1
|
Density create a batch of pods latency/resource should be within limit when create * pods with * interval,apelisse,1
|
||||||
Density create a batch of pods with higher API QPS latency/resource should be within limit when create * pods with * interval (QPS *),jlowdermilk,1
|
Density create a batch of pods with higher API QPS latency/resource should be within limit when create * pods with * interval (QPS *),jlowdermilk,1
|
||||||
Density create a sequence of pods latency/resource should be within limit when create * pods with * background pods,wojtek-t,1
|
Density create a sequence of pods latency/resource should be within limit when create * pods with * background pods,wojtek-t,1
|
||||||
Density should allow running maximum capacity pods on nodes,smarterclayton,1
|
Density should allow running maximum capacity pods on nodes,smarterclayton,1
|
||||||
Density should allow starting * pods per node,gmarek,0
|
Density should allow starting * pods per node using *,derekwaynecarr,0
|
||||||
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
|
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
|
||||||
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
|
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
|
||||||
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
|
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
|
||||||
|
@ -97,17 +98,18 @@ Downward API volume should provide container's memory limit,krousey,1
|
||||||
Downward API volume should provide container's memory request,mikedanese,1
|
Downward API volume should provide container's memory request,mikedanese,1
|
||||||
Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set,lavalamp,1
|
Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set,lavalamp,1
|
||||||
Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set,freehan,1
|
Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set,freehan,1
|
||||||
Downward API volume should provide podname as non-root with fsgroup,karlkfi,1
|
Downward API volume should provide podname as non-root with fsgroup,derekwaynecarr,0
|
||||||
|
Downward API volume should provide podname as non-root with fsgroup and defaultMode,derekwaynecarr,0
|
||||||
Downward API volume should provide podname only,mwielgus,1
|
Downward API volume should provide podname only,mwielgus,1
|
||||||
Downward API volume should set DefaultMode on files,davidopp,1
|
Downward API volume should set DefaultMode on files,davidopp,1
|
||||||
Downward API volume should set mode on item file,mtaufen,1
|
Downward API volume should set mode on item file,mtaufen,1
|
||||||
Downward API volume should update annotations on modification,eparis,1
|
Downward API volume should update annotations on modification,eparis,1
|
||||||
Downward API volume should update labels on modification,timothysc,1
|
Downward API volume should update labels on modification,timothysc,1
|
||||||
Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes,andyzheng0831,1
|
Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes,derekwaynecarr,0
|
||||||
Dynamic provisioning DynamicProvisioner should create and delete persistent volumes,jsafrane,0
|
Dynamic provisioning DynamicProvisioner should create and delete persistent volumes,jsafrane,0
|
||||||
DynamicKubeletConfiguration When a configmap called `kubelet-<node-name>` is added to the `kube-system` namespace The Kubelet on that node should restart to take up the new config,mwielgus,1
|
DynamicKubeletConfiguration When a configmap called `kubelet-<node-name>` is added to the `kube-system` namespace The Kubelet on that node should restart to take up the new config,mwielgus,1
|
||||||
ESIPP should handle updates to source ip annotation,jsafrane,1
|
ESIPP should handle updates to source ip annotation,jsafrane,1
|
||||||
ESIPP should only target nodes with endpoints,karlkfi,1
|
ESIPP should only target nodes with endpoints,derekwaynecarr,0
|
||||||
ESIPP should work for type=LoadBalancer,fgrzadkowski,1
|
ESIPP should work for type=LoadBalancer,fgrzadkowski,1
|
||||||
ESIPP should work for type=NodePort,kargakis,1
|
ESIPP should work for type=NodePort,kargakis,1
|
||||||
ESIPP should work from pods,cjcullen,1
|
ESIPP should work from pods,cjcullen,1
|
||||||
|
@@ -141,20 +143,19 @@ Federated Services DNS non-local federated service missing local service should
Federated Services DNS non-local federated service should be able to discover a non-local federated service,jlowdermilk,1
Federated Services DNS should be able to discover a federated service,derekwaynecarr,1
Federated Services Service creation should create matching services in underlying clusters,jbeda,1
-Federated Services Service creation should not be deleted from underlying clusters when it is deleted,sttts,0
+Federated Services Service creation should not be deleted from underlying clusters when it is deleted,derekwaynecarr,0
-Federated Services Service creation should not be deleted from underlying clusters when it is deleted,madhusudancs,0
Federated Services Service creation should succeed,rmmh,1
Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to connect to a federated ingress via its load balancer,rmmh,1
Federated ingresses Federated Ingresses should be created and deleted successfully,dchen1107,1
Federated ingresses Federated Ingresses should be deleted from underlying clusters when OrphanDependents is false,nikhiljindal,0
-Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters,ghodss,1
+Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters,derekwaynecarr,0
Federated ingresses Federated Ingresses should not be deleted from underlying clusters when OrphanDependents is nil,nikhiljindal,0
Federated ingresses Federated Ingresses should not be deleted from underlying clusters when OrphanDependents is true,nikhiljindal,0
Federation API server authentication should accept cluster resources when the client has right authentication credentials,davidopp,1
Federation API server authentication should not accept cluster resources when the client has invalid authentication credentials,yujuhong,1
Federation API server authentication should not accept cluster resources when the client has no authentication credentials,nikhiljindal,1
Federation apiserver Admission control should not be able to create resources if namespace does not exist,alex-mohr,1
-Federation apiserver Cluster objects should be created and deleted successfully,ghodss,1
+Federation apiserver Cluster objects should be created and deleted successfully,derekwaynecarr,0
Federation daemonsets DaemonSet objects should be created and deleted successfully,nikhiljindal,0
Federation daemonsets DaemonSet objects should be deleted from underlying clusters when OrphanDependents is false,nikhiljindal,0
Federation daemonsets DaemonSet objects should not be deleted from underlying clusters when OrphanDependents is nil,nikhiljindal,0
@@ -164,7 +165,7 @@ Federation deployments Federated Deployment should be deleted from underlying cl
Federation deployments Federated Deployment should create and update matching deployments in underling clusters,soltysh,1
Federation deployments Federated Deployment should not be deleted from underlying clusters when OrphanDependents is nil,nikhiljindal,0
Federation deployments Federated Deployment should not be deleted from underlying clusters when OrphanDependents is true,nikhiljindal,0
-Federation events Event objects should be created and deleted successfully,karlkfi,1
+Federation events Event objects should be created and deleted successfully,derekwaynecarr,0
Federation namespace Namespace objects all resources in the namespace should be deleted when namespace is deleted,nikhiljindal,0
Federation namespace Namespace objects should be created and deleted successfully,xiang90,1
Federation namespace Namespace objects should be deleted from underlying clusters when OrphanDependents is false,nikhiljindal,0
@@ -187,7 +188,7 @@ Garbage Collection Test: * Should eventually garbage collect containers when we
Garbage collector should delete pods created by rc when not orphaning,justinsb,1
Garbage collector should orphan pods created by rc if delete options say so,fabioy,1
Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil,zmerlynn,1
-"Generated release_1_5 clientset should create pods, delete pods, watch pods",ghodss,1
+"Generated release_1_5 clientset should create pods, delete pods, watch pods",derekwaynecarr,0
"Generated release_1_5 clientset should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs",soltysh,1
HA-master survive addition/removal replicas different zones,derekwaynecarr,0
HA-master survive addition/removal replicas same zone,derekwaynecarr,0
@@ -202,9 +203,9 @@ Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Sho
HostPath should give a volume the correct mode,thockin,1
HostPath should support r/w,luxas,1
HostPath should support subPath,sttts,1
-ImageID should be set to the manifest digest (from RepoDigests) when available,hurf,1
+ImageID should be set to the manifest digest (from RepoDigests) when available,derekwaynecarr,0
InitContainer should invoke init containers on a RestartAlways pod,saad-ali,1
-InitContainer should invoke init containers on a RestartNever pod,vulpecula,1
+InitContainer should invoke init containers on a RestartNever pod,derekwaynecarr,0
InitContainer should not start app containers and fail the pod if init containers fail on a RestartNever pod,maisem,0
InitContainer should not start app containers if init containers fail on a RestartAlways pod,maisem,0
Initial Resources should set initial resources based on historical data,piosz,0
@@ -224,7 +225,7 @@ Kubectl client Kubectl api-versions should check if v1 is in available api versi
Kubectl client Kubectl apply should apply a new configuration to an existing RC,pwittrock,0
Kubectl client Kubectl apply should reuse port when apply to an existing SVC,deads2k,0
Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info,pwittrock,0
-Kubectl client Kubectl create quota should create a quota with scopes,jdef,1
+Kubectl client Kubectl create quota should create a quota with scopes,derekwaynecarr,0
Kubectl client Kubectl create quota should create a quota without scopes,xiang90,1
Kubectl client Kubectl create quota should reject quota with invalid scopes,brendandburns,1
Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods,pwittrock,0
@@ -232,7 +233,7 @@ Kubectl client Kubectl expose should create services for rc,pwittrock,0
Kubectl client Kubectl label should update the label on a resource,pwittrock,0
Kubectl client Kubectl logs should be able to retrieve and filter logs,jlowdermilk,0
Kubectl client Kubectl patch should add annotations for pods in rc,janetkuo,0
-Kubectl client Kubectl replace should update a single-container pod's image,karlkfi,1
+Kubectl client Kubectl replace should update a single-container pod's image,derekwaynecarr,0
Kubectl client Kubectl rolling-update should support rolling-update to same image,janetkuo,0
"Kubectl client Kubectl run --rm job should create a job from an image, then delete the job",soltysh,1
Kubectl client Kubectl run default should create an rc or deployment from an image,janetkuo,0
@@ -262,7 +263,7 @@ Kubelet Container Manager Validate OOM score adjustments once the node is setup
Kubelet Container Manager Validate OOM score adjustments once the node is setup docker daemon's oom-score-adj should be -999,thockin,1
Kubelet Container Manager Validate OOM score adjustments once the node is setup guaranteed container's oom-score-adj should be -998,kargakis,1
Kubelet Container Manager Validate OOM score adjustments once the node is setup pod infra containers oom-score-adj should be -998 and best effort container's should be 1000,timothysc,1
-Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space,karlkfi,1
+Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space,derekwaynecarr,0
Kubelet Volume Manager Volume Manager On terminatation of pod with memory backed volume should remove the volume from the node,derekwaynecarr,0
Kubelet experimental resource usage tracking resource tracking for * pods per node,yujuhong,0
Kubelet regular resource usage tracking resource tracking for * pods per node,yujuhong,0
@@ -273,10 +274,10 @@ Kubelet when scheduling a read only busybox container it should not write to roo
KubeletManagedEtcHosts should test kubelet managed /etc/hosts file,Random-Liu,1
Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive,wonderfly,0
LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied.,cjcullen,1
-Liveness liveness pods should be automatically restarted,andyzheng0831,1
+Liveness liveness pods should be automatically restarted,derekwaynecarr,0
-Load capacity should be able to handle * pods per node,gmarek,0
+Load capacity should be able to handle * pods per node *,derekwaynecarr,0
-Loadbalancing: L7 GCE shoud create ingress with given static-ip,vulpecula,1
+Loadbalancing: L7 GCE shoud create ingress with given static-ip,derekwaynecarr,0
-Loadbalancing: L7 GCE should conform to Ingress spec,andyzheng0831,1
+Loadbalancing: L7 GCE should conform to Ingress spec,derekwaynecarr,0
Loadbalancing: L7 Nginx should conform to Ingress spec,ncdc,1
"Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node",justinsb,1
"MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed)",ixdy,1
@@ -314,7 +315,7 @@ Networking Granular Checks: Services should function for node-Service: http,thoc
Networking Granular Checks: Services should function for node-Service: udp,yifan-gu,1
Networking Granular Checks: Services should function for pod-Service: http,childsb,1
Networking Granular Checks: Services should function for pod-Service: udp,brendandburns,1
-Networking Granular Checks: Services should update endpoints: http,jdef,1
+Networking Granular Checks: Services should update endpoints: http,derekwaynecarr,0
Networking Granular Checks: Services should update endpoints: udp,freehan,1
Networking Granular Checks: Services should update nodePort: http,nikhiljindal,1
Networking Granular Checks: Services should update nodePort: udp,smarterclayton,1
@@ -339,7 +340,7 @@ PersistentVolumes with multiple PVs and PVCs all in same ns should create 3 PVs
PersistentVolumes with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access,caesarxuchao,1
Pet Store should scale to persist a nominal number ( * ) of transactions in * seconds,xiang90,1
"Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host",alex-mohr,1
-"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",ghodss,1
+"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",derekwaynecarr,0
"Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host",mml,1
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully.",saad-ali,1
@@ -359,7 +360,7 @@ Pods should support retrieving logs from the container over websockets,vishh,0
"Port forwarding With a server that expects a client request should support a client that connects, sends no data, and disconnects",sttts,0
"Port forwarding With a server that expects no client request should support a client that connects, sends no data, and disconnects",sttts,0
PreStop should call prestop when killing a pod,ncdc,1
-PrivilegedPod should enable privileged commands,dchen1107,1
+PrivilegedPod should enable privileged commands,derekwaynecarr,0
Probing container should *not* be restarted with a /healthz http liveness probe,Random-Liu,0
"Probing container should *not* be restarted with a exec ""cat /tmp/health"" liveness probe",Random-Liu,0
Probing container should be restarted with a /healthz http liveness probe,Random-Liu,0
@@ -368,11 +369,11 @@ Probing container should be restarted with a docker exec liveness probe with tim
Probing container should have monotonically increasing restart count,Random-Liu,0
Probing container with readiness probe should not be ready before initial delay and never restart,Random-Liu,0
Probing container with readiness probe that fails should never be ready and never restart,Random-Liu,0
-Proxy * should proxy logs on node,karlkfi,1
+Proxy * should proxy logs on node,derekwaynecarr,0
-Proxy * should proxy logs on node using proxy subresource,hurf,1
+Proxy * should proxy logs on node using proxy subresource,derekwaynecarr,0
Proxy * should proxy logs on node with explicit kubelet port,ixdy,1
Proxy * should proxy logs on node with explicit kubelet port using proxy subresource,dchen1107,1
-Proxy * should proxy through a service and a pod,karlkfi,1
+Proxy * should proxy through a service and a pod,derekwaynecarr,0
Proxy * should proxy to cadvisor,jszczepkowski,1
Proxy * should proxy to cadvisor using proxy subresource,roberthbailey,1
Reboot each node by dropping all inbound packets for a while and ensure they function afterwards,quinton-hoole,0
@@ -391,21 +392,22 @@ ReplicationController should surface a failure condition on a common issue like
Rescheduler should ensure that critical pod is scheduled in case there is no resources available,mtaufen,1
Resource-usage regular resource usage tracking resource tracking for * pods per node,janetkuo,1
ResourceQuota should create a ResourceQuota and capture the life of a configMap.,timstclair,1
+ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class.,derekwaynecarr,0
ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim.,bgrant0607,1
ResourceQuota should create a ResourceQuota and capture the life of a pod.,pmorie,1
-ResourceQuota should create a ResourceQuota and capture the life of a replication controller.,jdef,1
+ResourceQuota should create a ResourceQuota and capture the life of a replication controller.,derekwaynecarr,0
ResourceQuota should create a ResourceQuota and capture the life of a secret.,ncdc,1
ResourceQuota should create a ResourceQuota and capture the life of a service.,timstclair,1
ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated.,krousey,1
ResourceQuota should verify ResourceQuota with best effort scope.,mml,1
ResourceQuota should verify ResourceQuota with terminating scopes.,ncdc,1
Restart Docker Daemon Network should recover from ip leak,bprashanth,0
-Restart should restart all nodes and ensure all nodes and pods recover,andyzheng0831,1
+Restart should restart all nodes and ensure all nodes and pods recover,derekwaynecarr,0
RethinkDB should create and stop rethinkdb servers,mwielgus,1
SSH should SSH to all nodes and run commands,quinton-hoole,0
SchedulerPredicates validates MaxPods limit number of pods that are allowed to run,gmarek,0
SchedulerPredicates validates resource limits of pods that are allowed to run,gmarek,0
-SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching,hurf,1
+SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching,derekwaynecarr,0
SchedulerPredicates validates that InterPod Affinity and AntiAffinity is respected if matching,yifan-gu,1
SchedulerPredicates validates that InterPodAffinity is respected if matching,kevin-wangzefeng,1
SchedulerPredicates validates that InterPodAffinity is respected if matching with multiple Affinities,caesarxuchao,1
@@ -416,21 +418,22 @@ SchedulerPredicates validates that NodeSelector is respected if not matching,gma
SchedulerPredicates validates that a pod with an invalid NodeAffinity is rejected,deads2k,1
SchedulerPredicates validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid,smarterclayton,1
SchedulerPredicates validates that embedding the JSON NodeAffinity setting as a string in the annotation value work,kevin-wangzefeng,1
-SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work,hurf,1
+SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work,derekwaynecarr,0
SchedulerPredicates validates that required NodeAffinity setting is respected if matching,mml,1
SchedulerPredicates validates that taints-tolerations is respected if matching,jlowdermilk,1
SchedulerPredicates validates that taints-tolerations is respected if not matching,derekwaynecarr,1
Secret should create a pod that reads a secret,luxas,1
Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace,rkouj,0
Secrets should be consumable from pods in env vars,mml,1
-Secrets should be consumable from pods in volume,ghodss,1
+Secrets should be consumable from pods in volume,derekwaynecarr,0
+Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set,derekwaynecarr,0
Secrets should be consumable from pods in volume with defaultMode set,derekwaynecarr,1
Secrets should be consumable from pods in volume with mappings,jbeda,1
Secrets should be consumable from pods in volume with mappings and Item Mode set,quinton-hoole,1
Secrets should be consumable in multiple volumes in a pod,alex-mohr,1
Security Context should support container.SecurityContext.RunAsUser,alex-mohr,1
Security Context should support pod.Spec.SecurityContext.RunAsUser,bgrant0607,1
-Security Context should support pod.Spec.SecurityContext.SupplementalGroups,andyzheng0831,1
+Security Context should support pod.Spec.SecurityContext.SupplementalGroups,derekwaynecarr,0
Security Context should support seccomp alpha docker/default annotation,freehan,1
Security Context should support seccomp alpha unconfined annotation on the container,childsb,1
Security Context should support seccomp alpha unconfined annotation on the pod,krousey,1
@@ -447,8 +450,7 @@ Services should be able to create a functioning NodePort service,bprashanth,0
Services should be able to up and down services,bprashanth,0
Services should check NodePort out-of-range,bprashanth,0
Services should create endpoints for unready pods,maisem,0
-Services should only allow access from service loadbalancer source ranges,sttts,0
+Services should only allow access from service loadbalancer source ranges,derekwaynecarr,0
-Services should only allow access from service loadbalancer source ranges,madhusudancs,0
Services should preserve source pod IP for traffic thru service cluster IP,Random-Liu,1
Services should prevent NodePort collisions,bprashanth,0
Services should provide secure master service,bprashanth,0
@@ -458,7 +460,7 @@ Services should serve multiport endpoints from pods,bprashanth,0
Services should use same NodePort with same port but different protocols,timothysc,1
Services should work after restarting apiserver,bprashanth,0
Services should work after restarting kube-proxy,bprashanth,0
-SimpleMount should be able to mount an emptydir on a container,karlkfi,1
+SimpleMount should be able to mount an emptydir on a container,derekwaynecarr,0
"Spark should start spark master, driver and workers",jszczepkowski,1
"Staging client repo client should create pods, delete pods, watch pods",jbeda,1
Stateful Set recreate should recreate evicted statefulset,derekwaynecarr,0
@@ -491,19 +493,18 @@ V1Job should run a job to completion when tasks sometimes fail and are not local
V1Job should run a job to completion when tasks succeed,soltysh,1
V1Job should scale a job down,soltysh,1
V1Job should scale a job up,soltysh,1
-Variable Expansion should allow composing env vars into new env vars,ghodss,1
+Variable Expansion should allow composing env vars into new env vars,derekwaynecarr,0
Variable Expansion should allow substituting values in a container's args,dchen1107,1
Variable Expansion should allow substituting values in a container's command,mml,1
Volumes Ceph RBD should be mountable,fabioy,1
Volumes CephFS should be mountable,Q-Lee,1
Volumes Cinder should be mountable,cjcullen,1
Volumes GlusterFS should be mountable,eparis,1
-Volumes NFS should be mountable,andyzheng0831,1
+Volumes NFS should be mountable,derekwaynecarr,0
Volumes PD should be mountable,caesarxuchao,1
Volumes iSCSI should be mountable,jsafrane,1
k8s.io/kubernetes/cmd/genutils,rmmh,1
k8s.io/kubernetes/cmd/hyperkube,jbeda,0
-k8s.io/kubernetes/cmd/kube-apiserver/app,nikhiljindal,0
k8s.io/kubernetes/cmd/kube-apiserver/app/options,nikhiljindal,0
k8s.io/kubernetes/cmd/kube-discovery/app,pmorie,1
k8s.io/kubernetes/cmd/kube-proxy/app,luxas,1
@@ -514,7 +515,7 @@ k8s.io/kubernetes/cmd/kubeadm/app/node,apprenda,0
k8s.io/kubernetes/cmd/kubeadm/app/preflight,apprenda,0
k8s.io/kubernetes/cmd/kubeadm/app/util,krousey,1
k8s.io/kubernetes/cmd/kubeadm/test,pipejakob,0
-k8s.io/kubernetes/cmd/kubelet/app,hurf,1
+k8s.io/kubernetes/cmd/kubelet/app,derekwaynecarr,0
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/types,caesarxuchao,0
k8s.io/kubernetes/cmd/libs/go2idl/go-to-protobuf/protobuf,smarterclayton,0
k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen/generators,davidopp,1
@@ -531,7 +532,7 @@ k8s.io/kubernetes/federation/pkg/federation-controller/configmap,mwielgus,0
k8s.io/kubernetes/federation/pkg/federation-controller/daemonset,childsb,1
k8s.io/kubernetes/federation/pkg/federation-controller/deployment,zmerlynn,1
k8s.io/kubernetes/federation/pkg/federation-controller/ingress,vishh,1
-k8s.io/kubernetes/federation/pkg/federation-controller/namespace,hurf,1
+k8s.io/kubernetes/federation/pkg/federation-controller/namespace,derekwaynecarr,0
k8s.io/kubernetes/federation/pkg/federation-controller/replicaset,roberthbailey,1
k8s.io/kubernetes/federation/pkg/federation-controller/secret,apelisse,1
k8s.io/kubernetes/federation/pkg/federation-controller/service,pmorie,1
@@ -554,14 +555,11 @@ k8s.io/kubernetes/pkg/api/meta,fabioy,1
k8s.io/kubernetes/pkg/api/resource,smarterclayton,1
k8s.io/kubernetes/pkg/api/service,spxtr,1
k8s.io/kubernetes/pkg/api/testapi,caesarxuchao,1
-k8s.io/kubernetes/pkg/api/util,ghodss,1
+k8s.io/kubernetes/pkg/api/util,derekwaynecarr,0
-k8s.io/kubernetes/pkg/api/v1,vulpecula,1
+k8s.io/kubernetes/pkg/api/v1,derekwaynecarr,0
-k8s.io/kubernetes/pkg/api/v1/endpoints,sttts,0
+k8s.io/kubernetes/pkg/api/v1/endpoints,derekwaynecarr,0
-k8s.io/kubernetes/pkg/api/v1/pod,sttts,0
+k8s.io/kubernetes/pkg/api/v1/pod,derekwaynecarr,0
-k8s.io/kubernetes/pkg/api/v1/service,sttts,0
+k8s.io/kubernetes/pkg/api/v1/service,derekwaynecarr,0
-k8s.io/kubernetes/pkg/api/v1/endpoints,madhusudancs,0
-k8s.io/kubernetes/pkg/api/v1/pod,madhusudancs,0
-k8s.io/kubernetes/pkg/api/v1/service,madhusudancs,0
k8s.io/kubernetes/pkg/api/validation,smarterclayton,1
k8s.io/kubernetes/pkg/api/validation/path,luxas,1
k8s.io/kubernetes/pkg/apimachinery,gmarek,1
@@ -580,7 +578,7 @@ k8s.io/kubernetes/pkg/apis/extensions,bgrant0607,1
k8s.io/kubernetes/pkg/apis/extensions/v1beta1,madhusudancs,1
k8s.io/kubernetes/pkg/apis/extensions/validation,nikhiljindal,1
k8s.io/kubernetes/pkg/apis/meta/v1,sttts,0
-k8s.io/kubernetes/pkg/apis/meta/v1/validation,jszczepkowski,1
+k8s.io/kubernetes/pkg/apis/meta/v1/validation,derekwaynecarr,0
k8s.io/kubernetes/pkg/apis/policy/validation,deads2k,1
k8s.io/kubernetes/pkg/apis/rbac/validation,erictune,0
k8s.io/kubernetes/pkg/apis/storage/validation,caesarxuchao,1
@@ -590,13 +588,13 @@ k8s.io/kubernetes/pkg/apiserver/request,lavalamp,1
k8s.io/kubernetes/pkg/auth/authenticator/bearertoken,liggitt,0
k8s.io/kubernetes/pkg/auth/authorizer/abac,liggitt,0
k8s.io/kubernetes/pkg/auth/authorizer/union,liggitt,0
-k8s.io/kubernetes/pkg/auth/group,andyzheng0831,1
+k8s.io/kubernetes/pkg/auth/group,derekwaynecarr,0
k8s.io/kubernetes/pkg/auth/handlers,liggitt,0
k8s.io/kubernetes/pkg/client/cache,xiang90,1
k8s.io/kubernetes/pkg/client/chaosclient,deads2k,1
k8s.io/kubernetes/pkg/client/leaderelection,xiang90,1
k8s.io/kubernetes/pkg/client/listers/batch/internalversion,mqliang,0
-k8s.io/kubernetes/pkg/client/record,karlkfi,1
+k8s.io/kubernetes/pkg/client/record,derekwaynecarr,0
k8s.io/kubernetes/pkg/client/restclient,kargakis,1
k8s.io/kubernetes/pkg/client/retry,caesarxuchao,1
k8s.io/kubernetes/pkg/client/testing/cache,mikedanese,1
@@ -609,7 +607,7 @@ k8s.io/kubernetes/pkg/client/unversioned/auth,jbeda,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd,yifan-gu,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api,thockin,1
k8s.io/kubernetes/pkg/client/unversioned/portforward,lavalamp,1
-k8s.io/kubernetes/pkg/client/unversioned/remotecommand,andyzheng0831,1
+k8s.io/kubernetes/pkg/client/unversioned/remotecommand,derekwaynecarr,0
k8s.io/kubernetes/pkg/cloudprovider/providers/aws,eparis,1
k8s.io/kubernetes/pkg/cloudprovider/providers/azure,saad-ali,1
k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack,roberthbailey,1
@@ -630,20 +628,20 @@ k8s.io/kubernetes/pkg/controller/endpoint,mwielgus,1
k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1
k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly,cjcullen,1
k8s.io/kubernetes/pkg/controller/job,soltysh,1
-k8s.io/kubernetes/pkg/controller/namespace,karlkfi,1
+k8s.io/kubernetes/pkg/controller/namespace,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/node,gmarek,0
k8s.io/kubernetes/pkg/controller/petset,fgrzadkowski,1
k8s.io/kubernetes/pkg/controller/podautoscaler,piosz,0
k8s.io/kubernetes/pkg/controller/podautoscaler/metrics,piosz,0
-k8s.io/kubernetes/pkg/controller/podgc,jdef,1
+k8s.io/kubernetes/pkg/controller/podgc,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/replicaset,fgrzadkowski,0
k8s.io/kubernetes/pkg/controller/replication,fgrzadkowski,0
-k8s.io/kubernetes/pkg/controller/resourcequota,ghodss,1
+k8s.io/kubernetes/pkg/controller/resourcequota,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/route,gmarek,0
k8s.io/kubernetes/pkg/controller/service,asalkeld,0
k8s.io/kubernetes/pkg/controller/serviceaccount,liggitt,0
k8s.io/kubernetes/pkg/controller/volume/attachdetach,luxas,1
-k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache,hurf,1
+k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler,jsafrane,1
k8s.io/kubernetes/pkg/controller/volume/persistentvolume,jsafrane,0
k8s.io/kubernetes/pkg/conversion,ixdy,1
@@ -651,7 +649,7 @@ k8s.io/kubernetes/pkg/conversion/queryparams,caesarxuchao,1
k8s.io/kubernetes/pkg/credentialprovider,justinsb,1
k8s.io/kubernetes/pkg/credentialprovider/aws,zmerlynn,1
k8s.io/kubernetes/pkg/credentialprovider/gcp,mml,1
-k8s.io/kubernetes/pkg/dns,jdef,1
+k8s.io/kubernetes/pkg/dns,derekwaynecarr,0
k8s.io/kubernetes/pkg/dns/config,derekwaynecarr,0
k8s.io/kubernetes/pkg/dns/federation,derekwaynecarr,0
k8s.io/kubernetes/pkg/dns/treecache,bowei,0
@@ -670,7 +668,7 @@ k8s.io/kubernetes/pkg/kubectl/cmd,rmmh,1
k8s.io/kubernetes/pkg/kubectl/cmd/config,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/set,erictune,1
k8s.io/kubernetes/pkg/kubectl/cmd/util,asalkeld,0
-k8s.io/kubernetes/pkg/kubectl/cmd/util/editor,jdef,1
+k8s.io/kubernetes/pkg/kubectl/cmd/util/editor,derekwaynecarr,0
k8s.io/kubernetes/pkg/kubectl/resource,caesarxuchao,1
k8s.io/kubernetes/pkg/kubelet,vishh,0
k8s.io/kubernetes/pkg/kubelet/cadvisor,sttts,1
@@ -681,7 +679,7 @@ k8s.io/kubernetes/pkg/kubelet/container,yujuhong,0
k8s.io/kubernetes/pkg/kubelet/custommetrics,kevin-wangzefeng,0
k8s.io/kubernetes/pkg/kubelet/dockershim,zmerlynn,1
k8s.io/kubernetes/pkg/kubelet/dockertools,deads2k,1
-k8s.io/kubernetes/pkg/kubelet/envvars,karlkfi,1
+k8s.io/kubernetes/pkg/kubelet/envvars,derekwaynecarr,0
k8s.io/kubernetes/pkg/kubelet/eviction,childsb,1
k8s.io/kubernetes/pkg/kubelet/images,caesarxuchao,1
k8s.io/kubernetes/pkg/kubelet/kuberuntime,yifan-gu,1
@@ -709,7 +707,7 @@ k8s.io/kubernetes/pkg/kubelet/types,jlowdermilk,1
k8s.io/kubernetes/pkg/kubelet/util/cache,timothysc,1
k8s.io/kubernetes/pkg/kubelet/util/format,ncdc,1
k8s.io/kubernetes/pkg/kubelet/util/queue,yujuhong,0
-k8s.io/kubernetes/pkg/kubelet/volumemanager,jdef,1
+k8s.io/kubernetes/pkg/kubelet/volumemanager,derekwaynecarr,0
k8s.io/kubernetes/pkg/kubelet/volumemanager/cache,janetkuo,1
k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler,timstclair,1
k8s.io/kubernetes/pkg/labels,ixdy,1
@@ -718,7 +716,7 @@ k8s.io/kubernetes/pkg/probe/exec,bgrant0607,1
k8s.io/kubernetes/pkg/probe/http,mtaufen,1
k8s.io/kubernetes/pkg/probe/tcp,mtaufen,1
k8s.io/kubernetes/pkg/proxy/config,ixdy,1
-k8s.io/kubernetes/pkg/proxy/healthcheck,ghodss,1
+k8s.io/kubernetes/pkg/proxy/healthcheck,derekwaynecarr,0
k8s.io/kubernetes/pkg/proxy/iptables,freehan,0
k8s.io/kubernetes/pkg/proxy/userspace,luxas,1
k8s.io/kubernetes/pkg/proxy/winuserspace,jbhurat,0
@@ -751,16 +749,16 @@ k8s.io/kubernetes/pkg/registry/core/node/etcd,deads2k,1
k8s.io/kubernetes/pkg/registry/core/persistentvolume,lavalamp,1
k8s.io/kubernetes/pkg/registry/core/persistentvolume/etcd,derekwaynecarr,1
k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim,bgrant0607,1
-k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd,vulpecula,1
+k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/pod,Random-Liu,1
k8s.io/kubernetes/pkg/registry/core/pod/etcd,alex-mohr,1
k8s.io/kubernetes/pkg/registry/core/pod/rest,jsafrane,1
k8s.io/kubernetes/pkg/registry/core/podtemplate,thockin,1
k8s.io/kubernetes/pkg/registry/core/podtemplate/etcd,brendandburns,1
-k8s.io/kubernetes/pkg/registry/core/resourcequota,vulpecula,1
+k8s.io/kubernetes/pkg/registry/core/resourcequota,derekwaynecarr,0
-k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd,ghodss,1
+k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/rest,deads2k,0
-k8s.io/kubernetes/pkg/registry/core/secret,jdef,1
+k8s.io/kubernetes/pkg/registry/core/secret,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/secret/etcd,freehan,1
k8s.io/kubernetes/pkg/registry/core/service,madhusudancs,1
k8s.io/kubernetes/pkg/registry/core/service/allocator,jbeda,1
@@ -769,24 +767,24 @@ k8s.io/kubernetes/pkg/registry/core/service/etcd,apelisse,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator,eparis,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller,mtaufen,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator/etcd,kargakis,1
-k8s.io/kubernetes/pkg/registry/core/service/portallocator,jdef,1
+k8s.io/kubernetes/pkg/registry/core/service/portallocator,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/serviceaccount,caesarxuchao,1
k8s.io/kubernetes/pkg/registry/core/serviceaccount/etcd,bprashanth,1
k8s.io/kubernetes/pkg/registry/extensions/controller/etcd,mwielgus,1
k8s.io/kubernetes/pkg/registry/extensions/daemonset,nikhiljindal,1
k8s.io/kubernetes/pkg/registry/extensions/daemonset/etcd,spxtr,1
k8s.io/kubernetes/pkg/registry/extensions/deployment,dchen1107,1
-k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd,ghodss,1
+k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/ingress,apelisse,1
k8s.io/kubernetes/pkg/registry/extensions/ingress/etcd,apelisse,1
k8s.io/kubernetes/pkg/registry/extensions/networkpolicy,deads2k,1
k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/etcd,ncdc,1
k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/etcd,erictune,1
-k8s.io/kubernetes/pkg/registry/extensions/replicaset,andyzheng0831,1
+k8s.io/kubernetes/pkg/registry/extensions/replicaset,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/replicaset/etcd,fabioy,1
-k8s.io/kubernetes/pkg/registry/extensions/rest,karlkfi,1
+k8s.io/kubernetes/pkg/registry/extensions/rest,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource,mwielgus,1
-k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd,vulpecula,1
+k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata,sttts,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/etcd,sttts,1
k8s.io/kubernetes/pkg/registry/generic/registry,jsafrane,1
@@ -796,8 +794,7 @@ k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/etcd,xiang90,1
k8s.io/kubernetes/pkg/registry/storage/storageclass,brendandburns,1
k8s.io/kubernetes/pkg/registry/storage/storageclass/etcd,eparis,1
k8s.io/kubernetes/pkg/runtime,wojtek-t,0
-k8s.io/kubernetes/pkg/runtime/schema,sttts,0
+k8s.io/kubernetes/pkg/runtime/schema,derekwaynecarr,0
-k8s.io/kubernetes/pkg/runtime/schema,madhusudancs,0
k8s.io/kubernetes/pkg/runtime/serializer,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/json,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/protobuf,wojtek-t,0
@ -806,12 +803,12 @@ k8s.io/kubernetes/pkg/runtime/serializer/streaming,wojtek-t,0
|
||||||
k8s.io/kubernetes/pkg/runtime/serializer/versioning,wojtek-t,0
|
k8s.io/kubernetes/pkg/runtime/serializer/versioning,wojtek-t,0
|
||||||
k8s.io/kubernetes/pkg/security/apparmor,bgrant0607,1
|
k8s.io/kubernetes/pkg/security/apparmor,bgrant0607,1
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy,erictune,0
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy,erictune,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor,vulpecula,1
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor,derekwaynecarr,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities,erictune,0
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities,erictune,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/group,erictune,0
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/group,erictune,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp,rmmh,1
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp,rmmh,1
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux,erictune,0
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux,erictune,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl,andyzheng0831,1
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl,derekwaynecarr,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/user,erictune,0
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/user,erictune,0
|
||||||
k8s.io/kubernetes/pkg/security/podsecuritypolicy/util,erictune,0
|
k8s.io/kubernetes/pkg/security/podsecuritypolicy/util,erictune,0
|
||||||
k8s.io/kubernetes/pkg/securitycontext,erictune,1
|
k8s.io/kubernetes/pkg/securitycontext,erictune,1
|
||||||
@@ -826,7 +823,7 @@ k8s.io/kubernetes/pkg/util,jbeda,1
k8s.io/kubernetes/pkg/util/async,spxtr,1
k8s.io/kubernetes/pkg/util/bandwidth,thockin,1
k8s.io/kubernetes/pkg/util/cache,thockin,1
-k8s.io/kubernetes/pkg/util/cert,karlkfi,1
+k8s.io/kubernetes/pkg/util/cert,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/clock,zmerlynn,1
k8s.io/kubernetes/pkg/util/config,jszczepkowski,1
k8s.io/kubernetes/pkg/util/configz,ixdy,1
@@ -836,7 +833,7 @@ k8s.io/kubernetes/pkg/util/env,asalkeld,0
k8s.io/kubernetes/pkg/util/errors,jlowdermilk,1
k8s.io/kubernetes/pkg/util/exec,krousey,1
k8s.io/kubernetes/pkg/util/flowcontrol,ixdy,1
-k8s.io/kubernetes/pkg/util/flushwriter,vulpecula,1
+k8s.io/kubernetes/pkg/util/flushwriter,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/framer,piosz,1
k8s.io/kubernetes/pkg/util/goroutinemap,saad-ali,0
k8s.io/kubernetes/pkg/util/hash,timothysc,1
@@ -845,7 +842,7 @@ k8s.io/kubernetes/pkg/util/httpstream/spdy,zmerlynn,1
k8s.io/kubernetes/pkg/util/integer,childsb,1
k8s.io/kubernetes/pkg/util/intstr,brendandburns,1
k8s.io/kubernetes/pkg/util/io,mtaufen,1
-k8s.io/kubernetes/pkg/util/iptables,hurf,1
+k8s.io/kubernetes/pkg/util/iptables,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/json,liggitt,0
k8s.io/kubernetes/pkg/util/jsonpath,spxtr,1
k8s.io/kubernetes/pkg/util/keymutex,saad-ali,0
@@ -853,20 +850,20 @@ k8s.io/kubernetes/pkg/util/labels,rmmh,1
k8s.io/kubernetes/pkg/util/limitwriter,deads2k,1
k8s.io/kubernetes/pkg/util/mount,xiang90,1
k8s.io/kubernetes/pkg/util/net,spxtr,1
-k8s.io/kubernetes/pkg/util/net/sets,jdef,1
+k8s.io/kubernetes/pkg/util/net/sets,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/node,liggitt,0
k8s.io/kubernetes/pkg/util/oom,vishh,0
k8s.io/kubernetes/pkg/util/parsers,derekwaynecarr,1
k8s.io/kubernetes/pkg/util/procfs,roberthbailey,1
k8s.io/kubernetes/pkg/util/proxy,cjcullen,1
k8s.io/kubernetes/pkg/util/rand,madhusudancs,1
-k8s.io/kubernetes/pkg/util/ratelimit,justinsb,1
k8s.io/kubernetes/pkg/util/runtime,davidopp,1
k8s.io/kubernetes/pkg/util/sets,quinton-hoole,0
k8s.io/kubernetes/pkg/util/slice,quinton-hoole,0
k8s.io/kubernetes/pkg/util/strategicpatch,brendandburns,1
k8s.io/kubernetes/pkg/util/strings,quinton-hoole,0
k8s.io/kubernetes/pkg/util/system,mwielgus,0
+k8s.io/kubernetes/pkg/util/taints,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/term,davidopp,1
k8s.io/kubernetes/pkg/util/testing,jlowdermilk,1
k8s.io/kubernetes/pkg/util/threading,roberthbailey,1
@@ -886,7 +883,7 @@ k8s.io/kubernetes/pkg/volume/cinder,jsafrane,1
k8s.io/kubernetes/pkg/volume/configmap,derekwaynecarr,1
k8s.io/kubernetes/pkg/volume/downwardapi,mikedanese,1
k8s.io/kubernetes/pkg/volume/empty_dir,quinton-hoole,1
-k8s.io/kubernetes/pkg/volume/fc,andyzheng0831,1
+k8s.io/kubernetes/pkg/volume/fc,derekwaynecarr,0
k8s.io/kubernetes/pkg/volume/flexvolume,Q-Lee,1
k8s.io/kubernetes/pkg/volume/flocker,jbeda,1
k8s.io/kubernetes/pkg/volume/gce_pd,saad-ali,0
@@ -916,11 +913,11 @@ k8s.io/kubernetes/plugin/pkg/admission/limitranger,ncdc,1
k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/namespace/exists,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle,derekwaynecarr,0
-k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label,jdef,1
+k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/podnodeselector,ixdy,1
k8s.io/kubernetes/plugin/pkg/admission/resourcequota,fabioy,1
k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy,maisem,1
-k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny,vulpecula,1
+k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/serviceaccount,liggitt,0
k8s.io/kubernetes/plugin/pkg/admission/storageclass/default,pmorie,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/allow,liggitt,0
@@ -933,10 +930,10 @@ k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/x509,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/anytoken,krousey,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc,brendandburns,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile,liggitt,0
-k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook,ghodss,1
+k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook,derekwaynecarr,0
-k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,hurf,1
+k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy,mml,1
-k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook,hurf,1
+k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/client/auth/gcp,jlowdermilk,0
k8s.io/kubernetes/plugin/pkg/client/auth/oidc,cjcullen,1
k8s.io/kubernetes/plugin/pkg/scheduler,fgrzadkowski,0
@@ -956,14 +953,13 @@ k8s.io/kubernetes/test/integration/client,Q-Lee,1
k8s.io/kubernetes/test/integration/configmap,Q-Lee,1
k8s.io/kubernetes/test/integration/discoverysummarizer,fabioy,1
k8s.io/kubernetes/test/integration/examples,maisem,1
-k8s.io/kubernetes/test/integration/federation,vulpecula,1
+k8s.io/kubernetes/test/integration/federation,derekwaynecarr,0
k8s.io/kubernetes/test/integration/garbagecollector,jlowdermilk,1
-k8s.io/kubernetes/test/integration/kubectl,vulpecula,1
+k8s.io/kubernetes/test/integration/kubectl,derekwaynecarr,0
k8s.io/kubernetes/test/integration/master,fabioy,1
k8s.io/kubernetes/test/integration/metrics,lavalamp,1
k8s.io/kubernetes/test/integration/objectmeta,janetkuo,1
k8s.io/kubernetes/test/integration/openshift,kevin-wangzefeng,1
-k8s.io/kubernetes/test/integration/persistentvolumes,cjcullen,1
k8s.io/kubernetes/test/integration/pods,smarterclayton,1
k8s.io/kubernetes/test/integration/quota,alex-mohr,1
k8s.io/kubernetes/test/integration/replicaset,janetkuo,1
@@ -972,8 +968,9 @@ k8s.io/kubernetes/test/integration/scheduler,mikedanese,1
k8s.io/kubernetes/test/integration/scheduler_perf,roberthbailey,1
k8s.io/kubernetes/test/integration/secrets,rmmh,1
k8s.io/kubernetes/test/integration/serviceaccount,deads2k,1
-k8s.io/kubernetes/test/integration/storageclasses,andyzheng0831,1
+k8s.io/kubernetes/test/integration/storageclasses,derekwaynecarr,0
k8s.io/kubernetes/test/integration/thirdparty,davidopp,1
+k8s.io/kubernetes/test/integration/volume,derekwaynecarr,0
k8s.io/kubernetes/test/list,maisem,1
kubelet Clean up pods on node kubelet should be able to delete * pods per node in *.,yujuhong,0
"when we run containers that should cause * should eventually see *, and then evict all of the correct pods",Random-Liu,0