Ability to quota storage by storage class

Derek Carr 2016-12-07 15:44:16 -05:00
parent 7168fce59a
commit 459a7a05f1
24 changed files with 794 additions and 492 deletions

View File

@ -197,6 +197,7 @@ pkg/kubelet/volumemanager/populator
pkg/kubelet/volumemanager/reconciler
pkg/proxy/config
pkg/proxy/healthcheck
pkg/quota
pkg/quota/install
pkg/registry
pkg/registry/authorization/util

View File

@ -182,9 +182,8 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
for constraint := range resourceQuota.Status.Hard {
if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
matchedResources := []api.ResourceName{api.ResourceName(constraint)}
for _, evaluator := range rq.registry.Evaluators() {
if intersection := quota.Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) != 0 {
if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
rq.missingUsageQueue.Add(key)
return
}
@ -348,7 +347,6 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, na
}
// only queue those quotas that are tracking a resource associated with this kind.
matchedResources := evaluator.MatchesResources()
for i := range resourceQuotas {
resourceQuota := resourceQuotas[i].(*v1.ResourceQuota)
internalResourceQuota := &api.ResourceQuota{}
@ -357,7 +355,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, na
continue
}
resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
if len(quota.Intersection(matchedResources, resourceQuotaResources)) > 0 {
if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 {
// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
rq.enqueueResourceQuota(resourceQuota)
}

View File

@ -533,12 +533,18 @@ func memory(stats statsFunc) cmpFunc {
// adjust p1, p2 usage relative to the request (if any)
p1Memory := p1Usage[v1.ResourceMemory]
p1Spec := core.PodUsageFunc(p1)
p1Spec, err := core.PodUsageFunc(p1)
if err != nil {
return -1
}
p1Request := p1Spec[api.ResourceRequestsMemory]
p1Memory.Sub(p1Request)
p2Memory := p2Usage[v1.ResourceMemory]
p2Spec := core.PodUsageFunc(p2)
p2Spec, err := core.PodUsageFunc(p2)
if err != nil {
return 1
}
p2Request := p2Spec[api.ResourceRequestsMemory]
p2Memory.Sub(p2Request)

View File

@ -30,7 +30,7 @@ go_library(
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/validation:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/kubelet/qos:go_default_library",
@ -56,6 +56,7 @@ go_test(
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/quota:go_default_library",
],

View File

@ -17,7 +17,6 @@ limitations under the License.
package core
import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@ -28,17 +27,10 @@ import (
// NewConfigMapEvaluator returns an evaluator that can evaluate configMaps
func NewConfigMapEvaluator(kubeClient clientset.Interface) quota.Evaluator {
allResources := []api.ResourceName{api.ResourceConfigMaps}
return &generic.GenericEvaluator{
Name: "Evaluator.ConfigMap",
return &generic.ObjectCountEvaluator{
AllowCreateOnUpdate: false,
InternalGroupKind: api.Kind("ConfigMap"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
},
MatchedResourceNames: allResources,
MatchesScopeFunc: generic.MatchesNoScopeFunc,
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceConfigMaps),
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceConfigMaps),
ResourceName: api.ResourceConfigMaps,
ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().ConfigMaps(namespace).List(options)
if err != nil {

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/storage/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/quota"
@ -33,6 +34,31 @@ import (
"k8s.io/kubernetes/pkg/util/sets"
)
// pvcResources are the set of static resources managed by quota associated with pvcs.
// for each resource in this list, it may be refined dynamically based on storage class.
var pvcResources = []api.ResourceName{
api.ResourcePersistentVolumeClaims,
api.ResourceRequestsStorage,
}
// storageClassSuffix is the suffix to the qualified portion of storage class resource name.
// For example, if you want to quota storage by storage class, you would have a declaration
// that follows <storage-class>.storageclass.storage.k8s.io/<resource>.
// For example:
// * gold.storageclass.storage.k8s.io/requests.storage: 500Gi
// * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi
const storageClassSuffix string = ".storageclass.storage.k8s.io/"
// ResourceByStorageClass returns a quota resource name by storage class.
func ResourceByStorageClass(storageClass string, resourceName api.ResourceName) api.ResourceName {
return api.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
}
// V1ResourceByStorageClass returns a quota resource name by storage class.
func V1ResourceByStorageClass(storageClass string, resourceName v1.ResourceName) v1.ResourceName {
return v1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
}
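// For illustration (a sketch; the class names below are assumptions), the two
// helpers simply concatenate class name, suffix, and resource name:
//
//	ResourceByStorageClass("gold", api.ResourceRequestsStorage)
//	// -> "gold.storageclass.storage.k8s.io/requests.storage"
//	ResourceByStorageClass("bronze", api.ResourcePersistentVolumeClaims)
//	// -> "bronze.storageclass.storage.k8s.io/persistentvolumeclaims"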
// listPersistentVolumeClaimsByNamespaceFuncUsingClient returns a pvc listing function based on the provided client.
func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace {
// TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this.
@ -54,59 +80,49 @@ func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.I
// NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
// if the specified shared informer factory is not nil, evaluator may use it to support listing functions.
func NewPersistentVolumeClaimEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
allResources := []api.ResourceName{api.ResourcePersistentVolumeClaims, api.ResourceRequestsStorage}
listFuncByNamespace := listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient)
if f != nil {
listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, schema.GroupResource{Resource: "persistentvolumeclaims"})
}
return &generic.GenericEvaluator{
Name: "Evaluator.PersistentVolumeClaim",
InternalGroupKind: api.Kind("PersistentVolumeClaim"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
},
MatchedResourceNames: allResources,
MatchesScopeFunc: generic.MatchesNoScopeFunc,
ConstraintsFunc: PersistentVolumeClaimConstraintsFunc,
UsageFunc: PersistentVolumeClaimUsageFunc,
ListFuncByNamespace: listFuncByNamespace,
return &pvcEvaluator{
listFuncByNamespace: listFuncByNamespace,
}
}
// PersistentVolumeClaimUsageFunc knows how to measure usage associated with persistent volume claims
func PersistentVolumeClaimUsageFunc(object runtime.Object) api.ResourceList {
result := api.ResourceList{}
var found bool
var request resource.Quantity
switch t := object.(type) {
case *v1.PersistentVolumeClaim:
request, found = t.Spec.Resources.Requests[v1.ResourceStorage]
case *api.PersistentVolumeClaim:
request, found = t.Spec.Resources.Requests[api.ResourceStorage]
default:
panic(fmt.Sprintf("expect *api.PersistenVolumeClaim or *v1.PersistentVolumeClaim, got %v", t))
}
result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
if found {
result[api.ResourceRequestsStorage] = request
}
return result
// pvcEvaluator knows how to evaluate quota usage for persistent volume claims
type pvcEvaluator struct {
// listFuncByNamespace knows how to list pvc claims
listFuncByNamespace generic.ListFuncByNamespace
}
// PersistentVolumeClaimConstraintsFunc verifies that all required resources are present on the claim
// In addition, it validates that the resources are valid (i.e. requests < limits)
func PersistentVolumeClaimConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
pvc, ok := object.(*api.PersistentVolumeClaim)
// Constraints verifies that all required resources are present on the item.
func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
pvc, ok := item.(*api.PersistentVolumeClaim)
if !ok {
return fmt.Errorf("unexpected input object %v", object)
return fmt.Errorf("unexpected input object %v", item)
}
requiredSet := quota.ToSet(required)
// these are the items that we will be handling based on the object's actual storage class
pvcRequiredSet := append([]api.ResourceName{}, pvcResources...)
if storageClassRef := util.GetClaimStorageClass(pvc); len(storageClassRef) > 0 {
pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourcePersistentVolumeClaims))
pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourceRequestsStorage))
}
// in effect, this will remove things from the required set that are not tied to this pvc's storage class
// for example, if a quota has bronze and gold storage class items defined, we should not reject a bronze pvc for not being gold,
// but we should reject a bronze pvc that does not specify a storage request size.
requiredResources := quota.Intersection(required, pvcRequiredSet)
requiredSet := quota.ToSet(requiredResources)
// usage for this pvc will only include the global pvc items plus the items specific to its storage class
pvcUsage, err := p.Usage(item)
if err != nil {
return err
}
// determine what required resources were not tracked by usage.
missingSet := sets.NewString()
pvcUsage := PersistentVolumeClaimUsageFunc(pvc)
pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage))
if diff := requiredSet.Difference(pvcSet); len(diff) > 0 {
missingSet.Insert(diff.List()...)
@ -116,3 +132,89 @@ func PersistentVolumeClaimConstraintsFunc(required []api.ResourceName, object ru
}
return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
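// Worked example (a sketch; the class names are assumptions): if the required
// list contains both gold and bronze scoped storage resources,
//
//	required := []api.ResourceName{
//		ResourceByStorageClass("gold", api.ResourceRequestsStorage),
//		ResourceByStorageClass("bronze", api.ResourceRequestsStorage),
//	}
//
// then for a claim annotated with the "bronze" class, pvcRequiredSet holds the
// global pvc resources plus only the bronze-scoped names, so the Intersection
// drops the gold entry and the claim is checked only against
// bronze.storageclass.storage.k8s.io/requests.storage.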
// GroupKind that this evaluator tracks
func (p *pvcEvaluator) GroupKind() schema.GroupKind {
return api.Kind("PersistentVolumeClaim")
}
// Handles returns true if the evaluator should handle the specified operation.
func (p *pvcEvaluator) Handles(operation admission.Operation) bool {
return admission.Create == operation
}
// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *pvcEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
}
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.ResourceName {
result := []api.ResourceName{}
for _, item := range items {
if quota.Contains(pvcResources, item) {
result = append(result, item)
continue
}
// match pvc resources scoped by storage class (<storage-class-name>.storageclass.storage.k8s.io/<resource>)
for _, resource := range pvcResources {
byStorageClass := storageClassSuffix + string(resource)
if strings.HasSuffix(string(item), byStorageClass) {
result = append(result, item)
break
}
}
}
return result
}
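// Example (a sketch; the input names are assumptions): given
//
//	input := []api.ResourceName{
//		api.ResourcePods,                                    // unrelated to pvcs, dropped
//		api.ResourceRequestsStorage,                         // static pvc resource, kept
//		"gold.storageclass.storage.k8s.io/requests.storage", // matches by suffix, kept
//	}
//
// p.MatchingResources(input) returns only the last two entries.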
// Usage knows how to measure usage associated with item.
func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
result := api.ResourceList{}
pvc, err := toInternalPersistentVolumeClaimOrError(item)
if err != nil {
return result, err
}
storageClassRef := util.GetClaimStorageClass(pvc)
// charge for claim
result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
if len(storageClassRef) > 0 {
storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims))
result[storageClassClaim] = resource.MustParse("1")
}
// charge for storage
if request, found := pvc.Spec.Resources.Requests[api.ResourceStorage]; found {
result[api.ResourceRequestsStorage] = request
// charge usage to the storage class (if present)
if len(storageClassRef) > 0 {
storageClassStorage := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourceRequestsStorage))
result[storageClassStorage] = request
}
}
return result, nil
}
// UsageStats calculates aggregate usage for the object.
func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
}
// ensure we implement required interface
var _ quota.Evaluator = &pvcEvaluator{}
func toInternalPersistentVolumeClaimOrError(obj runtime.Object) (*api.PersistentVolumeClaim, error) {
pvc := &api.PersistentVolumeClaim{}
switch t := obj.(type) {
case *v1.PersistentVolumeClaim:
if err := v1.Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(t, pvc, nil); err != nil {
return nil, err
}
case *api.PersistentVolumeClaim:
pvc = t
default:
return nil, fmt.Errorf("expect *api.PersistentVolumeClaim or *v1.PersistentVolumeClaim, got %v", t)
}
return pvc, nil
}

View File

@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/storage/util"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/quota"
)
@ -53,6 +54,52 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
},
},
})
validClaimGoldStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "key2",
Operator: "Exists",
},
},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
},
})
validClaimGoldStorageClass.Annotations = map[string]string{
util.StorageClassAnnotation: "gold",
}
validClaimBronzeStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "key2",
Operator: "Exists",
},
},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
},
})
validClaimBronzeStorageClass.Annotations = map[string]string{
util.StorageClassAnnotation: "bronze",
}
missingStorage := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
@ -71,6 +118,27 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
},
})
missingGoldStorage := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "key2",
Operator: "Exists",
},
},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{},
},
})
missingGoldStorage.Annotations = map[string]string{
util.StorageClassAnnotation: "gold",
}
testCases := map[string]struct {
pvc *api.PersistentVolumeClaim
required []api.ResourceName
@ -81,6 +149,11 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
required: []api.ResourceName{api.ResourceRequestsStorage},
err: `must specify requests.storage`,
},
"missing gold storage": {
pvc: missingGoldStorage,
required: []api.ResourceName{ResourceByStorageClass("gold", api.ResourceRequestsStorage)},
err: `must specify gold.storageclass.storage.k8s.io/requests.storage`,
},
"valid-claim-quota-storage": {
pvc: validClaim,
required: []api.ResourceName{api.ResourceRequestsStorage},
@ -93,9 +166,30 @@ func TestPersistentVolumeClaimsConstraintsFunc(t *testing.T) {
pvc: validClaim,
required: []api.ResourceName{api.ResourceRequestsStorage, api.ResourcePersistentVolumeClaims},
},
"valid-claim-gold-quota-gold": {
pvc: validClaimGoldStorageClass,
required: []api.ResourceName{
api.ResourceRequestsStorage,
api.ResourcePersistentVolumeClaims,
ResourceByStorageClass("gold", api.ResourceRequestsStorage),
ResourceByStorageClass("gold", api.ResourcePersistentVolumeClaims),
},
},
"valid-claim-bronze-with-quota-gold": {
pvc: validClaimBronzeStorageClass,
required: []api.ResourceName{
api.ResourceRequestsStorage,
api.ResourcePersistentVolumeClaims,
ResourceByStorageClass("gold", api.ResourceRequestsStorage),
ResourceByStorageClass("gold", api.ResourcePersistentVolumeClaims),
},
},
}
kubeClient := fake.NewSimpleClientset()
evaluator := NewPersistentVolumeClaimEvaluator(kubeClient, nil)
for testName, test := range testCases {
err := PersistentVolumeClaimConstraintsFunc(test.required, test.pvc)
err := evaluator.Constraints(test.required, test.pvc)
switch {
case err != nil && len(test.err) == 0,
err == nil && len(test.err) != 0,
@ -125,6 +219,29 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
},
},
})
validClaimByStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "key2",
Operator: "Exists",
},
},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
},
},
})
storageClassName := "gold"
validClaimByStorageClass.Annotations = map[string]string{
util.StorageClassAnnotation: storageClassName,
}
kubeClient := fake.NewSimpleClientset()
evaluator := NewPersistentVolumeClaimEvaluator(kubeClient, nil)
@ -139,9 +256,21 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
api.ResourcePersistentVolumeClaims: resource.MustParse("1"),
},
},
"pvc-usage-by-class": {
pvc: validClaimByStorageClass,
usage: api.ResourceList{
api.ResourceRequestsStorage: resource.MustParse("10Gi"),
api.ResourcePersistentVolumeClaims: resource.MustParse("1"),
ResourceByStorageClass(storageClassName, api.ResourceRequestsStorage): resource.MustParse("10Gi"),
ResourceByStorageClass(storageClassName, api.ResourcePersistentVolumeClaims): resource.MustParse("1"),
},
},
}
for testName, testCase := range testCases {
actual := evaluator.Usage(testCase.pvc)
actual, err := evaluator.Usage(testCase.pvc)
if err != nil {
t.Errorf("%s unexpected error: %v", testName, err)
}
if !quota.Equals(testCase.usage, actual) {
t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual)
}

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/kubelet/qos"
@ -37,6 +36,17 @@ import (
"k8s.io/kubernetes/pkg/util/validation/field"
)
// podResources are the set of resources managed by quota associated with pods.
var podResources = []api.ResourceName{
api.ResourceCPU,
api.ResourceMemory,
api.ResourceRequestsCPU,
api.ResourceRequestsMemory,
api.ResourceLimitsCPU,
api.ResourceLimitsMemory,
api.ResourcePods,
}
// listPodsByNamespaceFuncUsingClient returns a pod listing function based on the provided client.
func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace {
// TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this.
@ -58,44 +68,27 @@ func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.
// NewPodEvaluator returns an evaluator that can evaluate pods
// if the specified shared informer factory is not nil, evaluator may use it to support listing functions.
func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
computeResources := []api.ResourceName{
api.ResourceCPU,
api.ResourceMemory,
api.ResourceRequestsCPU,
api.ResourceRequestsMemory,
api.ResourceLimitsCPU,
api.ResourceLimitsMemory,
}
allResources := append(computeResources, api.ResourcePods)
listFuncByNamespace := listPodsByNamespaceFuncUsingClient(kubeClient)
if f != nil {
listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, schema.GroupResource{Resource: "pods"})
}
return &generic.GenericEvaluator{
Name: "Evaluator.Pod",
InternalGroupKind: api.Kind("Pod"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
// TODO: the quota system can only charge for deltas on compute resources when pods support updates.
// admission.Update: computeResources,
},
GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
return kubeClient.Core().Pods(namespace).Get(name, metav1.GetOptions{})
},
ConstraintsFunc: PodConstraintsFunc,
MatchedResourceNames: allResources,
MatchesScopeFunc: PodMatchesScopeFunc,
UsageFunc: PodUsageFunc,
ListFuncByNamespace: listFuncByNamespace,
return &podEvaluator{
listFuncByNamespace: listFuncByNamespace,
}
}
// PodConstraintsFunc verifies that all required resources are present on the pod
// podEvaluator knows how to measure usage of pods.
type podEvaluator struct {
// knows how to list pods
listFuncByNamespace generic.ListFuncByNamespace
}
// Constraints verifies that all required resources are present on the pod
// In addition, it validates that the resources are valid (i.e. requests < limits)
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
pod, ok := object.(*api.Pod)
func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
pod, ok := item.(*api.Pod)
if !ok {
return fmt.Errorf("Unexpected input object %v", object)
return fmt.Errorf("Unexpected input object %v", item)
}
// Pod level resources are often set during admission control
@ -114,7 +107,7 @@ func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) erro
return allErrs.ToAggregate()
}
// TODO: fix this when we have pod level cgroups
// TODO: fix this when we have pod level resource requirements
// since we do not yet support pod level requests/limits, we need to ensure each
// container makes an explicit request or limit for a quota tracked resource
requiredSet := quota.ToSet(required)
@ -131,6 +124,40 @@ func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) erro
return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
// GroupKind that this evaluator tracks
func (p *podEvaluator) GroupKind() schema.GroupKind {
return api.Kind("Pod")
}
// Handles returns true if the evaluator should handle the specified operation.
func (p *podEvaluator) Handles(operation admission.Operation) bool {
// TODO: update this if/when pods support resizing resource requirements.
return admission.Create == operation
}
// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *podEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
return generic.Matches(resourceQuota, item, p.MatchingResources, podMatchesScopeFunc)
}
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
return quota.Intersection(input, podResources)
}
// Usage knows how to measure usage associated with pods
func (p *podEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
return PodUsageFunc(item)
}
// UsageStats calculates aggregate usage for the object.
func (p *podEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
return generic.CalculateUsageStats(options, p.listFuncByNamespace, podMatchesScopeFunc, p.Usage)
}
// verifies we implement the required interface.
var _ quota.Evaluator = &podEvaluator{}
// enforcePodContainerConstraints checks for required resources that are not set on this container and
// adds them to missingSet.
func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) {
@ -165,27 +192,49 @@ func podUsageHelper(requests api.ResourceList, limits api.ResourceList) api.Reso
return result
}
func toInternalPodOrDie(obj runtime.Object) *api.Pod {
func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) {
pod := &api.Pod{}
switch t := obj.(type) {
case *v1.Pod:
if err := v1.Convert_v1_Pod_To_api_Pod(t, pod, nil); err != nil {
panic(err)
return nil, err
}
case *api.Pod:
pod = t
default:
panic(fmt.Sprintf("expect *api.Pod or *v1.Pod, got %v", t))
return nil, fmt.Errorf("expect *api.Pod or *v1.Pod, got %v", t)
}
return pod
return pod, nil
}
// podMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope
func podMatchesScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) (bool, error) {
pod, err := toInternalPodOrError(object)
if err != nil {
return false, err
}
switch scope {
case api.ResourceQuotaScopeTerminating:
return isTerminating(pod), nil
case api.ResourceQuotaScopeNotTerminating:
return !isTerminating(pod), nil
case api.ResourceQuotaScopeBestEffort:
return isBestEffort(pod), nil
case api.ResourceQuotaScopeNotBestEffort:
return !isBestEffort(pod), nil
}
return false, nil
}
// PodUsageFunc knows how to measure usage associated with pods
func PodUsageFunc(obj runtime.Object) api.ResourceList {
pod := toInternalPodOrDie(obj)
func PodUsageFunc(obj runtime.Object) (api.ResourceList, error) {
pod, err := toInternalPodOrError(obj)
if err != nil {
return api.ResourceList{}, err
}
// by convention, we do not quota pods that have reached an end-of-life state
if !QuotaPod(pod) {
return api.ResourceList{}
return api.ResourceList{}, nil
}
requests := api.ResourceList{}
limits := api.ResourceList{}
@ -203,23 +252,7 @@ func PodUsageFunc(obj runtime.Object) api.ResourceList {
limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
}
return podUsageHelper(requests, limits)
}
// PodMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope
func PodMatchesScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bool {
pod := toInternalPodOrDie(object)
switch scope {
case api.ResourceQuotaScopeTerminating:
return isTerminating(pod)
case api.ResourceQuotaScopeNotTerminating:
return !isTerminating(pod)
case api.ResourceQuotaScopeBestEffort:
return isBestEffort(pod)
case api.ResourceQuotaScopeNotBestEffort:
return !isBestEffort(pod)
}
return false
return podUsageHelper(requests, limits), nil
}
func isBestEffort(pod *api.Pod) bool {
@ -234,7 +267,6 @@ func isTerminating(pod *api.Pod) bool {
}
// QuotaPod returns true if the pod is eligible to track against a quota
// i.e. if it is not in a terminal state according to its phase.
func QuotaPod(pod *api.Pod) bool {
return !(api.PodFailed == pod.Status.Phase || api.PodSucceeded == pod.Status.Phase)
}

View File

@ -86,8 +86,10 @@ func TestPodConstraintsFunc(t *testing.T) {
err: `must specify memory`,
},
}
kubeClient := fake.NewSimpleClientset()
evaluator := NewPodEvaluator(kubeClient, nil)
for testName, test := range testCases {
err := PodConstraintsFunc(test.required, test.pod)
err := evaluator.Constraints(test.required, test.pod)
switch {
case err != nil && len(test.err) == 0,
err == nil && len(test.err) != 0,
@ -245,7 +247,10 @@ func TestPodEvaluatorUsage(t *testing.T) {
},
}
for testName, testCase := range testCases {
actual := evaluator.Usage(testCase.pod)
actual, err := evaluator.Usage(testCase.pod)
if err != nil {
t.Errorf("%s unexpected error: %v", testName, err)
}
if !quota.Equals(testCase.usage, actual) {
t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual)
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package core
import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@ -28,17 +27,10 @@ import (
// NewReplicationControllerEvaluator returns an evaluator that can evaluate replication controllers
func NewReplicationControllerEvaluator(kubeClient clientset.Interface) quota.Evaluator {
allResources := []api.ResourceName{api.ResourceReplicationControllers}
return &generic.GenericEvaluator{
Name: "Evaluator.ReplicationController",
return &generic.ObjectCountEvaluator{
AllowCreateOnUpdate: false,
InternalGroupKind: api.Kind("ReplicationController"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
},
MatchedResourceNames: allResources,
MatchesScopeFunc: generic.MatchesNoScopeFunc,
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceReplicationControllers),
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceReplicationControllers),
ResourceName: api.ResourceReplicationControllers,
ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().ReplicationControllers(namespace).List(options)
if err != nil {

View File

@ -17,7 +17,6 @@ limitations under the License.
package core
import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@ -28,17 +27,10 @@ import (
// NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
allResources := []api.ResourceName{api.ResourceQuotas}
return &generic.GenericEvaluator{
Name: "Evaluator.ResourceQuota",
return &generic.ObjectCountEvaluator{
AllowCreateOnUpdate: false,
InternalGroupKind: api.Kind("ResourceQuota"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
},
MatchedResourceNames: allResources,
MatchesScopeFunc: generic.MatchesNoScopeFunc,
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceQuotas),
ResourceName: api.ResourceQuotas,
ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
if err != nil {

View File

@ -17,7 +17,6 @@ limitations under the License.
package core
import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@ -28,17 +27,10 @@ import (
// NewSecretEvaluator returns an evaluator that can evaluate secrets
func NewSecretEvaluator(kubeClient clientset.Interface) quota.Evaluator {
allResources := []api.ResourceName{api.ResourceSecrets}
return &generic.GenericEvaluator{
Name: "Evaluator.Secret",
return &generic.ObjectCountEvaluator{
AllowCreateOnUpdate: false,
InternalGroupKind: api.Kind("Secret"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
},
MatchedResourceNames: allResources,
MatchesScopeFunc: generic.MatchesNoScopeFunc,
ConstraintsFunc: generic.ObjectCountConstraintsFunc(api.ResourceSecrets),
UsageFunc: generic.ObjectCountUsageFunc(api.ResourceSecrets),
ResourceName: api.ResourceSecrets,
ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().Secrets(namespace).List(options)
if err != nil {

View File

@ -28,28 +28,21 @@ import (
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/quota/generic"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/sets"
)
// NewServiceEvaluator returns an evaluator that can evaluate service quotas
func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator {
allResources := []api.ResourceName{
// serviceResources are the set of resources managed by quota associated with services.
var serviceResources = []api.ResourceName{
api.ResourceServices,
api.ResourceServicesNodePorts,
api.ResourceServicesLoadBalancers,
}
return &generic.GenericEvaluator{
Name: "Evaluator.Service",
InternalGroupKind: api.Kind("Service"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: allResources,
admission.Update: allResources,
},
MatchedResourceNames: allResources,
MatchesScopeFunc: generic.MatchesNoScopeFunc,
ConstraintsFunc: ServiceConstraintsFunc,
UsageFunc: ServiceUsageFunc,
ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
}
// NewServiceEvaluator returns an evaluator that can evaluate service quotas
func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator {
return &serviceEvaluator{
listFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().Services(namespace).List(options)
if err != nil {
return nil, err
@ -63,39 +56,104 @@ func NewServiceEvaluator(kubeClient clientset.Interface) quota.Evaluator {
}
}
// ServiceUsageFunc knows how to measure usage associated with services
func ServiceUsageFunc(object runtime.Object) api.ResourceList {
result := api.ResourceList{}
var serviceType api.ServiceType
var ports int
// serviceEvaluator knows how to measure usage for services.
type serviceEvaluator struct {
// knows how to list items by namespace
listFuncByNamespace generic.ListFuncByNamespace
}
switch t := object.(type) {
case *v1.Service:
serviceType = api.ServiceType(t.Spec.Type)
ports = len(t.Spec.Ports)
case *api.Service:
serviceType = t.Spec.Type
ports = len(t.Spec.Ports)
default:
panic(fmt.Sprintf("expect *api.Service or *v1.Service, got %v", t))
// Constraints verifies that all required resources are present on the item
func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
service, ok := item.(*api.Service)
if !ok {
return fmt.Errorf("unexpected input object %v", item)
}
requiredSet := quota.ToSet(required)
missingSet := sets.NewString()
serviceUsage, err := p.Usage(service)
if err != nil {
return err
}
serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
missingSet.Insert(diff.List()...)
}
if len(missingSet) == 0 {
return nil
}
return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
// GroupKind that this evaluator tracks
func (p *serviceEvaluator) GroupKind() schema.GroupKind {
return api.Kind("Service")
}
// Handles returns true if the evaluator should handle the specified operation.
func (p *serviceEvaluator) Handles(operation admission.Operation) bool {
// We handle create and update because a service type can change.
return admission.Create == operation || admission.Update == operation
}
// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *serviceEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
}
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *serviceEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
return quota.Intersection(input, serviceResources)
}
// convert the input object to an internal service object or error.
func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) {
svc := &api.Service{}
switch t := obj.(type) {
case *v1.Service:
if err := v1.Convert_v1_Service_To_api_Service(t, svc, nil); err != nil {
return nil, err
}
case *api.Service:
svc = t
default:
return nil, fmt.Errorf("expect *api.Service or *v1.Service, got %v", t)
}
return svc, nil
}
// Usage knows how to measure usage associated with services
func (p *serviceEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
result := api.ResourceList{}
svc, err := toInternalServiceOrError(item)
if err != nil {
return result, err
}
ports := len(svc.Spec.Ports)
// default service usage
result[api.ResourceServices] = resource.MustParse("1")
result[api.ResourceServicesLoadBalancers] = resource.MustParse("0")
result[api.ResourceServicesNodePorts] = resource.MustParse("0")
switch serviceType {
result[api.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI))
result[api.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI}
result[api.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI}
switch svc.Spec.Type {
case api.ServiceTypeNodePort:
// node port services need to count node ports
value := resource.NewQuantity(int64(ports), resource.DecimalSI)
result[api.ResourceServicesNodePorts] = *value
case api.ServiceTypeLoadBalancer:
// load balancer services need to count load balancers
result[api.ResourceServicesLoadBalancers] = resource.MustParse("1")
result[api.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI))
}
return result
return result, nil
}
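// Example (a sketch; the port count is illustrative): a service of type
// NodePort exposing two ports is charged
//
//	api.ResourceList{
//		api.ResourceServices:              *resource.NewQuantity(1, resource.DecimalSI),
//		api.ResourceServicesNodePorts:     *resource.NewQuantity(2, resource.DecimalSI),
//		api.ResourceServicesLoadBalancers: resource.Quantity{Format: resource.DecimalSI}, // zero
//	}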
// UsageStats calculates aggregate usage for the object.
func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
}
var _ quota.Evaluator = &serviceEvaluator{}
// QuotaServiceType returns true if the service type is eligible to track against a quota
func QuotaServiceType(service *v1.Service) bool {
switch service.Spec.Type {
@ -115,24 +173,3 @@ func GetQuotaServiceType(service *v1.Service) v1.ServiceType {
}
return v1.ServiceType("")
}
// ServiceConstraintsFunc verifies that all required resources are captured in service usage.
func ServiceConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
service, ok := object.(*api.Service)
if !ok {
return fmt.Errorf("unexpected input object %v", object)
}
requiredSet := quota.ToSet(required)
missingSet := sets.NewString()
serviceUsage := ServiceUsageFunc(service)
serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
missingSet.Insert(diff.List()...)
}
if len(missingSet) == 0 {
return nil
}
return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}

View File

@ -28,12 +28,21 @@ import (
func TestServiceEvaluatorMatchesResources(t *testing.T) {
kubeClient := fake.NewSimpleClientset()
evaluator := NewServiceEvaluator(kubeClient)
// we give a lot of resources
input := []api.ResourceName{
api.ResourceConfigMaps,
api.ResourceCPU,
api.ResourceServices,
api.ResourceServicesNodePorts,
api.ResourceServicesLoadBalancers,
}
// but we only match these...
expected := quota.ToSet([]api.ResourceName{
api.ResourceServices,
api.ResourceServicesNodePorts,
api.ResourceServicesLoadBalancers,
})
actual := quota.ToSet(evaluator.MatchesResources())
actual := quota.ToSet(evaluator.MatchingResources(input))
if !expected.Equal(actual) {
t.Errorf("expected: %v, actual: %v", expected, actual)
}
@ -109,7 +118,10 @@ func TestServiceEvaluatorUsage(t *testing.T) {
},
}
for testName, testCase := range testCases {
actual := evaluator.Usage(testCase.service)
actual, err := evaluator.Usage(testCase.service)
if err != nil {
t.Errorf("%s unexpected error: %v", testName, err)
}
if !quota.Equals(testCase.usage, actual) {
t.Errorf("%s expected: %v, actual: %v", testName, testCase.usage, actual)
}
@ -168,8 +180,11 @@ func TestServiceConstraintsFunc(t *testing.T) {
required: []api.ResourceName{api.ResourceServicesNodePorts},
},
}
kubeClient := fake.NewSimpleClientset()
evaluator := NewServiceEvaluator(kubeClient)
for testName, test := range testCases {
err := ServiceConstraintsFunc(test.required, test.service)
err := evaluator.Constraints(test.required, test.service)
switch {
case err != nil && len(test.err) == 0,
err == nil && len(test.err) != 0,

View File

@ -45,167 +45,138 @@ func ListResourceUsingInformerFunc(f informers.SharedInformerFactory, groupResou
}
}
// ConstraintsFunc takes a list of required resources that must match on the input item
type ConstraintsFunc func(required []api.ResourceName, item runtime.Object) error
// GetFuncByNamespace knows how to get a resource with specified namespace and name
type GetFuncByNamespace func(namespace, name string) (runtime.Object, error)
// ListFuncByNamespace knows how to list resources in a namespace
type ListFuncByNamespace func(namespace string, options v1.ListOptions) ([]runtime.Object, error)
// MatchesScopeFunc knows how to evaluate if an object matches a scope
type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) bool
type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) (bool, error)
// UsageFunc knows how to measure usage associated with an object
type UsageFunc func(object runtime.Object) api.ResourceList
type UsageFunc func(object runtime.Object) (api.ResourceList, error)
// MatchingResourceNamesFunc is a function that returns the list of resources matched
type MatchingResourceNamesFunc func(input []api.ResourceName) []api.ResourceName
// MatchesNoScopeFunc returns false on all match checks
func MatchesNoScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bool {
return false
func MatchesNoScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) (bool, error) {
return false, nil
}
// ObjectCountConstraintsFunc returns ConstraintsFunc that returns nil if the
// specified resource name is in the required set of resource names
func ObjectCountConstraintsFunc(resourceName api.ResourceName) ConstraintsFunc {
return func(required []api.ResourceName, item runtime.Object) error {
if !quota.Contains(required, resourceName) {
return fmt.Errorf("missing %s", resourceName)
}
return nil
}
}
// ObjectCountUsageFunc is useful if you are only counting your object
// It always returns 1 as the usage for the named resource
func ObjectCountUsageFunc(resourceName api.ResourceName) UsageFunc {
return func(object runtime.Object) api.ResourceList {
return api.ResourceList{
resourceName: resource.MustParse("1"),
}
}
}
// GenericEvaluator provides an implementation for quota.Evaluator
type GenericEvaluator struct {
// Name used for logging
Name string
// The GroupKind that this evaluator tracks
InternalGroupKind schema.GroupKind
// The set of resources that are pertinent to the mapped operation
InternalOperationResources map[admission.Operation][]api.ResourceName
// The set of resource names this evaluator matches
MatchedResourceNames []api.ResourceName
// A function that knows how to evaluate a matches scope request
MatchesScopeFunc MatchesScopeFunc
// A function that knows how to return usage for an object
UsageFunc UsageFunc
// A function that knows how to list resources by namespace
ListFuncByNamespace ListFuncByNamespace
// A function that knows how to get resource in a namespace
// This function must be specified if the evaluator needs to handle UPDATE
GetFuncByNamespace GetFuncByNamespace
// A function that checks required constraints are satisfied
ConstraintsFunc ConstraintsFunc
}
// Ensure that GenericEvaluator implements quota.Evaluator
var _ quota.Evaluator = &GenericEvaluator{}
// Constraints checks required constraints are satisfied on the input object
func (g *GenericEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
return g.ConstraintsFunc(required, item)
}
// Get returns the object by namespace and name
func (g *GenericEvaluator) Get(namespace, name string) (runtime.Object, error) {
return g.GetFuncByNamespace(namespace, name)
}
// OperationResources returns the set of resources that could be updated for the
// specified operation for this kind. If empty, admission control will ignore
// quota processing for the operation.
func (g *GenericEvaluator) OperationResources(operation admission.Operation) []api.ResourceName {
return g.InternalOperationResources[operation]
}
// GroupKind that this evaluator tracks
func (g *GenericEvaluator) GroupKind() schema.GroupKind {
return g.InternalGroupKind
}
// MatchesResources is the list of resources that this evaluator matches
func (g *GenericEvaluator) MatchesResources() []api.ResourceName {
return g.MatchedResourceNames
}
// Matches returns true if the evaluator matches the specified quota with the provided input item
func (g *GenericEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) bool {
// Matches returns true if the quota matches the specified item.
func Matches(resourceQuota *api.ResourceQuota, item runtime.Object, matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) {
if resourceQuota == nil {
return false
}
// verify the quota matches on resource, by default its false
matchResource := false
for resourceName := range resourceQuota.Status.Hard {
if g.MatchesResource(resourceName) {
matchResource = true
break
}
return false, fmt.Errorf("expected non-nil quota")
}
// verify the quota matches on at least one resource
matchResource := len(matchFunc(quota.ResourceNames(resourceQuota.Status.Hard))) > 0
// by default, no scopes matches all
matchScope := true
for _, scope := range resourceQuota.Spec.Scopes {
matchScope = matchScope && g.MatchesScope(scope, item)
innerMatch, err := scopeFunc(scope, item)
if err != nil {
return false, err
}
return matchResource && matchScope
}
// MatchesResource returns true if this evaluator can match on the specified resource
func (g *GenericEvaluator) MatchesResource(resourceName api.ResourceName) bool {
for _, matchedResourceName := range g.MatchedResourceNames {
if resourceName == matchedResourceName {
return true
matchScope = matchScope && innerMatch
}
}
return false
return matchResource && matchScope, nil
}
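// Example (a sketch; the quota value and the pod/function variables are
// hypothetical): for a quota whose Status.Hard contains only "pods" and whose
// Spec.Scopes is [BestEffort],
//
//	Matches(quota, bestEffortPod, podMatchingResources, podScopeFunc) // -> true, nil
//	Matches(quota, burstablePod, podMatchingResources, podScopeFunc)  // -> false, nil
//
// the second call is false because the BestEffort scope check fails even though
// the "pods" resource matches.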
// MatchesScope returns true if the input object matches the specified scope
func (g *GenericEvaluator) MatchesScope(scope api.ResourceQuotaScope, object runtime.Object) bool {
return g.MatchesScopeFunc(scope, object)
}
// Usage returns the resource usage for the specified object
func (g *GenericEvaluator) Usage(object runtime.Object) api.ResourceList {
return g.UsageFunc(object)
}
// UsageStats calculates latest observed usage stats for all objects
func (g *GenericEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
// CalculateUsageStats is a utility function that knows how to calculate aggregate usage.
func CalculateUsageStats(options quota.UsageStatsOptions,
listFunc ListFuncByNamespace,
scopeFunc MatchesScopeFunc,
usageFunc UsageFunc) (quota.UsageStats, error) {
// default each tracked resource to zero
result := quota.UsageStats{Used: api.ResourceList{}}
for _, resourceName := range g.MatchedResourceNames {
result.Used[resourceName] = resource.MustParse("0")
for _, resourceName := range options.Resources {
result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI}
}
items, err := g.ListFuncByNamespace(options.Namespace, v1.ListOptions{
items, err := listFunc(options.Namespace, v1.ListOptions{
LabelSelector: labels.Everything().String(),
})
if err != nil {
return result, fmt.Errorf("%s: Failed to list %v: %v", g.Name, g.GroupKind(), err)
return result, fmt.Errorf("failed to list content: %v", err)
}
for _, item := range items {
// need to verify that the item matches the set of scopes
matchesScopes := true
for _, scope := range options.Scopes {
if !g.MatchesScope(scope, item) {
innerMatch, err := scopeFunc(scope, item)
if err != nil {
return result, err
}
if !innerMatch {
matchesScopes = false
}
}
// only count usage if there was a match
if matchesScopes {
result.Used = quota.Add(result.Used, g.Usage(item))
usage, err := usageFunc(item)
if err != nil {
return result, err
}
result.Used = quota.Add(result.Used, usage)
}
}
return result, nil
}
// ObjectCountEvaluator provides an implementation for quota.Evaluator
// that associates usage of the specified resource based on the number of items
// returned by the specified listing function.
type ObjectCountEvaluator struct {
// AllowCreateOnUpdate if true will ensure the evaluator tracks create
// and update operations.
AllowCreateOnUpdate bool
// GroupKind that this evaluator tracks.
InternalGroupKind schema.GroupKind
// A function that knows how to list resources by namespace.
// TODO move to dynamic client in future
ListFuncByNamespace ListFuncByNamespace
// Name associated with this resource in the quota.
ResourceName api.ResourceName
}
// Constraints returns an error if the configured resource name is not in the required set.
func (o *ObjectCountEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
if !quota.Contains(required, o.ResourceName) {
return fmt.Errorf("missing %s", o.ResourceName)
}
return nil
}
// GroupKind that this evaluator tracks
func (o *ObjectCountEvaluator) GroupKind() schema.GroupKind {
return o.InternalGroupKind
}
// Handles returns true if the object count evaluator needs to track this operation.
func (o *ObjectCountEvaluator) Handles(operation admission.Operation) bool {
return operation == admission.Create || (o.AllowCreateOnUpdate && operation == admission.Update)
}
// Matches returns true if the evaluator matches the specified quota with the provided input item
func (o *ObjectCountEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) {
return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc)
}
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (o *ObjectCountEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
return quota.Intersection(input, []api.ResourceName{o.ResourceName})
}
// Usage returns the resource usage for the specified object
func (o *ObjectCountEvaluator) Usage(object runtime.Object) (api.ResourceList, error) {
quantity := resource.NewQuantity(1, resource.DecimalSI)
return api.ResourceList{
o.ResourceName: *quantity,
}, nil
}
// UsageStats calculates aggregate usage for the object.
func (o *ObjectCountEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
return CalculateUsageStats(options, o.ListFuncByNamespace, MatchesNoScopeFunc, o.Usage)
}
// Verify implementation of interface at compile time.
var _ quota.Evaluator = &ObjectCountEvaluator{}

View File

@ -29,6 +29,8 @@ type UsageStatsOptions struct {
Namespace string
// Scopes that must match counted objects
Scopes []api.ResourceQuotaScope
// Resources are the set of resources to include in the measurement
Resources []api.ResourceName
}
// UsageStats is result of measuring observed resource use in the system
@ -41,20 +43,17 @@ type UsageStats struct {
type Evaluator interface {
// Constraints ensures that each required resource is present on item
Constraints(required []api.ResourceName, item runtime.Object) error
// Get returns the object with specified namespace and name
Get(namespace, name string) (runtime.Object, error)
// GroupKind returns the groupKind that this object knows how to evaluate
GroupKind() schema.GroupKind
// MatchesResources is the list of resources that this evaluator matches
MatchesResources() []api.ResourceName
// Handles determines if quota could be impacted by the specified operation.
// If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota.
Handles(operation admission.Operation) bool
// Matches returns true if the specified quota matches the input item
Matches(resourceQuota *api.ResourceQuota, item runtime.Object) bool
// OperationResources returns the set of resources that could be updated for the
// specified operation for this kind. If empty, admission control will ignore
// quota processing for the operation.
OperationResources(operation admission.Operation) []api.ResourceName
Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error)
// MatchingResources takes the input specified list of resources and returns the set of resources the evaluator matches.
MatchingResources(input []api.ResourceName) []api.ResourceName
// Usage returns the resource usage for the specified object
Usage(object runtime.Object) api.ResourceList
Usage(item runtime.Object) (api.ResourceList, error)
// UsageStats calculates latest observed usage stats for all objects
UsageStats(options UsageStatsOptions) (UsageStats, error)
}
@ -69,6 +68,7 @@ type Registry interface {
// is the "winner"
type UnionRegistry []Registry
// Evaluators returns a mapping of evaluators by group kind.
func (r UnionRegistry) Evaluators() map[schema.GroupKind]Evaluator {
ret := map[schema.GroupKind]Evaluator{}

View File

@ -223,18 +223,21 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL
potentialResources := []api.ResourceName{}
evaluators := registry.Evaluators()
for _, evaluator := range evaluators {
potentialResources = append(potentialResources, evaluator.MatchesResources()...)
potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...)
}
// NOTE: the intersection just removes duplicates since the evaluator match already intersects with the hard resources
matchedResources := Intersection(hardResources, potentialResources)
// sum the observed usage from each evaluator
newUsage := api.ResourceList{}
usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes}
for _, evaluator := range evaluators {
// only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything
if intersection := Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) == 0 {
intersection := evaluator.MatchingResources(matchedResources)
if len(intersection) == 0 {
continue
}
usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection}
stats, err := evaluator.UsageStats(usageStatsOptions)
if err != nil {
return nil, err

View File

@ -218,6 +218,7 @@ func TestIsQualifiedName(t *testing.T) {
"1.2.3.4/5678",
"Uppercase_Is_OK_123",
"example.com/Uppercase_Is_OK_123",
"requests.storage-foo",
strings.Repeat("a", 63),
strings.Repeat("a", 253) + "/" + strings.Repeat("b", 63),
}

View File

@ -54,10 +54,8 @@ go_test(
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/quota:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/quota/generic:go_default_library",
"//pkg/quota/install:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",
"//pkg/util/sets:go_default_library",
"//vendor:github.com/hashicorp/golang-lru",

View File

@ -31,10 +31,8 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
testcore "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/pkg/quota/generic"
"k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/util/sets"
)
@ -896,44 +894,22 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourcePods: resource.MustParse("3"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("1"),
api.ResourcePods: resource.MustParse("1"),
},
},
}
kubeClient := fake.NewSimpleClientset(resourceQuota)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
computeResources := []api.ResourceName{
api.ResourcePods,
api.ResourceCPU,
}
usageFunc := func(object runtime.Object) api.ResourceList {
pod, ok := object.(*api.Pod)
if !ok {
t.Fatalf("Expected pod, got %T", object)
}
if pod.Namespace != namespace {
t.Errorf("Expected pod with different namespace: %q != %q", pod.Namespace, namespace)
}
return core.PodUsageFunc(pod)
}
podEvaluator := &generic.GenericEvaluator{
Name: "Test-Evaluator.Pod",
// create a dummy evaluator so we can trigger quota
podEvaluator := &generic.ObjectCountEvaluator{
AllowCreateOnUpdate: false,
InternalGroupKind: api.Kind("Pod"),
InternalOperationResources: map[admission.Operation][]api.ResourceName{
admission.Create: computeResources,
},
ConstraintsFunc: core.PodConstraintsFunc,
MatchedResourceNames: computeResources,
MatchesScopeFunc: core.PodMatchesScopeFunc,
UsageFunc: usageFunc,
ResourceName: api.ResourcePods,
}
registry := &generic.GenericRegistry{
InternalEvaluators: map[schema.GroupKind]quota.Evaluator{
podEvaluator.GroupKind(): podEvaluator,
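For reference, the three fields set above are all the dummy evaluator needs now. A hedged sketch of wiring up another object-count evaluator the same way; the kind (Secret) and the helper name are illustrative, and the field set mirrors this diff rather than a guaranteed final API:

package quotasketch_test

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/quota"
	"k8s.io/kubernetes/pkg/quota/generic"
	"k8s.io/kubernetes/pkg/runtime/schema"
)

// newSecretRegistry builds a registry with a single object-count evaluator,
// following the same pattern the admission test above now uses for pods.
func newSecretRegistry() quota.Registry {
	secretEvaluator := &generic.ObjectCountEvaluator{
		AllowCreateOnUpdate: false,
		InternalGroupKind:   api.Kind("Secret"),
		ResourceName:        api.ResourceSecrets,
	}
	return &generic.GenericRegistry{
		InternalEvaluators: map[schema.GroupKind]quota.Evaluator{
			secretEvaluator.GroupKind(): secretEvaluator,
		},
	}
}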

View File

@ -327,8 +327,7 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
}
op := a.GetOperation()
operationResources := evaluator.OperationResources(op)
if len(operationResources) == 0 {
if !evaluator.Handles(op) {
return quotas, nil
}
@ -340,14 +339,16 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
interestingQuotaIndexes := []int{}
for i := range quotas {
resourceQuota := quotas[i]
match := evaluator.Matches(&resourceQuota, inputObject)
match, err := evaluator.Matches(&resourceQuota, inputObject)
if err != nil {
return quotas, err
}
if !match {
continue
}
hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
evaluatorResources := evaluator.MatchesResources()
requiredResources := quota.Intersection(hardResources, evaluatorResources)
requiredResources := evaluator.MatchingResources(hardResources)
if err := evaluator.Constraints(requiredResources, inputObject); err != nil {
return nil, admission.NewForbidden(a, fmt.Errorf("failed quota: %s: %v", resourceQuota.Name, err))
}
@ -375,7 +376,10 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
// as a result, we need to measure the usage of this object for quota
// on updates, we need to subtract the previous measured usage
// if usage shows no change, just return since it has no impact on quota
deltaUsage := evaluator.Usage(inputObject)
deltaUsage, err := evaluator.Usage(inputObject)
if err != nil {
return quotas, err
}
// ensure that usage for input object is never negative (this would mean a resource made a negative resource requirement)
if negativeUsage := quota.IsNegative(deltaUsage); len(negativeUsage) > 0 {
@ -392,7 +396,10 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
// then charge based on the delta. Otherwise, bill the maximum
metadata, err := meta.Accessor(prevItem)
if err == nil && len(metadata.GetResourceVersion()) > 0 {
prevUsage := evaluator.Usage(prevItem)
prevUsage, innerErr := evaluator.Usage(prevItem)
if innerErr != nil {
return quotas, innerErr
}
deltaUsage = quota.Subtract(deltaUsage, prevUsage)
}
}
@ -446,8 +453,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
// for this kind, check if the operation could mutate any quota resources
// if no resources tracked by quota are impacted, then just return
op := a.GetOperation()
operationResources := evaluator.OperationResources(op)
if len(operationResources) == 0 {
if !evaluator.Handles(op) {
return nil
}
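Read together, these call sites pin down the reworked evaluator contract the admission plugin relies on: Handles replaces the per-operation resource lists, and Matches and Usage can now fail. A hedged sketch of that interface shape, reconstructed from the call sites in this hunk rather than copied from pkg/quota (methods not exercised here, such as GroupKind or UsageStats, are omitted):

package quotasketch

import (
	"k8s.io/kubernetes/pkg/admission"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/runtime"
)

// Evaluator as exercised by checkRequest/Evaluate above.
type Evaluator interface {
	// Handles reports whether the admission operation could change quota usage.
	Handles(op admission.Operation) bool
	// Matches reports whether the quota document selects the incoming object.
	Matches(resourceQuota *api.ResourceQuota, obj runtime.Object) (bool, error)
	// MatchingResources returns the subset of the given names this evaluator tracks.
	MatchingResources(input []api.ResourceName) []api.ResourceName
	// Constraints verifies the object specifies the resources required by the quota.
	Constraints(required []api.ResourceName, obj runtime.Object) error
	// Usage returns the usage to charge (or, for the previous object, credit) against quota.
	Usage(obj runtime.Object) (api.ResourceList, error)
}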

View File

@ -130,6 +130,7 @@ go_library(
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/meta/v1:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/apis/storage/v1beta1/util:go_default_library",
"//pkg/client/cache:go_default_library",
@ -159,6 +160,7 @@ go_library(
"//pkg/labels:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/metrics:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/registry/generic/registry:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/runtime/schema:go_default_library",

View File

@ -23,7 +23,9 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/storage/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@ -288,7 +290,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures persistent volume claimcreation")
By("Ensuring resource quota status captures persistent volume claim creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
@ -306,6 +308,56 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a PersistentVolumeClaim with storage class")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc.Annotations = map[string]string{
util.StorageClassAnnotation: "gold",
}
pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures persistent volume claim creation")
usedResources = v1.ResourceList{}
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
usedResources[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1")
usedResources[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
usedResources[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota with terminating scopes.", func() {
By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
@ -517,6 +569,8 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
hard[v1.ResourceSecrets] = resource.MustParse("10")
hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
hard[core.V1ResourceByStorageClass("gold", v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
hard[core.V1ResourceByStorageClass("gold", v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
return &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard},
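The storage-class-scoped hard limits above are what make the new e2e case possible: creating a single 1Gi claim annotated with the gold class is expected to increment both the plain storage resources and their gold-scoped counterparts. A small sketch of that expectation as a helper; the function name is hypothetical, and the real accounting is done by the PVC evaluator in pkg/quota/evaluator/core:

package quotasketch

import (
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/quota/evaluator/core"
)

// expectedClaimUsage mirrors what the e2e test above waits for after creating
// one claim: the unscoped PVC resources always, and the class-scoped resources
// only when the claim names a storage class.
func expectedClaimUsage(className string, request resource.Quantity) v1.ResourceList {
	usage := v1.ResourceList{
		v1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
		v1.ResourceRequestsStorage:        request,
	}
	if className != "" {
		usage[core.V1ResourceByStorageClass(className, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1")
		usage[core.V1ResourceByStorageClass(className, v1.ResourceRequestsStorage)] = request
	}
	return usage
}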

View File

@ -1,11 +1,11 @@
name,owner,auto-assigned
DEFAULT,rmmh/spxtr/ixdy/apelisse/fejta,0
Addon update should propagate add-on file changes,eparis,1
AppArmor should enforce an AppArmor profile,kevin-wangzefeng,1
AppArmor should enforce an AppArmor profile,derekwaynecarr,0
AppArmor when running with AppArmor should enforce a permissive profile,yujuhong,1
AppArmor when running with AppArmor should enforce a profile blocking writes,freehan,1
AppArmor when running with AppArmor should reject an unloaded profile,kargakis,1
AppArmor when running without AppArmor should reject a pod with an AppArmor profile,vulpecula,1
AppArmor when running without AppArmor should reject a pod with an AppArmor profile,derekwaynecarr,0
Cadvisor should be healthy on every node.,vishh,0
Cassandra should create and scale cassandra,fabioy,1
CassandraStatefulSet should create statefulset,wojtek-t,1
@ -19,13 +19,14 @@ Cluster size autoscaling should increase cluster size if pending pods are small,
Cluster size autoscaling should increase cluster size if pending pods are small and there is another node pool that is not autoscaled,apelisse,1
Cluster size autoscaling should increase cluster size if pods are pending due to host port conflict,brendandburns,1
Cluster size autoscaling should scale up correct target pool,mikedanese,1
Cluster size autoscaling shouldn't increase cluster size if pending pod is too large,karlkfi,1
Cluster size autoscaling shouldn't increase cluster size if pending pod is too large,derekwaynecarr,0
ClusterDns should create pod that uses dns,sttts,0
ConfigMap should be consumable from pods in volume,alex-mohr,1
ConfigMap should be consumable from pods in volume as non-root,hurf,1
ConfigMap should be consumable from pods in volume as non-root,derekwaynecarr,0
ConfigMap should be consumable from pods in volume as non-root with FSGroup,roberthbailey,1
ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set,derekwaynecarr,0
ConfigMap should be consumable from pods in volume with defaultMode set,Random-Liu,1
ConfigMap should be consumable from pods in volume with mappings,karlkfi,1
ConfigMap should be consumable from pods in volume with mappings,derekwaynecarr,0
ConfigMap should be consumable from pods in volume with mappings and Item mode set,eparis,1
ConfigMap should be consumable from pods in volume with mappings as non-root,apelisse,1
ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup,zmerlynn,1
@ -33,7 +34,7 @@ ConfigMap should be consumable in multiple volumes in the same pod,caesarxuchao,
ConfigMap should be consumable via environment variable,ncdc,1
ConfigMap updates should be reflected in volume,kevin-wangzefeng,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute poststart exec hook properly,kargakis,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute prestop exec hook properly,jdef,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute prestop exec hook properly,derekwaynecarr,0
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute poststart http hook properly,vishh,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute prestop http hook properly,freehan,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image *,Random-Liu,0
@ -55,14 +56,14 @@ DNS should provide DNS for the cluster,roberthbailey,1
Daemon set should run and stop complex daemon,jlowdermilk,1
Daemon set should run and stop complex daemon with node affinity,erictune,1
Daemon set should run and stop simple daemon,mtaufen,1
DaemonRestart Controller Manager should not create/delete replicas across restart,vulpecula,1
DaemonRestart Controller Manager should not create/delete replicas across restart,derekwaynecarr,0
DaemonRestart Kubelet should not restart containers across restart,madhusudancs,1
DaemonRestart Scheduler should continue assigning pods to nodes across restart,lavalamp,1
Density create a batch of pods latency/resource should be within limit when create * pods with * interval,apelisse,1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create * pods with * interval (QPS *),jlowdermilk,1
Density create a sequence of pods latency/resource should be within limit when create * pods with * background pods,wojtek-t,1
Density should allow running maximum capacity pods on nodes,smarterclayton,1
Density should allow starting * pods per node,gmarek,0
Density should allow starting * pods per node using *,derekwaynecarr,0
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
@ -97,17 +98,18 @@ Downward API volume should provide container's memory limit,krousey,1
Downward API volume should provide container's memory request,mikedanese,1
Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set,lavalamp,1
Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set,freehan,1
Downward API volume should provide podname as non-root with fsgroup,karlkfi,1
Downward API volume should provide podname as non-root with fsgroup,derekwaynecarr,0
Downward API volume should provide podname as non-root with fsgroup and defaultMode,derekwaynecarr,0
Downward API volume should provide podname only,mwielgus,1
Downward API volume should set DefaultMode on files,davidopp,1
Downward API volume should set mode on item file,mtaufen,1
Downward API volume should update annotations on modification,eparis,1
Downward API volume should update labels on modification,timothysc,1
Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes,andyzheng0831,1
Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes,derekwaynecarr,0
Dynamic provisioning DynamicProvisioner should create and delete persistent volumes,jsafrane,0
DynamicKubeletConfiguration When a configmap called `kubelet-<node-name>` is added to the `kube-system` namespace The Kubelet on that node should restart to take up the new config,mwielgus,1
ESIPP should handle updates to source ip annotation,jsafrane,1
ESIPP should only target nodes with endpoints,karlkfi,1
ESIPP should only target nodes with endpoints,derekwaynecarr,0
ESIPP should work for type=LoadBalancer,fgrzadkowski,1
ESIPP should work for type=NodePort,kargakis,1
ESIPP should work from pods,cjcullen,1
@ -141,20 +143,19 @@ Federated Services DNS non-local federated service missing local service should
Federated Services DNS non-local federated service should be able to discover a non-local federated service,jlowdermilk,1
Federated Services DNS should be able to discover a federated service,derekwaynecarr,1
Federated Services Service creation should create matching services in underlying clusters,jbeda,1
Federated Services Service creation should not be deleted from underlying clusters when it is deleted,sttts,0
Federated Services Service creation should not be deleted from underlying clusters when it is deleted,madhusudancs,0
Federated Services Service creation should not be deleted from underlying clusters when it is deleted,derekwaynecarr,0
Federated Services Service creation should succeed,rmmh,1
Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to connect to a federated ingress via its load balancer,rmmh,1
Federated ingresses Federated Ingresses should be created and deleted successfully,dchen1107,1
Federated ingresses Federated Ingresses should be deleted from underlying clusters when OrphanDependents is false,nikhiljindal,0
Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters,ghodss,1
Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters,derekwaynecarr,0
Federated ingresses Federated Ingresses should not be deleted from underlying clusters when OrphanDependents is nil,nikhiljindal,0
Federated ingresses Federated Ingresses should not be deleted from underlying clusters when OrphanDependents is true,nikhiljindal,0
Federation API server authentication should accept cluster resources when the client has right authentication credentials,davidopp,1
Federation API server authentication should not accept cluster resources when the client has invalid authentication credentials,yujuhong,1
Federation API server authentication should not accept cluster resources when the client has no authentication credentials,nikhiljindal,1
Federation apiserver Admission control should not be able to create resources if namespace does not exist,alex-mohr,1
Federation apiserver Cluster objects should be created and deleted successfully,ghodss,1
Federation apiserver Cluster objects should be created and deleted successfully,derekwaynecarr,0
Federation daemonsets DaemonSet objects should be created and deleted successfully,nikhiljindal,0
Federation daemonsets DaemonSet objects should be deleted from underlying clusters when OrphanDependents is false,nikhiljindal,0
Federation daemonsets DaemonSet objects should not be deleted from underlying clusters when OrphanDependents is nil,nikhiljindal,0
@ -164,7 +165,7 @@ Federation deployments Federated Deployment should be deleted from underlying cl
Federation deployments Federated Deployment should create and update matching deployments in underling clusters,soltysh,1
Federation deployments Federated Deployment should not be deleted from underlying clusters when OrphanDependents is nil,nikhiljindal,0
Federation deployments Federated Deployment should not be deleted from underlying clusters when OrphanDependents is true,nikhiljindal,0
Federation events Event objects should be created and deleted successfully,karlkfi,1
Federation events Event objects should be created and deleted successfully,derekwaynecarr,0
Federation namespace Namespace objects all resources in the namespace should be deleted when namespace is deleted,nikhiljindal,0
Federation namespace Namespace objects should be created and deleted successfully,xiang90,1
Federation namespace Namespace objects should be deleted from underlying clusters when OrphanDependents is false,nikhiljindal,0
@ -187,7 +188,7 @@ Garbage Collection Test: * Should eventually garbage collect containers when we
Garbage collector should delete pods created by rc when not orphaning,justinsb,1
Garbage collector should orphan pods created by rc if delete options say so,fabioy,1
Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil,zmerlynn,1
"Generated release_1_5 clientset should create pods, delete pods, watch pods",ghodss,1
"Generated release_1_5 clientset should create pods, delete pods, watch pods",derekwaynecarr,0
"Generated release_1_5 clientset should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs",soltysh,1
HA-master survive addition/removal replicas different zones,derekwaynecarr,0
HA-master survive addition/removal replicas same zone,derekwaynecarr,0
@ -202,9 +203,9 @@ Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Sho
HostPath should give a volume the correct mode,thockin,1
HostPath should support r/w,luxas,1
HostPath should support subPath,sttts,1
ImageID should be set to the manifest digest (from RepoDigests) when available,hurf,1
ImageID should be set to the manifest digest (from RepoDigests) when available,derekwaynecarr,0
InitContainer should invoke init containers on a RestartAlways pod,saad-ali,1
InitContainer should invoke init containers on a RestartNever pod,vulpecula,1
InitContainer should invoke init containers on a RestartNever pod,derekwaynecarr,0
InitContainer should not start app containers and fail the pod if init containers fail on a RestartNever pod,maisem,0
InitContainer should not start app containers if init containers fail on a RestartAlways pod,maisem,0
Initial Resources should set initial resources based on historical data,piosz,0
@ -224,7 +225,7 @@ Kubectl client Kubectl api-versions should check if v1 is in available api versi
Kubectl client Kubectl apply should apply a new configuration to an existing RC,pwittrock,0
Kubectl client Kubectl apply should reuse port when apply to an existing SVC,deads2k,0
Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info,pwittrock,0
Kubectl client Kubectl create quota should create a quota with scopes,jdef,1
Kubectl client Kubectl create quota should create a quota with scopes,derekwaynecarr,0
Kubectl client Kubectl create quota should create a quota without scopes,xiang90,1
Kubectl client Kubectl create quota should reject quota with invalid scopes,brendandburns,1
Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods,pwittrock,0
@ -232,7 +233,7 @@ Kubectl client Kubectl expose should create services for rc,pwittrock,0
Kubectl client Kubectl label should update the label on a resource,pwittrock,0
Kubectl client Kubectl logs should be able to retrieve and filter logs,jlowdermilk,0
Kubectl client Kubectl patch should add annotations for pods in rc,janetkuo,0
Kubectl client Kubectl replace should update a single-container pod's image,karlkfi,1
Kubectl client Kubectl replace should update a single-container pod's image,derekwaynecarr,0
Kubectl client Kubectl rolling-update should support rolling-update to same image,janetkuo,0
"Kubectl client Kubectl run --rm job should create a job from an image, then delete the job",soltysh,1
Kubectl client Kubectl run default should create an rc or deployment from an image,janetkuo,0
@ -262,7 +263,7 @@ Kubelet Container Manager Validate OOM score adjustments once the node is setup
Kubelet Container Manager Validate OOM score adjustments once the node is setup docker daemon's oom-score-adj should be -999,thockin,1
Kubelet Container Manager Validate OOM score adjustments once the node is setup guaranteed container's oom-score-adj should be -998,kargakis,1
Kubelet Container Manager Validate OOM score adjustments once the node is setup pod infra containers oom-score-adj should be -998 and best effort container's should be 1000,timothysc,1
Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space,karlkfi,1
Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space,derekwaynecarr,0
Kubelet Volume Manager Volume Manager On terminatation of pod with memory backed volume should remove the volume from the node,derekwaynecarr,0
Kubelet experimental resource usage tracking resource tracking for * pods per node,yujuhong,0
Kubelet regular resource usage tracking resource tracking for * pods per node,yujuhong,0
@ -273,10 +274,10 @@ Kubelet when scheduling a read only busybox container it should not write to roo
KubeletManagedEtcHosts should test kubelet managed /etc/hosts file,Random-Liu,1
Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive,wonderfly,0
LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied.,cjcullen,1
Liveness liveness pods should be automatically restarted,andyzheng0831,1
Load capacity should be able to handle * pods per node,gmarek,0
Loadbalancing: L7 GCE shoud create ingress with given static-ip,vulpecula,1
Loadbalancing: L7 GCE should conform to Ingress spec,andyzheng0831,1
Liveness liveness pods should be automatically restarted,derekwaynecarr,0
Load capacity should be able to handle * pods per node *,derekwaynecarr,0
Loadbalancing: L7 GCE shoud create ingress with given static-ip,derekwaynecarr,0
Loadbalancing: L7 GCE should conform to Ingress spec,derekwaynecarr,0
Loadbalancing: L7 Nginx should conform to Ingress spec,ncdc,1
"Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node",justinsb,1
"MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed)",ixdy,1
@ -314,7 +315,7 @@ Networking Granular Checks: Services should function for node-Service: http,thoc
Networking Granular Checks: Services should function for node-Service: udp,yifan-gu,1
Networking Granular Checks: Services should function for pod-Service: http,childsb,1
Networking Granular Checks: Services should function for pod-Service: udp,brendandburns,1
Networking Granular Checks: Services should update endpoints: http,jdef,1
Networking Granular Checks: Services should update endpoints: http,derekwaynecarr,0
Networking Granular Checks: Services should update endpoints: udp,freehan,1
Networking Granular Checks: Services should update nodePort: http,nikhiljindal,1
Networking Granular Checks: Services should update nodePort: udp,smarterclayton,1
@ -339,7 +340,7 @@ PersistentVolumes with multiple PVs and PVCs all in same ns should create 3 PVs
PersistentVolumes with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access,caesarxuchao,1
Pet Store should scale to persist a nominal number ( * ) of transactions in * seconds,xiang90,1
"Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host",alex-mohr,1
"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",ghodss,1
"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",derekwaynecarr,0
"Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host",mml,1
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully.",saad-ali,1
@ -359,7 +360,7 @@ Pods should support retrieving logs from the container over websockets,vishh,0
"Port forwarding With a server that expects a client request should support a client that connects, sends no data, and disconnects",sttts,0
"Port forwarding With a server that expects no client request should support a client that connects, sends no data, and disconnects",sttts,0
PreStop should call prestop when killing a pod,ncdc,1
PrivilegedPod should enable privileged commands,dchen1107,1
PrivilegedPod should enable privileged commands,derekwaynecarr,0
Probing container should *not* be restarted with a /healthz http liveness probe,Random-Liu,0
"Probing container should *not* be restarted with a exec ""cat /tmp/health"" liveness probe",Random-Liu,0
Probing container should be restarted with a /healthz http liveness probe,Random-Liu,0
@ -368,11 +369,11 @@ Probing container should be restarted with a docker exec liveness probe with tim
Probing container should have monotonically increasing restart count,Random-Liu,0
Probing container with readiness probe should not be ready before initial delay and never restart,Random-Liu,0
Probing container with readiness probe that fails should never be ready and never restart,Random-Liu,0
Proxy * should proxy logs on node,karlkfi,1
Proxy * should proxy logs on node using proxy subresource,hurf,1
Proxy * should proxy logs on node,derekwaynecarr,0
Proxy * should proxy logs on node using proxy subresource,derekwaynecarr,0
Proxy * should proxy logs on node with explicit kubelet port,ixdy,1
Proxy * should proxy logs on node with explicit kubelet port using proxy subresource,dchen1107,1
Proxy * should proxy through a service and a pod,karlkfi,1
Proxy * should proxy through a service and a pod,derekwaynecarr,0
Proxy * should proxy to cadvisor,jszczepkowski,1
Proxy * should proxy to cadvisor using proxy subresource,roberthbailey,1
Reboot each node by dropping all inbound packets for a while and ensure they function afterwards,quinton-hoole,0
@ -391,21 +392,22 @@ ReplicationController should surface a failure condition on a common issue like
Rescheduler should ensure that critical pod is scheduled in case there is no resources available,mtaufen,1
Resource-usage regular resource usage tracking resource tracking for * pods per node,janetkuo,1
ResourceQuota should create a ResourceQuota and capture the life of a configMap.,timstclair,1
ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class.,derekwaynecarr,0
ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim.,bgrant0607,1
ResourceQuota should create a ResourceQuota and capture the life of a pod.,pmorie,1
ResourceQuota should create a ResourceQuota and capture the life of a replication controller.,jdef,1
ResourceQuota should create a ResourceQuota and capture the life of a replication controller.,derekwaynecarr,0
ResourceQuota should create a ResourceQuota and capture the life of a secret.,ncdc,1
ResourceQuota should create a ResourceQuota and capture the life of a service.,timstclair,1
ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated.,krousey,1
ResourceQuota should verify ResourceQuota with best effort scope.,mml,1
ResourceQuota should verify ResourceQuota with terminating scopes.,ncdc,1
Restart Docker Daemon Network should recover from ip leak,bprashanth,0
Restart should restart all nodes and ensure all nodes and pods recover,andyzheng0831,1
Restart should restart all nodes and ensure all nodes and pods recover,derekwaynecarr,0
RethinkDB should create and stop rethinkdb servers,mwielgus,1
SSH should SSH to all nodes and run commands,quinton-hoole,0
SchedulerPredicates validates MaxPods limit number of pods that are allowed to run,gmarek,0
SchedulerPredicates validates resource limits of pods that are allowed to run,gmarek,0
SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching,hurf,1
SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching,derekwaynecarr,0
SchedulerPredicates validates that InterPod Affinity and AntiAffinity is respected if matching,yifan-gu,1
SchedulerPredicates validates that InterPodAffinity is respected if matching,kevin-wangzefeng,1
SchedulerPredicates validates that InterPodAffinity is respected if matching with multiple Affinities,caesarxuchao,1
@ -416,21 +418,22 @@ SchedulerPredicates validates that NodeSelector is respected if not matching,gma
SchedulerPredicates validates that a pod with an invalid NodeAffinity is rejected,deads2k,1
SchedulerPredicates validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid,smarterclayton,1
SchedulerPredicates validates that embedding the JSON NodeAffinity setting as a string in the annotation value work,kevin-wangzefeng,1
SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work,hurf,1
SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work,derekwaynecarr,0
SchedulerPredicates validates that required NodeAffinity setting is respected if matching,mml,1
SchedulerPredicates validates that taints-tolerations is respected if matching,jlowdermilk,1
SchedulerPredicates validates that taints-tolerations is respected if not matching,derekwaynecarr,1
Secret should create a pod that reads a secret,luxas,1
Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace,rkouj,0
Secrets should be consumable from pods in env vars,mml,1
Secrets should be consumable from pods in volume,ghodss,1
Secrets should be consumable from pods in volume,derekwaynecarr,0
Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set,derekwaynecarr,0
Secrets should be consumable from pods in volume with defaultMode set,derekwaynecarr,1
Secrets should be consumable from pods in volume with mappings,jbeda,1
Secrets should be consumable from pods in volume with mappings and Item Mode set,quinton-hoole,1
Secrets should be consumable in multiple volumes in a pod,alex-mohr,1
Security Context should support container.SecurityContext.RunAsUser,alex-mohr,1
Security Context should support pod.Spec.SecurityContext.RunAsUser,bgrant0607,1
Security Context should support pod.Spec.SecurityContext.SupplementalGroups,andyzheng0831,1
Security Context should support pod.Spec.SecurityContext.SupplementalGroups,derekwaynecarr,0
Security Context should support seccomp alpha docker/default annotation,freehan,1
Security Context should support seccomp alpha unconfined annotation on the container,childsb,1
Security Context should support seccomp alpha unconfined annotation on the pod,krousey,1
@ -447,8 +450,7 @@ Services should be able to create a functioning NodePort service,bprashanth,0
Services should be able to up and down services,bprashanth,0
Services should check NodePort out-of-range,bprashanth,0
Services should create endpoints for unready pods,maisem,0
Services should only allow access from service loadbalancer source ranges,sttts,0
Services should only allow access from service loadbalancer source ranges,madhusudancs,0
Services should only allow access from service loadbalancer source ranges,derekwaynecarr,0
Services should preserve source pod IP for traffic thru service cluster IP,Random-Liu,1
Services should prevent NodePort collisions,bprashanth,0
Services should provide secure master service,bprashanth,0
@ -458,7 +460,7 @@ Services should serve multiport endpoints from pods,bprashanth,0
Services should use same NodePort with same port but different protocols,timothysc,1
Services should work after restarting apiserver,bprashanth,0
Services should work after restarting kube-proxy,bprashanth,0
SimpleMount should be able to mount an emptydir on a container,karlkfi,1
SimpleMount should be able to mount an emptydir on a container,derekwaynecarr,0
"Spark should start spark master, driver and workers",jszczepkowski,1
"Staging client repo client should create pods, delete pods, watch pods",jbeda,1
Stateful Set recreate should recreate evicted statefulset,derekwaynecarr,0
@ -491,19 +493,18 @@ V1Job should run a job to completion when tasks sometimes fail and are not local
V1Job should run a job to completion when tasks succeed,soltysh,1
V1Job should scale a job down,soltysh,1
V1Job should scale a job up,soltysh,1
Variable Expansion should allow composing env vars into new env vars,ghodss,1
Variable Expansion should allow composing env vars into new env vars,derekwaynecarr,0
Variable Expansion should allow substituting values in a container's args,dchen1107,1
Variable Expansion should allow substituting values in a container's command,mml,1
Volumes Ceph RBD should be mountable,fabioy,1
Volumes CephFS should be mountable,Q-Lee,1
Volumes Cinder should be mountable,cjcullen,1
Volumes GlusterFS should be mountable,eparis,1
Volumes NFS should be mountable,andyzheng0831,1
Volumes NFS should be mountable,derekwaynecarr,0
Volumes PD should be mountable,caesarxuchao,1
Volumes iSCSI should be mountable,jsafrane,1
k8s.io/kubernetes/cmd/genutils,rmmh,1
k8s.io/kubernetes/cmd/hyperkube,jbeda,0
k8s.io/kubernetes/cmd/kube-apiserver/app,nikhiljindal,0
k8s.io/kubernetes/cmd/kube-apiserver/app/options,nikhiljindal,0
k8s.io/kubernetes/cmd/kube-discovery/app,pmorie,1
k8s.io/kubernetes/cmd/kube-proxy/app,luxas,1
@ -514,7 +515,7 @@ k8s.io/kubernetes/cmd/kubeadm/app/node,apprenda,0
k8s.io/kubernetes/cmd/kubeadm/app/preflight,apprenda,0
k8s.io/kubernetes/cmd/kubeadm/app/util,krousey,1
k8s.io/kubernetes/cmd/kubeadm/test,pipejakob,0
k8s.io/kubernetes/cmd/kubelet/app,hurf,1
k8s.io/kubernetes/cmd/kubelet/app,derekwaynecarr,0
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/types,caesarxuchao,0
k8s.io/kubernetes/cmd/libs/go2idl/go-to-protobuf/protobuf,smarterclayton,0
k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen/generators,davidopp,1
@ -531,7 +532,7 @@ k8s.io/kubernetes/federation/pkg/federation-controller/configmap,mwielgus,0
k8s.io/kubernetes/federation/pkg/federation-controller/daemonset,childsb,1
k8s.io/kubernetes/federation/pkg/federation-controller/deployment,zmerlynn,1
k8s.io/kubernetes/federation/pkg/federation-controller/ingress,vishh,1
k8s.io/kubernetes/federation/pkg/federation-controller/namespace,hurf,1
k8s.io/kubernetes/federation/pkg/federation-controller/namespace,derekwaynecarr,0
k8s.io/kubernetes/federation/pkg/federation-controller/replicaset,roberthbailey,1
k8s.io/kubernetes/federation/pkg/federation-controller/secret,apelisse,1
k8s.io/kubernetes/federation/pkg/federation-controller/service,pmorie,1
@ -554,14 +555,11 @@ k8s.io/kubernetes/pkg/api/meta,fabioy,1
k8s.io/kubernetes/pkg/api/resource,smarterclayton,1
k8s.io/kubernetes/pkg/api/service,spxtr,1
k8s.io/kubernetes/pkg/api/testapi,caesarxuchao,1
k8s.io/kubernetes/pkg/api/util,ghodss,1
k8s.io/kubernetes/pkg/api/v1,vulpecula,1
k8s.io/kubernetes/pkg/api/v1/endpoints,sttts,0
k8s.io/kubernetes/pkg/api/v1/pod,sttts,0
k8s.io/kubernetes/pkg/api/v1/service,sttts,0
k8s.io/kubernetes/pkg/api/v1/endpoints,madhusudancs,0
k8s.io/kubernetes/pkg/api/v1/pod,madhusudancs,0
k8s.io/kubernetes/pkg/api/v1/service,madhusudancs,0
k8s.io/kubernetes/pkg/api/util,derekwaynecarr,0
k8s.io/kubernetes/pkg/api/v1,derekwaynecarr,0
k8s.io/kubernetes/pkg/api/v1/endpoints,derekwaynecarr,0
k8s.io/kubernetes/pkg/api/v1/pod,derekwaynecarr,0
k8s.io/kubernetes/pkg/api/v1/service,derekwaynecarr,0
k8s.io/kubernetes/pkg/api/validation,smarterclayton,1
k8s.io/kubernetes/pkg/api/validation/path,luxas,1
k8s.io/kubernetes/pkg/apimachinery,gmarek,1
@ -580,7 +578,7 @@ k8s.io/kubernetes/pkg/apis/extensions,bgrant0607,1
k8s.io/kubernetes/pkg/apis/extensions/v1beta1,madhusudancs,1
k8s.io/kubernetes/pkg/apis/extensions/validation,nikhiljindal,1
k8s.io/kubernetes/pkg/apis/meta/v1,sttts,0
k8s.io/kubernetes/pkg/apis/meta/v1/validation,jszczepkowski,1
k8s.io/kubernetes/pkg/apis/meta/v1/validation,derekwaynecarr,0
k8s.io/kubernetes/pkg/apis/policy/validation,deads2k,1
k8s.io/kubernetes/pkg/apis/rbac/validation,erictune,0
k8s.io/kubernetes/pkg/apis/storage/validation,caesarxuchao,1
@ -590,13 +588,13 @@ k8s.io/kubernetes/pkg/apiserver/request,lavalamp,1
k8s.io/kubernetes/pkg/auth/authenticator/bearertoken,liggitt,0
k8s.io/kubernetes/pkg/auth/authorizer/abac,liggitt,0
k8s.io/kubernetes/pkg/auth/authorizer/union,liggitt,0
k8s.io/kubernetes/pkg/auth/group,andyzheng0831,1
k8s.io/kubernetes/pkg/auth/group,derekwaynecarr,0
k8s.io/kubernetes/pkg/auth/handlers,liggitt,0
k8s.io/kubernetes/pkg/client/cache,xiang90,1
k8s.io/kubernetes/pkg/client/chaosclient,deads2k,1
k8s.io/kubernetes/pkg/client/leaderelection,xiang90,1
k8s.io/kubernetes/pkg/client/listers/batch/internalversion,mqliang,0
k8s.io/kubernetes/pkg/client/record,karlkfi,1
k8s.io/kubernetes/pkg/client/record,derekwaynecarr,0
k8s.io/kubernetes/pkg/client/restclient,kargakis,1
k8s.io/kubernetes/pkg/client/retry,caesarxuchao,1
k8s.io/kubernetes/pkg/client/testing/cache,mikedanese,1
@ -609,7 +607,7 @@ k8s.io/kubernetes/pkg/client/unversioned/auth,jbeda,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd,yifan-gu,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api,thockin,1
k8s.io/kubernetes/pkg/client/unversioned/portforward,lavalamp,1
k8s.io/kubernetes/pkg/client/unversioned/remotecommand,andyzheng0831,1
k8s.io/kubernetes/pkg/client/unversioned/remotecommand,derekwaynecarr,0
k8s.io/kubernetes/pkg/cloudprovider/providers/aws,eparis,1
k8s.io/kubernetes/pkg/cloudprovider/providers/azure,saad-ali,1
k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack,roberthbailey,1
@ -630,20 +628,20 @@ k8s.io/kubernetes/pkg/controller/endpoint,mwielgus,1
k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1
k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly,cjcullen,1
k8s.io/kubernetes/pkg/controller/job,soltysh,1
k8s.io/kubernetes/pkg/controller/namespace,karlkfi,1
k8s.io/kubernetes/pkg/controller/namespace,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/node,gmarek,0
k8s.io/kubernetes/pkg/controller/petset,fgrzadkowski,1
k8s.io/kubernetes/pkg/controller/podautoscaler,piosz,0
k8s.io/kubernetes/pkg/controller/podautoscaler/metrics,piosz,0
k8s.io/kubernetes/pkg/controller/podgc,jdef,1
k8s.io/kubernetes/pkg/controller/podgc,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/replicaset,fgrzadkowski,0
k8s.io/kubernetes/pkg/controller/replication,fgrzadkowski,0
k8s.io/kubernetes/pkg/controller/resourcequota,ghodss,1
k8s.io/kubernetes/pkg/controller/resourcequota,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/route,gmarek,0
k8s.io/kubernetes/pkg/controller/service,asalkeld,0
k8s.io/kubernetes/pkg/controller/serviceaccount,liggitt,0
k8s.io/kubernetes/pkg/controller/volume/attachdetach,luxas,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache,hurf,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache,derekwaynecarr,0
k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler,jsafrane,1
k8s.io/kubernetes/pkg/controller/volume/persistentvolume,jsafrane,0
k8s.io/kubernetes/pkg/conversion,ixdy,1
@ -651,7 +649,7 @@ k8s.io/kubernetes/pkg/conversion/queryparams,caesarxuchao,1
k8s.io/kubernetes/pkg/credentialprovider,justinsb,1
k8s.io/kubernetes/pkg/credentialprovider/aws,zmerlynn,1
k8s.io/kubernetes/pkg/credentialprovider/gcp,mml,1
k8s.io/kubernetes/pkg/dns,jdef,1
k8s.io/kubernetes/pkg/dns,derekwaynecarr,0
k8s.io/kubernetes/pkg/dns/config,derekwaynecarr,0
k8s.io/kubernetes/pkg/dns/federation,derekwaynecarr,0
k8s.io/kubernetes/pkg/dns/treecache,bowei,0
@ -670,7 +668,7 @@ k8s.io/kubernetes/pkg/kubectl/cmd,rmmh,1
k8s.io/kubernetes/pkg/kubectl/cmd/config,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/set,erictune,1
k8s.io/kubernetes/pkg/kubectl/cmd/util,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/util/editor,jdef,1
k8s.io/kubernetes/pkg/kubectl/cmd/util/editor,derekwaynecarr,0
k8s.io/kubernetes/pkg/kubectl/resource,caesarxuchao,1
k8s.io/kubernetes/pkg/kubelet,vishh,0
k8s.io/kubernetes/pkg/kubelet/cadvisor,sttts,1
@ -681,7 +679,7 @@ k8s.io/kubernetes/pkg/kubelet/container,yujuhong,0
k8s.io/kubernetes/pkg/kubelet/custommetrics,kevin-wangzefeng,0
k8s.io/kubernetes/pkg/kubelet/dockershim,zmerlynn,1
k8s.io/kubernetes/pkg/kubelet/dockertools,deads2k,1
k8s.io/kubernetes/pkg/kubelet/envvars,karlkfi,1
k8s.io/kubernetes/pkg/kubelet/envvars,derekwaynecarr,0
k8s.io/kubernetes/pkg/kubelet/eviction,childsb,1
k8s.io/kubernetes/pkg/kubelet/images,caesarxuchao,1
k8s.io/kubernetes/pkg/kubelet/kuberuntime,yifan-gu,1
@ -709,7 +707,7 @@ k8s.io/kubernetes/pkg/kubelet/types,jlowdermilk,1
k8s.io/kubernetes/pkg/kubelet/util/cache,timothysc,1
k8s.io/kubernetes/pkg/kubelet/util/format,ncdc,1
k8s.io/kubernetes/pkg/kubelet/util/queue,yujuhong,0
k8s.io/kubernetes/pkg/kubelet/volumemanager,jdef,1
k8s.io/kubernetes/pkg/kubelet/volumemanager,derekwaynecarr,0
k8s.io/kubernetes/pkg/kubelet/volumemanager/cache,janetkuo,1
k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler,timstclair,1
k8s.io/kubernetes/pkg/labels,ixdy,1
@ -718,7 +716,7 @@ k8s.io/kubernetes/pkg/probe/exec,bgrant0607,1
k8s.io/kubernetes/pkg/probe/http,mtaufen,1
k8s.io/kubernetes/pkg/probe/tcp,mtaufen,1
k8s.io/kubernetes/pkg/proxy/config,ixdy,1
k8s.io/kubernetes/pkg/proxy/healthcheck,ghodss,1
k8s.io/kubernetes/pkg/proxy/healthcheck,derekwaynecarr,0
k8s.io/kubernetes/pkg/proxy/iptables,freehan,0
k8s.io/kubernetes/pkg/proxy/userspace,luxas,1
k8s.io/kubernetes/pkg/proxy/winuserspace,jbhurat,0
@ -751,16 +749,16 @@ k8s.io/kubernetes/pkg/registry/core/node/etcd,deads2k,1
k8s.io/kubernetes/pkg/registry/core/persistentvolume,lavalamp,1
k8s.io/kubernetes/pkg/registry/core/persistentvolume/etcd,derekwaynecarr,1
k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim,bgrant0607,1
k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd,vulpecula,1
k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/pod,Random-Liu,1
k8s.io/kubernetes/pkg/registry/core/pod/etcd,alex-mohr,1
k8s.io/kubernetes/pkg/registry/core/pod/rest,jsafrane,1
k8s.io/kubernetes/pkg/registry/core/podtemplate,thockin,1
k8s.io/kubernetes/pkg/registry/core/podtemplate/etcd,brendandburns,1
k8s.io/kubernetes/pkg/registry/core/resourcequota,vulpecula,1
k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd,ghodss,1
k8s.io/kubernetes/pkg/registry/core/resourcequota,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/rest,deads2k,0
k8s.io/kubernetes/pkg/registry/core/secret,jdef,1
k8s.io/kubernetes/pkg/registry/core/secret,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/secret/etcd,freehan,1
k8s.io/kubernetes/pkg/registry/core/service,madhusudancs,1
k8s.io/kubernetes/pkg/registry/core/service/allocator,jbeda,1
@ -769,24 +767,24 @@ k8s.io/kubernetes/pkg/registry/core/service/etcd,apelisse,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator,eparis,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller,mtaufen,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator/etcd,kargakis,1
k8s.io/kubernetes/pkg/registry/core/service/portallocator,jdef,1
k8s.io/kubernetes/pkg/registry/core/service/portallocator,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/core/serviceaccount,caesarxuchao,1
k8s.io/kubernetes/pkg/registry/core/serviceaccount/etcd,bprashanth,1
k8s.io/kubernetes/pkg/registry/extensions/controller/etcd,mwielgus,1
k8s.io/kubernetes/pkg/registry/extensions/daemonset,nikhiljindal,1
k8s.io/kubernetes/pkg/registry/extensions/daemonset/etcd,spxtr,1
k8s.io/kubernetes/pkg/registry/extensions/deployment,dchen1107,1
k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd,ghodss,1
k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/ingress,apelisse,1
k8s.io/kubernetes/pkg/registry/extensions/ingress/etcd,apelisse,1
k8s.io/kubernetes/pkg/registry/extensions/networkpolicy,deads2k,1
k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/etcd,ncdc,1
k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/etcd,erictune,1
k8s.io/kubernetes/pkg/registry/extensions/replicaset,andyzheng0831,1
k8s.io/kubernetes/pkg/registry/extensions/replicaset,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/replicaset/etcd,fabioy,1
k8s.io/kubernetes/pkg/registry/extensions/rest,karlkfi,1
k8s.io/kubernetes/pkg/registry/extensions/rest,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource,mwielgus,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd,vulpecula,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd,derekwaynecarr,0
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata,sttts,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/etcd,sttts,1
k8s.io/kubernetes/pkg/registry/generic/registry,jsafrane,1
@ -796,8 +794,7 @@ k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/etcd,xiang90,1
k8s.io/kubernetes/pkg/registry/storage/storageclass,brendandburns,1
k8s.io/kubernetes/pkg/registry/storage/storageclass/etcd,eparis,1
k8s.io/kubernetes/pkg/runtime,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/schema,sttts,0
k8s.io/kubernetes/pkg/runtime/schema,madhusudancs,0
k8s.io/kubernetes/pkg/runtime/schema,derekwaynecarr,0
k8s.io/kubernetes/pkg/runtime/serializer,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/json,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/protobuf,wojtek-t,0
@ -806,12 +803,12 @@ k8s.io/kubernetes/pkg/runtime/serializer/streaming,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/versioning,wojtek-t,0
k8s.io/kubernetes/pkg/security/apparmor,bgrant0607,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor,vulpecula,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor,derekwaynecarr,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/group,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp,rmmh,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl,andyzheng0831,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl,derekwaynecarr,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/user,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/util,erictune,0
k8s.io/kubernetes/pkg/securitycontext,erictune,1
@ -826,7 +823,7 @@ k8s.io/kubernetes/pkg/util,jbeda,1
k8s.io/kubernetes/pkg/util/async,spxtr,1
k8s.io/kubernetes/pkg/util/bandwidth,thockin,1
k8s.io/kubernetes/pkg/util/cache,thockin,1
k8s.io/kubernetes/pkg/util/cert,karlkfi,1
k8s.io/kubernetes/pkg/util/cert,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/clock,zmerlynn,1
k8s.io/kubernetes/pkg/util/config,jszczepkowski,1
k8s.io/kubernetes/pkg/util/configz,ixdy,1
@ -836,7 +833,7 @@ k8s.io/kubernetes/pkg/util/env,asalkeld,0
k8s.io/kubernetes/pkg/util/errors,jlowdermilk,1
k8s.io/kubernetes/pkg/util/exec,krousey,1
k8s.io/kubernetes/pkg/util/flowcontrol,ixdy,1
k8s.io/kubernetes/pkg/util/flushwriter,vulpecula,1
k8s.io/kubernetes/pkg/util/flushwriter,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/framer,piosz,1
k8s.io/kubernetes/pkg/util/goroutinemap,saad-ali,0
k8s.io/kubernetes/pkg/util/hash,timothysc,1
@ -845,7 +842,7 @@ k8s.io/kubernetes/pkg/util/httpstream/spdy,zmerlynn,1
k8s.io/kubernetes/pkg/util/integer,childsb,1
k8s.io/kubernetes/pkg/util/intstr,brendandburns,1
k8s.io/kubernetes/pkg/util/io,mtaufen,1
k8s.io/kubernetes/pkg/util/iptables,hurf,1
k8s.io/kubernetes/pkg/util/iptables,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/json,liggitt,0
k8s.io/kubernetes/pkg/util/jsonpath,spxtr,1
k8s.io/kubernetes/pkg/util/keymutex,saad-ali,0
@ -853,20 +850,20 @@ k8s.io/kubernetes/pkg/util/labels,rmmh,1
k8s.io/kubernetes/pkg/util/limitwriter,deads2k,1
k8s.io/kubernetes/pkg/util/mount,xiang90,1
k8s.io/kubernetes/pkg/util/net,spxtr,1
k8s.io/kubernetes/pkg/util/net/sets,jdef,1
k8s.io/kubernetes/pkg/util/net/sets,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/node,liggitt,0
k8s.io/kubernetes/pkg/util/oom,vishh,0
k8s.io/kubernetes/pkg/util/parsers,derekwaynecarr,1
k8s.io/kubernetes/pkg/util/procfs,roberthbailey,1
k8s.io/kubernetes/pkg/util/proxy,cjcullen,1
k8s.io/kubernetes/pkg/util/rand,madhusudancs,1
k8s.io/kubernetes/pkg/util/ratelimit,justinsb,1
k8s.io/kubernetes/pkg/util/runtime,davidopp,1
k8s.io/kubernetes/pkg/util/sets,quinton-hoole,0
k8s.io/kubernetes/pkg/util/slice,quinton-hoole,0
k8s.io/kubernetes/pkg/util/strategicpatch,brendandburns,1
k8s.io/kubernetes/pkg/util/strings,quinton-hoole,0
k8s.io/kubernetes/pkg/util/system,mwielgus,0
k8s.io/kubernetes/pkg/util/taints,derekwaynecarr,0
k8s.io/kubernetes/pkg/util/term,davidopp,1
k8s.io/kubernetes/pkg/util/testing,jlowdermilk,1
k8s.io/kubernetes/pkg/util/threading,roberthbailey,1
@ -886,7 +883,7 @@ k8s.io/kubernetes/pkg/volume/cinder,jsafrane,1
k8s.io/kubernetes/pkg/volume/configmap,derekwaynecarr,1
k8s.io/kubernetes/pkg/volume/downwardapi,mikedanese,1
k8s.io/kubernetes/pkg/volume/empty_dir,quinton-hoole,1
k8s.io/kubernetes/pkg/volume/fc,andyzheng0831,1
k8s.io/kubernetes/pkg/volume/fc,derekwaynecarr,0
k8s.io/kubernetes/pkg/volume/flexvolume,Q-Lee,1
k8s.io/kubernetes/pkg/volume/flocker,jbeda,1
k8s.io/kubernetes/pkg/volume/gce_pd,saad-ali,0
@ -916,11 +913,11 @@ k8s.io/kubernetes/plugin/pkg/admission/limitranger,ncdc,1
k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/namespace/exists,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label,jdef,1
k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/podnodeselector,ixdy,1
k8s.io/kubernetes/plugin/pkg/admission/resourcequota,fabioy,1
k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy,maisem,1
k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny,vulpecula,1
k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/serviceaccount,liggitt,0
k8s.io/kubernetes/plugin/pkg/admission/storageclass/default,pmorie,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/allow,liggitt,0
@ -933,10 +930,10 @@ k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/x509,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/anytoken,krousey,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc,brendandburns,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook,ghodss,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,hurf,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy,mml,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook,hurf,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/client/auth/gcp,jlowdermilk,0
k8s.io/kubernetes/plugin/pkg/client/auth/oidc,cjcullen,1
k8s.io/kubernetes/plugin/pkg/scheduler,fgrzadkowski,0
@ -956,14 +953,13 @@ k8s.io/kubernetes/test/integration/client,Q-Lee,1
k8s.io/kubernetes/test/integration/configmap,Q-Lee,1
k8s.io/kubernetes/test/integration/discoverysummarizer,fabioy,1
k8s.io/kubernetes/test/integration/examples,maisem,1
k8s.io/kubernetes/test/integration/federation,vulpecula,1
k8s.io/kubernetes/test/integration/federation,derekwaynecarr,0
k8s.io/kubernetes/test/integration/garbagecollector,jlowdermilk,1
k8s.io/kubernetes/test/integration/kubectl,vulpecula,1
k8s.io/kubernetes/test/integration/kubectl,derekwaynecarr,0
k8s.io/kubernetes/test/integration/master,fabioy,1
k8s.io/kubernetes/test/integration/metrics,lavalamp,1
k8s.io/kubernetes/test/integration/objectmeta,janetkuo,1
k8s.io/kubernetes/test/integration/openshift,kevin-wangzefeng,1
k8s.io/kubernetes/test/integration/persistentvolumes,cjcullen,1
k8s.io/kubernetes/test/integration/pods,smarterclayton,1
k8s.io/kubernetes/test/integration/quota,alex-mohr,1
k8s.io/kubernetes/test/integration/replicaset,janetkuo,1
@ -972,8 +968,9 @@ k8s.io/kubernetes/test/integration/scheduler,mikedanese,1
k8s.io/kubernetes/test/integration/scheduler_perf,roberthbailey,1
k8s.io/kubernetes/test/integration/secrets,rmmh,1
k8s.io/kubernetes/test/integration/serviceaccount,deads2k,1
k8s.io/kubernetes/test/integration/storageclasses,andyzheng0831,1
k8s.io/kubernetes/test/integration/storageclasses,derekwaynecarr,0
k8s.io/kubernetes/test/integration/thirdparty,davidopp,1
k8s.io/kubernetes/test/integration/volume,derekwaynecarr,0
k8s.io/kubernetes/test/list,maisem,1
kubelet Clean up pods on node kubelet should be able to delete * pods per node in *.,yujuhong,0
"when we run containers that should cause * should eventually see *, and then evict all of the correct pods",Random-Liu,0

101 Downward API volume should provide podname as non-root with fsgroup karlkfi derekwaynecarr 1 0
102 Downward API volume should provide podname as non-root with fsgroup and defaultMode derekwaynecarr 0
103 Downward API volume should provide podname only mwielgus 1
104 Downward API volume should set DefaultMode on files davidopp 1
105 Downward API volume should set mode on item file mtaufen 1
106 Downward API volume should update annotations on modification eparis 1
107 Downward API volume should update labels on modification timothysc 1
108 Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes andyzheng0831 derekwaynecarr 1 0
109 Dynamic provisioning DynamicProvisioner should create and delete persistent volumes jsafrane 0
110 DynamicKubeletConfiguration When a configmap called `kubelet-<node-name>` is added to the `kube-system` namespace The Kubelet on that node should restart to take up the new config mwielgus 1
111 ESIPP should handle updates to source ip annotation jsafrane 1
112 ESIPP should only target nodes with endpoints karlkfi derekwaynecarr 1 0
113 ESIPP should work for type=LoadBalancer fgrzadkowski 1
114 ESIPP should work for type=NodePort kargakis 1
115 ESIPP should work from pods cjcullen 1
143 Federated Services DNS non-local federated service should be able to discover a non-local federated service jlowdermilk 1
144 Federated Services DNS should be able to discover a federated service derekwaynecarr 1
145 Federated Services Service creation should create matching services in underlying clusters jbeda 1
146 Federated Services Service creation should not be deleted from underlying clusters when it is deleted sttts derekwaynecarr 0
Federated Services Service creation should not be deleted from underlying clusters when it is deleted madhusudancs 0
147 Federated Services Service creation should succeed rmmh 1
148 Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to connect to a federated ingress via its load balancer rmmh 1
149 Federated ingresses Federated Ingresses should be created and deleted successfully dchen1107 1
150 Federated ingresses Federated Ingresses should be deleted from underlying clusters when OrphanDependents is false nikhiljindal 0
151 Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters ghodss derekwaynecarr 1 0
152 Federated ingresses Federated Ingresses should not be deleted from underlying clusters when OrphanDependents is nil nikhiljindal 0
153 Federated ingresses Federated Ingresses should not be deleted from underlying clusters when OrphanDependents is true nikhiljindal 0
154 Federation API server authentication should accept cluster resources when the client has right authentication credentials davidopp 1
155 Federation API server authentication should not accept cluster resources when the client has invalid authentication credentials yujuhong 1
156 Federation API server authentication should not accept cluster resources when the client has no authentication credentials nikhiljindal 1
157 Federation apiserver Admission control should not be able to create resources if namespace does not exist alex-mohr 1
158 Federation apiserver Cluster objects should be created and deleted successfully ghodss derekwaynecarr 1 0
159 Federation daemonsets DaemonSet objects should be created and deleted successfully nikhiljindal 0
160 Federation daemonsets DaemonSet objects should be deleted from underlying clusters when OrphanDependents is false nikhiljindal 0
161 Federation daemonsets DaemonSet objects should not be deleted from underlying clusters when OrphanDependents is nil nikhiljindal 0
165 Federation deployments Federated Deployment should create and update matching deployments in underling clusters soltysh 1
166 Federation deployments Federated Deployment should not be deleted from underlying clusters when OrphanDependents is nil nikhiljindal 0
167 Federation deployments Federated Deployment should not be deleted from underlying clusters when OrphanDependents is true nikhiljindal 0
168 Federation events Event objects should be created and deleted successfully karlkfi derekwaynecarr 1 0
169 Federation namespace Namespace objects all resources in the namespace should be deleted when namespace is deleted nikhiljindal 0
170 Federation namespace Namespace objects should be created and deleted successfully xiang90 1
171 Federation namespace Namespace objects should be deleted from underlying clusters when OrphanDependents is false nikhiljindal 0
188 Garbage collector should delete pods created by rc when not orphaning justinsb 1
189 Garbage collector should orphan pods created by rc if delete options say so fabioy 1
190 Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil zmerlynn 1
191 Generated release_1_5 clientset should create pods, delete pods, watch pods ghodss derekwaynecarr 1 0
192 Generated release_1_5 clientset should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs soltysh 1
193 HA-master survive addition/removal replicas different zones derekwaynecarr 0
194 HA-master survive addition/removal replicas same zone derekwaynecarr 0
203 HostPath should give a volume the correct mode thockin 1
204 HostPath should support r/w luxas 1
205 HostPath should support subPath sttts 1
206 ImageID should be set to the manifest digest (from RepoDigests) when available hurf derekwaynecarr 1 0
207 InitContainer should invoke init containers on a RestartAlways pod saad-ali 1
208 InitContainer should invoke init containers on a RestartNever pod vulpecula derekwaynecarr 1 0
209 InitContainer should not start app containers and fail the pod if init containers fail on a RestartNever pod maisem 0
210 InitContainer should not start app containers if init containers fail on a RestartAlways pod maisem 0
211 Initial Resources should set initial resources based on historical data piosz 0
225 Kubectl client Kubectl apply should apply a new configuration to an existing RC pwittrock 0
226 Kubectl client Kubectl apply should reuse port when apply to an existing SVC deads2k 0
227 Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info pwittrock 0
228 Kubectl client Kubectl create quota should create a quota with scopes jdef derekwaynecarr 1 0
229 Kubectl client Kubectl create quota should create a quota without scopes xiang90 1
230 Kubectl client Kubectl create quota should reject quota with invalid scopes brendandburns 1
231 Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods pwittrock 0
233 Kubectl client Kubectl label should update the label on a resource pwittrock 0
234 Kubectl client Kubectl logs should be able to retrieve and filter logs jlowdermilk 0
235 Kubectl client Kubectl patch should add annotations for pods in rc janetkuo 0
236 Kubectl client Kubectl replace should update a single-container pod's image karlkfi derekwaynecarr 1 0
237 Kubectl client Kubectl rolling-update should support rolling-update to same image janetkuo 0
238 Kubectl client Kubectl run --rm job should create a job from an image, then delete the job soltysh 1
239 Kubectl client Kubectl run default should create an rc or deployment from an image janetkuo 0
263 Kubelet Container Manager Validate OOM score adjustments once the node is setup docker daemon's oom-score-adj should be -999 thockin 1
264 Kubelet Container Manager Validate OOM score adjustments once the node is setup guaranteed container's oom-score-adj should be -998 kargakis 1
265 Kubelet Container Manager Validate OOM score adjustments once the node is setup pod infra containers oom-score-adj should be -998 and best effort container's should be 1000 timothysc 1
266 Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space karlkfi derekwaynecarr 1 0
267 Kubelet Volume Manager Volume Manager On terminatation of pod with memory backed volume should remove the volume from the node derekwaynecarr 0
268 Kubelet experimental resource usage tracking resource tracking for * pods per node yujuhong 0
269 Kubelet regular resource usage tracking resource tracking for * pods per node yujuhong 0
274 KubeletManagedEtcHosts should test kubelet managed /etc/hosts file Random-Liu 1
275 Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive wonderfly 0
276 LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. cjcullen 1
277 Liveness liveness pods should be automatically restarted andyzheng0831 derekwaynecarr 1 0
278 Load capacity should be able to handle * pods per node Load capacity should be able to handle * pods per node * gmarek derekwaynecarr 0
279 Loadbalancing: L7 GCE shoud create ingress with given static-ip vulpecula derekwaynecarr 1 0
280 Loadbalancing: L7 GCE should conform to Ingress spec andyzheng0831 derekwaynecarr 1 0
281 Loadbalancing: L7 Nginx should conform to Ingress spec ncdc 1
282 Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node justinsb 1
283 MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed) ixdy 1
315 Networking Granular Checks: Services should function for node-Service: udp yifan-gu 1
316 Networking Granular Checks: Services should function for pod-Service: http childsb 1
317 Networking Granular Checks: Services should function for pod-Service: udp brendandburns 1
318 Networking Granular Checks: Services should update endpoints: http jdef derekwaynecarr 1 0
319 Networking Granular Checks: Services should update endpoints: udp freehan 1
320 Networking Granular Checks: Services should update nodePort: http nikhiljindal 1
321 Networking Granular Checks: Services should update nodePort: udp smarterclayton 1
340 PersistentVolumes with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access caesarxuchao 1
341 Pet Store should scale to persist a nominal number ( * ) of transactions in * seconds xiang90 1
342 Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host alex-mohr 1
343 Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully. ghodss derekwaynecarr 1 0
344 Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession saad-ali 0
345 Pod Disks should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host mml 1
346 Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully. saad-ali 1
360 Port forwarding With a server that expects a client request should support a client that connects, sends no data, and disconnects sttts 0
361 Port forwarding With a server that expects no client request should support a client that connects, sends no data, and disconnects sttts 0
362 PreStop should call prestop when killing a pod ncdc 1
363 PrivilegedPod should enable privileged commands dchen1107 derekwaynecarr 1 0
364 Probing container should *not* be restarted with a /healthz http liveness probe Random-Liu 0
365 Probing container should *not* be restarted with a exec "cat /tmp/health" liveness probe Random-Liu 0
366 Probing container should be restarted with a /healthz http liveness probe Random-Liu 0
369 Probing container should have monotonically increasing restart count Random-Liu 0
370 Probing container with readiness probe should not be ready before initial delay and never restart Random-Liu 0
371 Probing container with readiness probe that fails should never be ready and never restart Random-Liu 0
372 Proxy * should proxy logs on node karlkfi derekwaynecarr 1 0
373 Proxy * should proxy logs on node using proxy subresource hurf derekwaynecarr 1 0
374 Proxy * should proxy logs on node with explicit kubelet port ixdy 1
375 Proxy * should proxy logs on node with explicit kubelet port using proxy subresource dchen1107 1
376 Proxy * should proxy through a service and a pod karlkfi derekwaynecarr 1 0
377 Proxy * should proxy to cadvisor jszczepkowski 1
378 Proxy * should proxy to cadvisor using proxy subresource roberthbailey 1
379 Reboot each node by dropping all inbound packets for a while and ensure they function afterwards quinton-hoole 0
392 Rescheduler should ensure that critical pod is scheduled in case there is no resources available mtaufen 1
393 Resource-usage regular resource usage tracking resource tracking for * pods per node janetkuo 1
394 ResourceQuota should create a ResourceQuota and capture the life of a configMap. timstclair 1
395 ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class. derekwaynecarr 0
396 ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim. bgrant0607 1
397 ResourceQuota should create a ResourceQuota and capture the life of a pod. pmorie 1
398 ResourceQuota should create a ResourceQuota and capture the life of a replication controller. jdef derekwaynecarr 1 0
399 ResourceQuota should create a ResourceQuota and capture the life of a secret. ncdc 1
400 ResourceQuota should create a ResourceQuota and capture the life of a service. timstclair 1
401 ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. krousey 1
402 ResourceQuota should verify ResourceQuota with best effort scope. mml 1
403 ResourceQuota should verify ResourceQuota with terminating scopes. ncdc 1
404 Restart Docker Daemon Network should recover from ip leak bprashanth 0
405 Restart should restart all nodes and ensure all nodes and pods recover andyzheng0831 derekwaynecarr 1 0
406 RethinkDB should create and stop rethinkdb servers mwielgus 1
407 SSH should SSH to all nodes and run commands quinton-hoole 0
408 SchedulerPredicates validates MaxPods limit number of pods that are allowed to run gmarek 0
409 SchedulerPredicates validates resource limits of pods that are allowed to run gmarek 0
410 SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching hurf derekwaynecarr 1 0
411 SchedulerPredicates validates that InterPod Affinity and AntiAffinity is respected if matching yifan-gu 1
412 SchedulerPredicates validates that InterPodAffinity is respected if matching kevin-wangzefeng 1
413 SchedulerPredicates validates that InterPodAffinity is respected if matching with multiple Affinities caesarxuchao 1
418 SchedulerPredicates validates that a pod with an invalid NodeAffinity is rejected deads2k 1
419 SchedulerPredicates validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid smarterclayton 1
420 SchedulerPredicates validates that embedding the JSON NodeAffinity setting as a string in the annotation value work kevin-wangzefeng 1
421 SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work hurf derekwaynecarr 1 0
422 SchedulerPredicates validates that required NodeAffinity setting is respected if matching mml 1
423 SchedulerPredicates validates that taints-tolerations is respected if matching jlowdermilk 1
424 SchedulerPredicates validates that taints-tolerations is respected if not matching derekwaynecarr 1
425 Secret should create a pod that reads a secret luxas 1
426 Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace rkouj 0
427 Secrets should be consumable from pods in env vars mml 1
428 Secrets should be consumable from pods in volume ghodss derekwaynecarr 1 0
429 Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set derekwaynecarr 0
430 Secrets should be consumable from pods in volume with defaultMode set derekwaynecarr 1
431 Secrets should be consumable from pods in volume with mappings jbeda 1
432 Secrets should be consumable from pods in volume with mappings and Item Mode set quinton-hoole 1
433 Secrets should be consumable in multiple volumes in a pod alex-mohr 1
434 Security Context should support container.SecurityContext.RunAsUser alex-mohr 1
435 Security Context should support pod.Spec.SecurityContext.RunAsUser bgrant0607 1
436 Security Context should support pod.Spec.SecurityContext.SupplementalGroups andyzheng0831 derekwaynecarr 1 0
437 Security Context should support seccomp alpha docker/default annotation freehan 1
438 Security Context should support seccomp alpha unconfined annotation on the container childsb 1
439 Security Context should support seccomp alpha unconfined annotation on the pod krousey 1
450 Services should be able to up and down services bprashanth 0
451 Services should check NodePort out-of-range bprashanth 0
452 Services should create endpoints for unready pods maisem 0
453 Services should only allow access from service loadbalancer source ranges sttts derekwaynecarr 0
Services should only allow access from service loadbalancer source ranges madhusudancs 0
454 Services should preserve source pod IP for traffic thru service cluster IP Random-Liu 1
455 Services should prevent NodePort collisions bprashanth 0
456 Services should provide secure master service bprashanth 0
460 Services should use same NodePort with same port but different protocols timothysc 1
461 Services should work after restarting apiserver bprashanth 0
462 Services should work after restarting kube-proxy bprashanth 0
463 SimpleMount should be able to mount an emptydir on a container karlkfi derekwaynecarr 1 0
464 Spark should start spark master, driver and workers jszczepkowski 1
465 Staging client repo client should create pods, delete pods, watch pods jbeda 1
466 Stateful Set recreate should recreate evicted statefulset derekwaynecarr 0
493 V1Job should run a job to completion when tasks succeed soltysh 1
494 V1Job should scale a job down soltysh 1
495 V1Job should scale a job up soltysh 1
496 Variable Expansion should allow composing env vars into new env vars ghodss derekwaynecarr 1 0
497 Variable Expansion should allow substituting values in a container's args dchen1107 1
498 Variable Expansion should allow substituting values in a container's command mml 1
499 Volumes Ceph RBD should be mountable fabioy 1
500 Volumes CephFS should be mountable Q-Lee 1
501 Volumes Cinder should be mountable cjcullen 1
502 Volumes GlusterFS should be mountable eparis 1
503 Volumes NFS should be mountable andyzheng0831 derekwaynecarr 1 0
504 Volumes PD should be mountable caesarxuchao 1
505 Volumes iSCSI should be mountable jsafrane 1
506 k8s.io/kubernetes/cmd/genutils rmmh 1
507 k8s.io/kubernetes/cmd/hyperkube jbeda 0
k8s.io/kubernetes/cmd/kube-apiserver/app nikhiljindal 0
508 k8s.io/kubernetes/cmd/kube-apiserver/app/options nikhiljindal 0
509 k8s.io/kubernetes/cmd/kube-discovery/app pmorie 1
510 k8s.io/kubernetes/cmd/kube-proxy/app luxas 1
515 k8s.io/kubernetes/cmd/kubeadm/app/preflight apprenda 0
516 k8s.io/kubernetes/cmd/kubeadm/app/util krousey 1
517 k8s.io/kubernetes/cmd/kubeadm/test pipejakob 0
518 k8s.io/kubernetes/cmd/kubelet/app hurf derekwaynecarr 1 0
519 k8s.io/kubernetes/cmd/libs/go2idl/client-gen/types caesarxuchao 0
520 k8s.io/kubernetes/cmd/libs/go2idl/go-to-protobuf/protobuf smarterclayton 0
521 k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen/generators davidopp 1
532 k8s.io/kubernetes/federation/pkg/federation-controller/daemonset childsb 1
533 k8s.io/kubernetes/federation/pkg/federation-controller/deployment zmerlynn 1
534 k8s.io/kubernetes/federation/pkg/federation-controller/ingress vishh 1
535 k8s.io/kubernetes/federation/pkg/federation-controller/namespace hurf derekwaynecarr 1 0
536 k8s.io/kubernetes/federation/pkg/federation-controller/replicaset roberthbailey 1
537 k8s.io/kubernetes/federation/pkg/federation-controller/secret apelisse 1
538 k8s.io/kubernetes/federation/pkg/federation-controller/service pmorie 1
555 k8s.io/kubernetes/pkg/api/resource smarterclayton 1
556 k8s.io/kubernetes/pkg/api/service spxtr 1
557 k8s.io/kubernetes/pkg/api/testapi caesarxuchao 1
558 k8s.io/kubernetes/pkg/api/util ghodss derekwaynecarr 1 0
559 k8s.io/kubernetes/pkg/api/v1 vulpecula derekwaynecarr 1 0
560 k8s.io/kubernetes/pkg/api/v1/endpoints sttts derekwaynecarr 0
561 k8s.io/kubernetes/pkg/api/v1/pod sttts derekwaynecarr 0
562 k8s.io/kubernetes/pkg/api/v1/service sttts derekwaynecarr 0
k8s.io/kubernetes/pkg/api/v1/endpoints madhusudancs 0
k8s.io/kubernetes/pkg/api/v1/pod madhusudancs 0
k8s.io/kubernetes/pkg/api/v1/service madhusudancs 0
563 k8s.io/kubernetes/pkg/api/validation smarterclayton 1
564 k8s.io/kubernetes/pkg/api/validation/path luxas 1
565 k8s.io/kubernetes/pkg/apimachinery gmarek 1
578 k8s.io/kubernetes/pkg/apis/extensions/v1beta1 madhusudancs 1
579 k8s.io/kubernetes/pkg/apis/extensions/validation nikhiljindal 1
580 k8s.io/kubernetes/pkg/apis/meta/v1 sttts 0
581 k8s.io/kubernetes/pkg/apis/meta/v1/validation jszczepkowski derekwaynecarr 1 0
582 k8s.io/kubernetes/pkg/apis/policy/validation deads2k 1
583 k8s.io/kubernetes/pkg/apis/rbac/validation erictune 0
584 k8s.io/kubernetes/pkg/apis/storage/validation caesarxuchao 1
588 k8s.io/kubernetes/pkg/auth/authenticator/bearertoken liggitt 0
589 k8s.io/kubernetes/pkg/auth/authorizer/abac liggitt 0
590 k8s.io/kubernetes/pkg/auth/authorizer/union liggitt 0
591 k8s.io/kubernetes/pkg/auth/group andyzheng0831 derekwaynecarr 1 0
592 k8s.io/kubernetes/pkg/auth/handlers liggitt 0
593 k8s.io/kubernetes/pkg/client/cache xiang90 1
594 k8s.io/kubernetes/pkg/client/chaosclient deads2k 1
595 k8s.io/kubernetes/pkg/client/leaderelection xiang90 1
596 k8s.io/kubernetes/pkg/client/listers/batch/internalversion mqliang 0
597 k8s.io/kubernetes/pkg/client/record karlkfi derekwaynecarr 1 0
598 k8s.io/kubernetes/pkg/client/restclient kargakis 1
599 k8s.io/kubernetes/pkg/client/retry caesarxuchao 1
600 k8s.io/kubernetes/pkg/client/testing/cache mikedanese 1
607 k8s.io/kubernetes/pkg/client/unversioned/clientcmd yifan-gu 1
608 k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api thockin 1
609 k8s.io/kubernetes/pkg/client/unversioned/portforward lavalamp 1
610 k8s.io/kubernetes/pkg/client/unversioned/remotecommand andyzheng0831 derekwaynecarr 1 0
611 k8s.io/kubernetes/pkg/cloudprovider/providers/aws eparis 1
612 k8s.io/kubernetes/pkg/cloudprovider/providers/azure saad-ali 1
613 k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack roberthbailey 1
628 k8s.io/kubernetes/pkg/controller/garbagecollector rmmh 1
629 k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly cjcullen 1
630 k8s.io/kubernetes/pkg/controller/job soltysh 1
631 k8s.io/kubernetes/pkg/controller/namespace karlkfi derekwaynecarr 1 0
632 k8s.io/kubernetes/pkg/controller/node gmarek 0
633 k8s.io/kubernetes/pkg/controller/petset fgrzadkowski 1
634 k8s.io/kubernetes/pkg/controller/podautoscaler piosz 0
635 k8s.io/kubernetes/pkg/controller/podautoscaler/metrics piosz 0
636 k8s.io/kubernetes/pkg/controller/podgc jdef derekwaynecarr 1 0
637 k8s.io/kubernetes/pkg/controller/replicaset fgrzadkowski 0
638 k8s.io/kubernetes/pkg/controller/replication fgrzadkowski 0
639 k8s.io/kubernetes/pkg/controller/resourcequota ghodss derekwaynecarr 1 0
640 k8s.io/kubernetes/pkg/controller/route gmarek 0
641 k8s.io/kubernetes/pkg/controller/service asalkeld 0
642 k8s.io/kubernetes/pkg/controller/serviceaccount liggitt 0
643 k8s.io/kubernetes/pkg/controller/volume/attachdetach luxas 1
644 k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache hurf derekwaynecarr 1 0
645 k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler jsafrane 1
646 k8s.io/kubernetes/pkg/controller/volume/persistentvolume jsafrane 0
647 k8s.io/kubernetes/pkg/conversion ixdy 1
649 k8s.io/kubernetes/pkg/credentialprovider justinsb 1
650 k8s.io/kubernetes/pkg/credentialprovider/aws zmerlynn 1
651 k8s.io/kubernetes/pkg/credentialprovider/gcp mml 1
652 k8s.io/kubernetes/pkg/dns jdef derekwaynecarr 1 0
653 k8s.io/kubernetes/pkg/dns/config derekwaynecarr 0
654 k8s.io/kubernetes/pkg/dns/federation derekwaynecarr 0
655 k8s.io/kubernetes/pkg/dns/treecache bowei 0
668 k8s.io/kubernetes/pkg/kubectl/cmd/config asalkeld 0
669 k8s.io/kubernetes/pkg/kubectl/cmd/set erictune 1
670 k8s.io/kubernetes/pkg/kubectl/cmd/util asalkeld 0
671 k8s.io/kubernetes/pkg/kubectl/cmd/util/editor jdef derekwaynecarr 1 0
672 k8s.io/kubernetes/pkg/kubectl/resource caesarxuchao 1
673 k8s.io/kubernetes/pkg/kubelet vishh 0
674 k8s.io/kubernetes/pkg/kubelet/cadvisor sttts 1
679 k8s.io/kubernetes/pkg/kubelet/custommetrics kevin-wangzefeng 0
680 k8s.io/kubernetes/pkg/kubelet/dockershim zmerlynn 1
681 k8s.io/kubernetes/pkg/kubelet/dockertools deads2k 1
682 k8s.io/kubernetes/pkg/kubelet/envvars karlkfi derekwaynecarr 1 0
683 k8s.io/kubernetes/pkg/kubelet/eviction childsb 1
684 k8s.io/kubernetes/pkg/kubelet/images caesarxuchao 1
685 k8s.io/kubernetes/pkg/kubelet/kuberuntime yifan-gu 1
707 k8s.io/kubernetes/pkg/kubelet/util/cache timothysc 1
708 k8s.io/kubernetes/pkg/kubelet/util/format ncdc 1
709 k8s.io/kubernetes/pkg/kubelet/util/queue yujuhong 0
710 k8s.io/kubernetes/pkg/kubelet/volumemanager jdef derekwaynecarr 1 0
711 k8s.io/kubernetes/pkg/kubelet/volumemanager/cache janetkuo 1
712 k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler timstclair 1
713 k8s.io/kubernetes/pkg/labels ixdy 1
716 k8s.io/kubernetes/pkg/probe/http mtaufen 1
717 k8s.io/kubernetes/pkg/probe/tcp mtaufen 1
718 k8s.io/kubernetes/pkg/proxy/config ixdy 1
719 k8s.io/kubernetes/pkg/proxy/healthcheck ghodss derekwaynecarr 1 0
720 k8s.io/kubernetes/pkg/proxy/iptables freehan 0
721 k8s.io/kubernetes/pkg/proxy/userspace luxas 1
722 k8s.io/kubernetes/pkg/proxy/winuserspace jbhurat 0
749 k8s.io/kubernetes/pkg/registry/core/persistentvolume lavalamp 1
750 k8s.io/kubernetes/pkg/registry/core/persistentvolume/etcd derekwaynecarr 1
751 k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim bgrant0607 1
752 k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd vulpecula derekwaynecarr 1 0
753 k8s.io/kubernetes/pkg/registry/core/pod Random-Liu 1
754 k8s.io/kubernetes/pkg/registry/core/pod/etcd alex-mohr 1
755 k8s.io/kubernetes/pkg/registry/core/pod/rest jsafrane 1
756 k8s.io/kubernetes/pkg/registry/core/podtemplate thockin 1
757 k8s.io/kubernetes/pkg/registry/core/podtemplate/etcd brendandburns 1
758 k8s.io/kubernetes/pkg/registry/core/resourcequota vulpecula derekwaynecarr 1 0
759 k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd ghodss derekwaynecarr 1 0
760 k8s.io/kubernetes/pkg/registry/core/rest deads2k 0
761 k8s.io/kubernetes/pkg/registry/core/secret jdef derekwaynecarr 1 0
762 k8s.io/kubernetes/pkg/registry/core/secret/etcd freehan 1
763 k8s.io/kubernetes/pkg/registry/core/service madhusudancs 1
764 k8s.io/kubernetes/pkg/registry/core/service/allocator jbeda 1
767 k8s.io/kubernetes/pkg/registry/core/service/ipallocator eparis 1
768 k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller mtaufen 1
769 k8s.io/kubernetes/pkg/registry/core/service/ipallocator/etcd kargakis 1
770 k8s.io/kubernetes/pkg/registry/core/service/portallocator jdef derekwaynecarr 1 0
771 k8s.io/kubernetes/pkg/registry/core/serviceaccount caesarxuchao 1
772 k8s.io/kubernetes/pkg/registry/core/serviceaccount/etcd bprashanth 1
773 k8s.io/kubernetes/pkg/registry/extensions/controller/etcd mwielgus 1
774 k8s.io/kubernetes/pkg/registry/extensions/daemonset nikhiljindal 1
775 k8s.io/kubernetes/pkg/registry/extensions/daemonset/etcd spxtr 1
776 k8s.io/kubernetes/pkg/registry/extensions/deployment dchen1107 1
777 k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd ghodss derekwaynecarr 1 0
778 k8s.io/kubernetes/pkg/registry/extensions/ingress apelisse 1
779 k8s.io/kubernetes/pkg/registry/extensions/ingress/etcd apelisse 1
780 k8s.io/kubernetes/pkg/registry/extensions/networkpolicy deads2k 1
781 k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/etcd ncdc 1
782 k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/etcd erictune 1
783 k8s.io/kubernetes/pkg/registry/extensions/replicaset andyzheng0831 derekwaynecarr 1 0
784 k8s.io/kubernetes/pkg/registry/extensions/replicaset/etcd fabioy 1
785 k8s.io/kubernetes/pkg/registry/extensions/rest karlkfi derekwaynecarr 1 0
786 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource mwielgus 1
787 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd vulpecula derekwaynecarr 1 0
788 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata sttts 1
789 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/etcd sttts 1
790 k8s.io/kubernetes/pkg/registry/generic/registry jsafrane 1
794 k8s.io/kubernetes/pkg/registry/storage/storageclass brendandburns 1
795 k8s.io/kubernetes/pkg/registry/storage/storageclass/etcd eparis 1
796 k8s.io/kubernetes/pkg/runtime wojtek-t 0
797 k8s.io/kubernetes/pkg/runtime/schema sttts derekwaynecarr 0
k8s.io/kubernetes/pkg/runtime/schema madhusudancs 0
798 k8s.io/kubernetes/pkg/runtime/serializer wojtek-t 0
799 k8s.io/kubernetes/pkg/runtime/serializer/json wojtek-t 0
800 k8s.io/kubernetes/pkg/runtime/serializer/protobuf wojtek-t 0
803 k8s.io/kubernetes/pkg/runtime/serializer/versioning wojtek-t 0
804 k8s.io/kubernetes/pkg/security/apparmor bgrant0607 1
805 k8s.io/kubernetes/pkg/security/podsecuritypolicy erictune 0
806 k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor vulpecula derekwaynecarr 1 0
807 k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities erictune 0
808 k8s.io/kubernetes/pkg/security/podsecuritypolicy/group erictune 0
809 k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp rmmh 1
810 k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux erictune 0
811 k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl andyzheng0831 derekwaynecarr 1 0
812 k8s.io/kubernetes/pkg/security/podsecuritypolicy/user erictune 0
813 k8s.io/kubernetes/pkg/security/podsecuritypolicy/util erictune 0
814 k8s.io/kubernetes/pkg/securitycontext erictune 1
823 k8s.io/kubernetes/pkg/util/async spxtr 1
824 k8s.io/kubernetes/pkg/util/bandwidth thockin 1
825 k8s.io/kubernetes/pkg/util/cache thockin 1
826 k8s.io/kubernetes/pkg/util/cert karlkfi derekwaynecarr 1 0
827 k8s.io/kubernetes/pkg/util/clock zmerlynn 1
828 k8s.io/kubernetes/pkg/util/config jszczepkowski 1
829 k8s.io/kubernetes/pkg/util/configz ixdy 1
833 k8s.io/kubernetes/pkg/util/errors jlowdermilk 1
834 k8s.io/kubernetes/pkg/util/exec krousey 1
835 k8s.io/kubernetes/pkg/util/flowcontrol ixdy 1
836 k8s.io/kubernetes/pkg/util/flushwriter vulpecula derekwaynecarr 1 0
837 k8s.io/kubernetes/pkg/util/framer piosz 1
838 k8s.io/kubernetes/pkg/util/goroutinemap saad-ali 0
839 k8s.io/kubernetes/pkg/util/hash timothysc 1
842 k8s.io/kubernetes/pkg/util/integer childsb 1
843 k8s.io/kubernetes/pkg/util/intstr brendandburns 1
844 k8s.io/kubernetes/pkg/util/io mtaufen 1
845 k8s.io/kubernetes/pkg/util/iptables hurf derekwaynecarr 1 0
846 k8s.io/kubernetes/pkg/util/json liggitt 0
847 k8s.io/kubernetes/pkg/util/jsonpath spxtr 1
848 k8s.io/kubernetes/pkg/util/keymutex saad-ali 0
850 k8s.io/kubernetes/pkg/util/limitwriter deads2k 1
851 k8s.io/kubernetes/pkg/util/mount xiang90 1
852 k8s.io/kubernetes/pkg/util/net spxtr 1
853 k8s.io/kubernetes/pkg/util/net/sets jdef derekwaynecarr 1 0
854 k8s.io/kubernetes/pkg/util/node liggitt 0
855 k8s.io/kubernetes/pkg/util/oom vishh 0
856 k8s.io/kubernetes/pkg/util/parsers derekwaynecarr 1
857 k8s.io/kubernetes/pkg/util/procfs roberthbailey 1
858 k8s.io/kubernetes/pkg/util/proxy cjcullen 1
859 k8s.io/kubernetes/pkg/util/rand madhusudancs 1
k8s.io/kubernetes/pkg/util/ratelimit justinsb 1
860 k8s.io/kubernetes/pkg/util/runtime davidopp 1
861 k8s.io/kubernetes/pkg/util/sets quinton-hoole 0
862 k8s.io/kubernetes/pkg/util/slice quinton-hoole 0
863 k8s.io/kubernetes/pkg/util/strategicpatch brendandburns 1
864 k8s.io/kubernetes/pkg/util/strings quinton-hoole 0
865 k8s.io/kubernetes/pkg/util/system mwielgus 0
866 k8s.io/kubernetes/pkg/util/taints derekwaynecarr 0
867 k8s.io/kubernetes/pkg/util/term davidopp 1
868 k8s.io/kubernetes/pkg/util/testing jlowdermilk 1
869 k8s.io/kubernetes/pkg/util/threading roberthbailey 1
883 k8s.io/kubernetes/pkg/volume/configmap derekwaynecarr 1
884 k8s.io/kubernetes/pkg/volume/downwardapi mikedanese 1
885 k8s.io/kubernetes/pkg/volume/empty_dir quinton-hoole 1
886 k8s.io/kubernetes/pkg/volume/fc andyzheng0831 derekwaynecarr 1 0
887 k8s.io/kubernetes/pkg/volume/flexvolume Q-Lee 1
888 k8s.io/kubernetes/pkg/volume/flocker jbeda 1
889 k8s.io/kubernetes/pkg/volume/gce_pd saad-ali 0
913 k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision derekwaynecarr 0
914 k8s.io/kubernetes/plugin/pkg/admission/namespace/exists derekwaynecarr 0
915 k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle derekwaynecarr 0
916 k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label jdef derekwaynecarr 1 0
917 k8s.io/kubernetes/plugin/pkg/admission/podnodeselector ixdy 1
918 k8s.io/kubernetes/plugin/pkg/admission/resourcequota fabioy 1
919 k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy maisem 1
920 k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny vulpecula derekwaynecarr 1 0
921 k8s.io/kubernetes/plugin/pkg/admission/serviceaccount liggitt 0
922 k8s.io/kubernetes/plugin/pkg/admission/storageclass/default pmorie 1
923 k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/allow liggitt 0
930 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/anytoken krousey 1
931 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc brendandburns 1
932 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile liggitt 0
933 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook ghodss derekwaynecarr 1 0
934 k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac hurf derekwaynecarr 1 0
935 k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy mml 1
936 k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook hurf derekwaynecarr 1 0
937 k8s.io/kubernetes/plugin/pkg/client/auth/gcp jlowdermilk 0
938 k8s.io/kubernetes/plugin/pkg/client/auth/oidc cjcullen 1
939 k8s.io/kubernetes/plugin/pkg/scheduler fgrzadkowski 0
953 k8s.io/kubernetes/test/integration/configmap Q-Lee 1
954 k8s.io/kubernetes/test/integration/discoverysummarizer fabioy 1
955 k8s.io/kubernetes/test/integration/examples maisem 1
956 k8s.io/kubernetes/test/integration/federation vulpecula derekwaynecarr 1 0
957 k8s.io/kubernetes/test/integration/garbagecollector jlowdermilk 1
958 k8s.io/kubernetes/test/integration/kubectl vulpecula derekwaynecarr 1 0
959 k8s.io/kubernetes/test/integration/master fabioy 1
960 k8s.io/kubernetes/test/integration/metrics lavalamp 1
961 k8s.io/kubernetes/test/integration/objectmeta janetkuo 1
962 k8s.io/kubernetes/test/integration/openshift kevin-wangzefeng 1
k8s.io/kubernetes/test/integration/persistentvolumes cjcullen 1
963 k8s.io/kubernetes/test/integration/pods smarterclayton 1
964 k8s.io/kubernetes/test/integration/quota alex-mohr 1
965 k8s.io/kubernetes/test/integration/replicaset janetkuo 1
968 k8s.io/kubernetes/test/integration/scheduler_perf roberthbailey 1
969 k8s.io/kubernetes/test/integration/secrets rmmh 1
970 k8s.io/kubernetes/test/integration/serviceaccount deads2k 1
971 k8s.io/kubernetes/test/integration/storageclasses andyzheng0831 derekwaynecarr 1 0
972 k8s.io/kubernetes/test/integration/thirdparty davidopp 1
973 k8s.io/kubernetes/test/integration/volume derekwaynecarr 0
974 k8s.io/kubernetes/test/list maisem 1
975 kubelet Clean up pods on node kubelet should be able to delete * pods per node in *. yujuhong 0
976 when we run containers that should cause * should eventually see *, and then evict all of the correct pods Random-Liu 0