Merge pull request #35480 from rmmh/update_owners_local

Automatic merge from submit-queue

Make hack/update_owners.py get the test list from the local repo, and add a --check option.

This should become a verify step soon. The munger understands the * syntax already.
Kubernetes Submit Queue committed 2016-10-25 13:24:25 -07:00 (via GitHub)
commit 74244283c6
4 changed files with 244 additions and 310 deletions
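The "*" rows this change introduces into the owners CSV (for example "Density should allow starting * pods per node") act as wildcards over normalized test names, which is what lets many per-count rows collapse into one. The actual matching lives in the munger, not in this script; the snippet below is only a rough, hypothetical sketch of that kind of wildcard match, written in the same Python 2 style as the script in this PR.

import re

def owner_pattern_matches(pattern, test_name):
    # Escape everything literally, but let '*' match any run of characters.
    regex = '^' + '.*'.join(re.escape(piece) for piece in pattern.split('*')) + '$'
    return re.match(regex, test_name) is not None

# One consolidated row such as
#   Density should allow starting * pods per node,gmarek,0
# then covers each of the per-count names it replaced:
print owner_pattern_matches('Density should allow starting * pods per node',
                            'Density should allow starting 30 pods per node')  # True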

View File

@ -14,12 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import re
import json
import os
import random
import subprocess
import sys
import time
import urllib2
@ -33,6 +35,13 @@ SKIP_MAINTAINERS = {
'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
'goltermann', 'sarahnovotny'}
def normalize(name):
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
def get_test_history(days_ago):
url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
time.gmtime(time.time() - days_ago * 24 * 60 * 60))
@ -43,10 +52,19 @@ def get_test_history(days_ago):
return json.loads(content)
def normalize(name):
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
def get_test_names_from_test_history():
test_names = set()
for days_ago in range(4):
test_history = get_test_history(days_ago)
test_names.update(normalize(name) for name in test_history['test_names'])
return test_names
def get_test_names_from_local_files():
tests_json = subprocess.check_output(['go', 'run', 'test/list/main.go', '-json'])
tests = json.loads(tests_json)
return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
for t in tests}
def load_owners(fname):
@ -98,10 +116,16 @@ def get_maintainers():
def main():
test_names = set()
for days_ago in range(4):
test_history = get_test_history(days_ago)
test_names.update(normalize(name) for name in test_history['test_names'])
parser = argparse.ArgumentParser()
parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
parser.add_argument('--user', help='User to assign new tests to (RANDOM for random assignment).')
parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
options = parser.parse_args()
if options.history:
test_names = get_test_names_from_test_history()
else:
test_names = get_test_names_from_local_files()
test_names.add('DEFAULT')
test_names = sorted(test_names)
owners = load_owners(OWNERS_PATH)
@ -115,6 +139,13 @@ def main():
print '# NEW TESTS (%d):' % len(new_tests)
print '\n'.join(new_tests)
if options.check:
if new_tests or outdated_tests:
print
print 'ERROR: the test list has changed'
sys.exit(1)
sys.exit(0)
for name in outdated_tests:
owners.pop(name)
@ -130,7 +161,12 @@ def main():
owner for name, (owner, random) in owners.iteritems()
if owner in maintainers)
for test_name in set(test_names) - set(owners):
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
if options.user == 'RANDOM':
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
elif options.user:
new_owner = options.user
else:
raise AssertionError('--user must be specified for new tests')
owner_counts[new_owner] += 1
owners[test_name] = (new_owner, True)
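With the change above, hack/update_owners.py reads the test list from the local repo by default (via 'go run test/list/main.go -json'), keeps --history for the old GCS-based behaviour, and gains --check so it can back a verify step. A minimal sketch of how such a verify step might invoke it follows; the wrapper itself is hypothetical and not part of this PR, only the --check and --user flags come from the diff above.

#!/usr/bin/env python
# Hypothetical verify wrapper around the --check mode added above.
import subprocess
import sys

# --check exits nonzero if the owners CSV no longer matches the local test list.
rc = subprocess.call(['python', 'hack/update_owners.py', '--check'])
if rc != 0:
    print 'test owners CSV is out of date; re-run hack/update_owners.py --user=<your-github-name>'
sys.exit(rc)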

View File

@ -24,7 +24,7 @@ import (
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("[Feature:Empty]", func() {
var _ = framework.KubeDescribe("Empty [Feature:Empty]", func() {
f := framework.NewDefaultFramework("empty")
BeforeEach(func() {
@ -39,5 +39,5 @@ var _ = framework.KubeDescribe("[Feature:Empty]", func() {
framework.ExpectNoError(err)
})
It("Does nothing", func() {})
It("does nothing", func() {})
})

View File

@ -38,59 +38,51 @@ import (
var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
if isAppArmorEnabled() {
testAppArmorNode()
BeforeEach(func() {
By("Loading AppArmor profiles for testing")
framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
})
Context("when running with AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")
It("should reject an unloaded profile", func() {
status := runAppArmorTest(f, apparmor.ProfileNamePrefix+"non-existant-profile")
Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
})
It("should enforce a profile blocking writes", func() {
status := runAppArmorTest(f, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
if len(status.ContainerStatuses) == 0 {
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
})
It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
if len(status.ContainerStatuses) == 0 {
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
})
})
} else {
testNonAppArmorNode()
Context("when running without AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")
It("should reject a pod with an AppArmor profile", func() {
status := runAppArmorTest(f, apparmor.ProfileRuntimeDefault)
Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
})
})
}
})
func testAppArmorNode() {
BeforeEach(func() {
By("Loading AppArmor profiles for testing")
framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
})
Context("when running with AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")
It("should reject an unloaded profile", func() {
status := runAppArmorTest(f, apparmor.ProfileNamePrefix+"non-existant-profile")
Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
})
It("should enforce a profile blocking writes", func() {
status := runAppArmorTest(f, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
if len(status.ContainerStatuses) == 0 {
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
})
It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
if len(status.ContainerStatuses) == 0 {
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
})
})
}
func testNonAppArmorNode() {
Context("when running without AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")
It("should reject a pod with an AppArmor profile", func() {
status := runAppArmorTest(f, apparmor.ProfileRuntimeDefault)
Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
})
})
}
const apparmorProfilePrefix = "e2e-node-apparmor-test-"
const testProfiles = `
#include <tunables/global>

View File

@ -1,21 +1,15 @@
name,owner,auto-assigned
DEFAULT,rmmh/spxtr/ixdy/apelisse/fejta,0
Addon update should propagate add-on file changes,eparis,1
AppArmor when running with AppArmor should enforce a permissive profile,brendandburns,1
AppArmor when running with AppArmor should enforce a profile blocking writes,timstclair,1
AppArmor when running with AppArmor should reject an unloaded profile,eparis,1
AppArmor when running without AppArmor should reject a pod with an AppArmor profile,childsb,1
BeforeSuite,ixdy,0
AppArmor when running with AppArmor should enforce a permissive profile,yujuhong,1
AppArmor when running with AppArmor should enforce a profile blocking writes,freehan,1
AppArmor when running with AppArmor should reject an unloaded profile,kargakis,1
AppArmor when running without AppArmor should reject a pod with an AppArmor profile,vulpecula,1
Cadvisor should be healthy on every node.,vishh,0
Cassandra should create and scale cassandra,fabioy,1
CassandraPetSet should create petset,erictune,1
Celery-RabbitMQ should create and stop celery+rabbitmq servers,luxas,1
Cluster level logging using Elasticsearch should check that logs from containers are ingested into Elasticsearch,crassirostris,0
Cluster level logging using GCL should check that logs from containers are ingested in GCL,crassirostris,0
Cluster size autoscaling Should scale cluster size based on cpu reservation,dchen1107,1
Cluster size autoscaling Should scale cluster size based on cpu utilization,ixdy,1
Cluster size autoscaling Should scale cluster size based on memory reservation,ghodss,1
Cluster size autoscaling Should scale cluster size based on memory utilization,childsb,1
Cluster size autoscaling should add node to the particular mig,spxtr,1
Cluster size autoscaling should correctly scale down after a node is not needed,pmorie,1
Cluster size autoscaling should correctly scale down after a node is not needed when there is non autoscaled pool,krousey,1
@ -37,12 +31,11 @@ ConfigMap should be consumable from pods in volume with mappings as non-root wit
ConfigMap should be consumable in multiple volumes in the same pod,caesarxuchao,1
ConfigMap should be consumable via environment variable,ncdc,1
ConfigMap updates should be reflected in volume,kevin-wangzefeng,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull from private registry with secret,mikedanese,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from docker hub,girishkalele,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from gcr.io,Random-Liu,0
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull from private registry without secret,yifan-gu,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull image from invalid registry,janetkuo,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull non-existing image from gcr.io,kevin-wangzefeng,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute poststart exec hook properly,dchen1107,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute prestop exec hook properly,ixdy,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute poststart http hook properly,jszczepkowski,1
Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute prestop http hook properly,janetkuo,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image *,Random-Liu,0
Container Runtime Conformance Test container runtime conformance blackbox test when starting a container that exits it should run with the expected status,luxas,1
Container Runtime Conformance Test container runtime conformance blackbox test when starting a container that exits should report termination message if TerminationMessagePath is set,timothysc,1
DNS should provide DNS for ExternalName services,rmmh,1
@ -55,30 +48,15 @@ Daemon set should run and stop simple daemon,mtaufen,1
DaemonRestart Controller Manager should not create/delete replicas across restart,vulpecula,1
DaemonRestart Kubelet should not restart containers across restart,madhusudancs,1
DaemonRestart Scheduler should continue assigning pods to nodes across restart,lavalamp,1
Density create a batch of pods latency/resource should be within limit when create 10 pods with 0 interval,zmerlynn,1
Density create a batch of pods latency/resource should be within limit when create 10 pods with 100ms interval,jlowdermilk,1
Density create a batch of pods latency/resource should be within limit when create 10 pods with 300ms interval,jsafrane,1
Density create a batch of pods latency/resource should be within limit when create 105 pods with 0 interval,bgrant0607,1
Density create a batch of pods latency/resource should be within limit when create 105 pods with 100ms interval,derekwaynecarr,1
Density create a batch of pods latency/resource should be within limit when create 105 pods with 300ms interval,saad-ali,1
Density create a batch of pods latency/resource should be within limit when create 35 pods with 0 interval,davidopp,1
Density create a batch of pods latency/resource should be within limit when create 35 pods with 100ms interval,kargakis,1
Density create a batch of pods latency/resource should be within limit when create 35 pods with 300ms interval,justinsb,1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create 105 pods with 0 interval (QPS 60),roberthbailey,1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create 105 pods with 100ms interval (QPS 60),kevin-wangzefeng,1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create 105 pods with 300ms interval (QPS 60),apelisse,1
Density create a sequence of pods latency/resource should be within limit when create 10 pods with 50 background pods,janetkuo,1
Density create a sequence of pods latency/resource should be within limit when create 30 pods with 50 background pods,alex-mohr,1
Density create a sequence of pods latency/resource should be within limit when create 50 pods with 50 background pods,yifan-gu,1
Density should allow running maximum capacity pods on nodes,smarterclayton,1
Density should allow starting 100 pods per node,wojtek-t,0
Density should allow starting 3 pods per node,gmarek,0
Density should allow starting 30 pods per node,gmarek,0
Density should allow starting 50 pods per node,wojtek-t,0
Density should allow starting 95 pods per node,wojtek-t,0
Density create a batch of pods latency/resource should be within limit when create * pods with * interval,apelisse,1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create * pods with * interval (QPS *),jlowdermilk,1
Density create a sequence of pods latency/resource should be within limit when create * pods with * background pods,wojtek-t,1
Density should allow running maximum capacity pods on nodes,wojtek-t,0
Density should allow starting * pods per node,gmarek,0
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
Deployment deployment reaping should cascade to its replica sets and pods,kargakis,1
Deployment deployment should create new pods,pwittrock,0
Deployment deployment should delete old replica sets,pwittrock,0
Deployment deployment should label adopted RSs and pods,pwittrock,0
@ -89,25 +67,16 @@ Deployment overlapping deployment should not fight with each other,kargakis,1
Deployment paused deployment should be able to scale,kargakis,1
Deployment paused deployment should be ignored by the controller,kargakis,0
Deployment scaled rollout deployment should not block on annotation check,kargakis,1
"DisruptionController evictions: enough pods, absolute => should allow an eviction",madhusudancs,1
"DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction",nikhiljindal,1
DisruptionController evictions: no PDB => should allow an eviction,krousey,1
"DisruptionController evictions: too few pods, absolute => should not allow an eviction",mml,1
"DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction",mml,1
DisruptionController should allow an eviction when enough pods,mml,1
DisruptionController should allow an eviction when there is no PDB,Q-Lee,1
DisruptionController evictions: * => *,jszczepkowski,1
DisruptionController should create a PodDisruptionBudget,quinton-hoole,1
DisruptionController should not allow an eviction when too few pods,lavalamp,1
DisruptionController should update PodDisruptionBudget status,bgrant0607,1
Docker Containers should be able to override the image's default arguments (docker cmd),maisem,0
Docker Containers should be able to override the image's default command and arguments,maisem,0
Docker Containers should be able to override the image's default commmand (docker entrypoint),maisem,0
Docker Containers should use the image defaults if command and args are blank,vishh,0
Does nothing,janetkuo,1
Downward API should create a pod that prints his name and namespace,nhlfr,0
Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars,deads2k,1
Downward API should provide default limits.cpu/memory from node allocatable,derekwaynecarr,0
Downward API should provide default limits.cpu/memory from node capacity,mml,1
Downward API should provide pod IP as an env var,nhlfr,0
Downward API should provide pod name and namespace as env vars,nhlfr,0
Downward API volume should provide container's cpu limit,smarterclayton,1
@ -125,6 +94,12 @@ Downward API volume should update labels on modification,timothysc,1
Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes,andyzheng0831,1
Dynamic provisioning DynamicProvisioner should create and delete persistent volumes,jsafrane,0
DynamicKubeletConfiguration When a configmap called `kubelet-<node-name>` is added to the `kube-system` namespace The Kubelet on that node should restart to take up the new config,mwielgus,1
ESIPP should handle updates to source ip annotation,thockin,1
ESIPP should only target nodes with endpoints,yifan-gu,1
ESIPP should work for type=LoadBalancer,kargakis,1
ESIPP should work for type=NodePort,yujuhong,1
ESIPP should work from pods,dchen1107,1
Empty does nothing,alex-mohr,1
"EmptyDir volumes should support (non-root,0644,default)",timstclair,1
"EmptyDir volumes should support (non-root,0644,tmpfs)",spxtr,1
"EmptyDir volumes should support (non-root,0666,default)",dchen1107,1
@ -144,7 +119,6 @@ EmptyDir volumes when FSGroup is specified new files should be created with FSGr
EmptyDir volumes when FSGroup is specified new files should be created with FSGroup ownership when container is root,childsb,1
EmptyDir volumes when FSGroup is specified volume on default medium should have the correct mode using FSGroup,eparis,1
EmptyDir volumes when FSGroup is specified volume on tmpfs should have the correct mode using FSGroup,timothysc,1
EmptyDir wrapper volumes should becomes running,justinsb,1
EmptyDir wrapper volumes should not cause race condition when used for configmaps,bprashanth,1
EmptyDir wrapper volumes should not cause race condition when used for git_repo,girishkalele,1
EmptyDir wrapper volumes should not conflict,deads2k,1
@ -157,55 +131,44 @@ Federated Services DNS should be able to discover a federated service,derekwayne
Federated Services Service creation should create matching services in underlying clusters,jbeda,1
Federated Services Service creation should succeed,rmmh,1
Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to connect to a federated ingress via its load balancer,rmmh,1
Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to discover a federated ingress service,bgrant0607,1
Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to discover a federated ingress service via DNS,smarterclayton,1
Federated ingresses Federated Ingresses should be created and deleted successfully,kevin-wangzefeng,1
Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters,ghodss,1
Federated ingresses Ingress objects should be created and deleted successfully,madhusudancs,1
Federation API server authentication should accept cluster resources when the client has right authentication credentials,davidopp,1
Federation API server authentication should not accept cluster resources when the client has invalid authentication credentials,yujuhong,1
Federation API server authentication should not accept cluster resources when the client has no authentication credentials,nikhiljindal,1
Federation apiserver Admission control should not be able to create resources if namespace does not exist,alex-mohr,1
Federation apiserver Cluster objects should be created and deleted successfully,ghodss,1
Federation events Event objects should be created and deleted successfully,karlkfi,1
Federation ingresses Ingress objects should be created and deleted successfully,bprashanth,1
Federation namespace Namespace objects should be created and deleted successfully,xiang90,1
Federation replicasets Federated ReplicaSet should create and update matching replicasets in underling clusters,childsb,1
Federation replicasets ReplicaSet objects should be created and deleted successfully,apelisse,1
Federation secrets Secret objects should be created and deleted successfully,pmorie,1
GCE L7 LoadBalancer Controller should create GCE L7 loadbalancers and verify Ingress,bprashanth,0
GKE local SSD should write and read from node local SSD,fabioy,0
GKE node pools should create a cluster with multiple node pools,fabioy,1
Garbage collector should delete pods created by rc when not orphaning,justinsb,1
Garbage collector should handle the creation of 1000 pods,xiang90,1
Garbage collector should orphan pods created by rc if delete options say so,fabioy,1
Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil,zmerlynn,1
"Generated release_1_2 clientset should create pods, delete pods, watch pods",ncdc,1
"Generated release_1_3 clientset should create pods, delete pods, watch pods",krousey,1
"Generated release_1_5 clientset should create pods, delete pods, watch pods",ghodss,1
HA-master pods survive addition/removal,roberthbailey,1
Hazelcast should create and scale hazelcast,mikedanese,1
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 5 pods to 3 pods and from 3 to 1,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController *,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 2 pods to 1 pod,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 2 pods to 1 pod using HPA version v1,jszczepkowski,0
HostPath should give a volume the correct mode,thockin,1
HostPath should support r/w,luxas,1
HostPath should support subPath,sttts,1
Image Container Conformance Test image conformance blackbox test when testing image that does not exist should ignore pull failures,jsafrane,1
Image Container Conformance Test image conformance blackbox test when testing images that exist It should present successfully,yujuhong,1
Image Container Conformance Test image conformance blackbox test when testing images that exist should list pulled images,maisem,1
ImageID should be set to the manifest digest (from RepoDigests) when available,mikedanese,1
InitContainer should invoke init containers on a RestartAlways pod,saad-ali,1
InitContainer should invoke init containers on a RestartNever pod,vulpecula,1
InitContainer should not start app containers and fail the pod if init containers fail on a RestartNever pod,timothysc,1
InitContainer should not start app containers if init containers fail on a RestartAlways pod,davidopp,1
InitContainer should not start app containers and fail the pod if init containers fail on a RestartNever pod,maisem,0
InitContainer should not start app containers if init containers fail on a RestartAlways pod,maisem,0
Initial Resources should set initial resources based on historical data,piosz,0
Job should delete a job,soltysh,0
Job should fail a job,soltysh,0
@ -216,7 +179,7 @@ Job should run a job to completion when tasks succeed,soltysh,0
Job should scale a job down,soltysh,0
Job should scale a job up,soltysh,0
Kibana Logging Instances Is Alive should check that the Kibana logging instance is alive,swagiaal,0
KubeProxy should test kube-proxy,vulpecula,1
Kubectl alpha client Kubectl run ScheduledJob should create a ScheduledJob,yujuhong,1
Kubectl client Guestbook application should create and stop a working application,pwittrock,0
Kubectl client Kubectl api-versions should check if v1 is in available api versions,pwittrock,0
Kubectl client Kubectl apply should apply a new configuration to an existing RC,pwittrock,0
@ -232,12 +195,9 @@ Kubectl client Kubectl logs should be able to retrieve and filter logs,jlowdermi
Kubectl client Kubectl patch should add annotations for pods in rc,janetkuo,0
Kubectl client Kubectl replace should update a single-container pod's image,karlkfi,1
Kubectl client Kubectl rolling-update should support rolling-update to same image,janetkuo,0
Kubectl client Kubectl run --quiet job should not output anything except the command when --quiet is set,soltysh,1
"Kubectl client Kubectl run --rm job should create a job from an image, then delete the job",soltysh,0
Kubectl client Kubectl run ScheduledJob should create a ScheduledJob,soltysh,0
Kubectl client Kubectl run default should create an rc or deployment from an image,janetkuo,0
Kubectl client Kubectl run deployment should create a deployment from an image,janetkuo,0
Kubectl client Kubectl run job should create a job from an image when restart is Never,soltysh,0
Kubectl client Kubectl run job should create a job from an image when restart is OnFailure,soltysh,0
Kubectl client Kubectl run pod should create a pod from an image when restart is Never,janetkuo,0
Kubectl client Kubectl run rc should create an rc from an image,janetkuo,0
@ -260,18 +220,9 @@ Kubelet Container Manager Validate OOM score adjustments once the node is setup
Kubelet Container Manager Validate OOM score adjustments once the node is setup docker daemon's oom-score-adj should be -999,thockin,1
Kubelet Container Manager Validate OOM score adjustments once the node is setup guaranteed container's oom-score-adj should be -998,kargakis,1
Kubelet Container Manager Validate OOM score adjustments once the node is setup pod infra containers oom-score-adj should be -998 and best effort container's should be 1000,timothysc,1
Kubelet Container Manager oom score adjusting when scheduling a busybox command that always fails in a pod should be possible to delete,jbeda,1
Kubelet Container Manager oom score adjusting when scheduling a busybox command that always fails in a pod should have an error terminated reason,vulpecula,1
Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space,karlkfi,1
Kubelet experimental resource usage tracking for 100 pods per node over 20m0s,yujuhong,0
Kubelet experimental resource usage tracking resource tracking for 100 pods per node,ghodss,0
Kubelet metrics api when querying /stats/summary it should report resource usage through the stats api,fabioy,1
Kubelet regular resource usage tracking for 0 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking for 100 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking for 35 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking resource tracking for 0 pods per node,pmorie,1
Kubelet regular resource usage tracking resource tracking for 100 pods per node,justinsb,1
Kubelet regular resource usage tracking resource tracking for 35 pods per node,madhusudancs,1
Kubelet experimental resource usage tracking resource tracking for * pods per node,yujuhong,0
Kubelet regular resource usage tracking resource tracking for * pods per node,yujuhong,0
Kubelet when scheduling a busybox command in a pod it should print the output to logs,ixdy,1
Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete,smarterclayton,1
Kubelet when scheduling a busybox command that always fails in a pod should have an error terminated reason,deads2k,1
@ -280,11 +231,11 @@ KubeletManagedEtcHosts should test kubelet managed /etc/hosts file,Random-Liu,1
Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive,wonderfly,0
LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied.,cjcullen,1
Liveness liveness pods should be automatically restarted,andyzheng0831,1
Load capacity should be able to handle 3 pods per node,gmarek,0
Load capacity should be able to handle 30 pods per node,wojtek-t,0
Loadbalancing: L7 GCE shoud create ingress with given static-ip,vulpecula,1
Loadbalancing: L7 GCE should conform to Ingress spec,andyzheng0831,1
Loadbalancing: L7 Nginx should conform to Ingress spec,ncdc,1
Load capacity should be able to handle * pods per node,gmarek,0
Loadbalancing: L7 GCE shoud create ingress with given static-ip,bprashanth,0
Loadbalancing: L7 GCE should conform to Ingress spec,bprashanth,0
Loadbalancing: L7 Nginx should conform to Ingress spec,bprashanth,0
"Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node",justinsb,1
"MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed)",ixdy,1
Mesos applies slave attributes as labels,justinsb,1
Mesos schedules pods annotated with roles on correct slaves,timstclair,1
@ -303,10 +254,8 @@ Namespaces should always delete fast (ALL of 100 namespaces in 150 seconds),rmmh
Namespaces should delete fast enough (90 percent of 100 namespaces in 150 seconds),kevin-wangzefeng,1
Namespaces should ensure that all pods are removed when a namespace is deleted.,xiang90,1
Namespaces should ensure that all services are removed when a namespace is deleted.,pmorie,1
Networking Granular Checks should function for pod communication between nodes,freehan,0
Networking Granular Checks should function for pod communication on a single node,freehan,0
Networking Granular Checks: Pods should function for intra-pod communication: http,spxtr,1
Networking Granular Checks: Pods should function for intra-pod communication: udp,alex-mohr,1
Networking Granular Checks: Pods should function for intra-pod communication: http,stts,0
Networking Granular Checks: Pods should function for intra-pod communication: udp,freehan,0
Networking Granular Checks: Pods should function for node-pod communication: http,spxtr,1
Networking Granular Checks: Pods should function for node-pod communication: udp,wojtek-t,1
Networking Granular Checks: Services should function for endpoint-Service: http,bgrant0607,1
@ -319,55 +268,40 @@ Networking Granular Checks: Services should update endpoints: http,jdef,1
Networking Granular Checks: Services should update endpoints: udp,freehan,1
Networking Granular Checks: Services should update nodePort: http,nikhiljindal,1
Networking Granular Checks: Services should update nodePort: udp,smarterclayton,1
Networking IPerf should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients),ghodss,1
Networking IPerf should transfer ~ 1GB onto the service endpoint * servers (maximum of * clients),fabioy,1
Networking should check kube-proxy urls,lavalamp,1
Networking should function for intra-pod communication,sttts,0
Networking should provide Internet connection for containers,sttts,0
"Networking should provide unchanging, static URL paths for kubernetes api services",freehan,0
NodeOutOfDisk runs out of disk space,vishh,0
NodeProblemDetector KernelMonitor should generate node condition and events for corresponding errors,Random-Liu,0
Nodes Network when a node becomes unreachable All pods on the unreachable node should be marked as NotReady upon the node turn NotReady AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout,freehan,0
Nodes Network when a node becomes unreachable recreates pods scheduled on the unreachable node AND allows scheduling of pods on a node after it rejoins the cluster,madhusudancs,1
Nodes Network when a node becomes unreachable *,alex-mohr,1
Nodes Resize should be able to add nodes,piosz,1
Nodes Resize should be able to delete nodes,zmerlynn,1
"PersistentVolumes NFS volume can be created, bound, retrieved, unbound, and used by a pod",jsafrane,0
PersistentVolumes create a PV and a pre-bound PVC: test write access,caesarxuchao,1
PersistentVolumes create a PVC and a pre-bound PV: test write access,lavalamp,1
PersistentVolumes create a PVC and non-pre-bound PV: test write access,mwielgus,1
"PersistentVolumes should create a PersistentVolume, Claim, and a client Pod that will test the read/write access of the volume",ixdy,1
PersistentVolumes should create a non-pre-bound PV and PVC: test write access,andyzheng0831,1
Pet Store should scale to persist a nominal number ( 50 ) of transactions in 1m0s seconds,timstclair,1
Pet Store should scale to persist a nominal number ( * ) of transactions in * seconds,xiang90,1
Pet set recreate should recreate evicted petset,hurf,1
PetSet Basic PetSet functionality should handle healthy pet restarts during scale,kevin-wangzefeng,1
PetSet Basic PetSet functionality should provide basic identity,girishkalele,1
PetSet Deploy clustered applications should creating a working mysql cluster,piosz,1
PetSet Deploy clustered applications should creating a working redis cluster,mtaufen,1
PetSet Deploy clustered applications should creating a working zookeeper cluster,rmmh,1
"Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host",alex-mohr,1
"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",ghodss,1
"Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host",saad-ali,0
"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD, remove it, then schedule it on another host",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host",mml,1
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully.",saad-ali,1
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both.",saad-ali,0
"Pod Disks should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession",saad-ali,0
Pod garbage collector should handle the creation of 1000 pods,wojtek-t,1
Pods should *not* be restarted with a /healthz http liveness probe,cjcullen,1
"Pods should *not* be restarted with a docker exec ""cat /tmp/health"" liveness probe",vishh,0
Pods should allow activeDeadlineSeconds to be updated,derekwaynecarr,0
Pods should be restarted with a /healthz http liveness probe,girishkalele,1
"Pods should be restarted with a docker exec ""cat /tmp/health"" liveness probe",bgrant0607,1
Pods should be schedule with cpu and memory limits,vishh,0
Pods should be submitted and removed,davidopp,1
Pods should be updated,derekwaynecarr,1
Pods should cap back-off at MaxContainerBackOff,maisem,1
Pods should contain environment variables for services,jlowdermilk,1
Pods should get a host IP,xiang90,1
Pods should have monotonically increasing restart count,apelisse,1
Pods should have their auto-restart back-off timer reset on image update,mikedanese,1
Pods should invoke init containers on a RestartAlways pod,cjcullen,1
Pods should invoke init containers on a RestartNever pod,justinsb,1
Pods should not start app containers and fail the pod if init containers fail on a RestartNever pod,maisem,0
Pods should not start app containers if init containers fail on a RestartAlways pod,dchen1107,1
Pods should support remote command execution over websockets,madhusudancs,1
Pods should support retrieving logs from the container over websockets,vishh,0
"Port forwarding With a server that expects a client request should support a client that connects, sends data, and disconnects",sttts,0
@ -382,13 +316,13 @@ Probing container should be restarted with a /healthz http liveness probe,Random
Probing container should have monotonically increasing restart count,Random-Liu,0
Probing container with readiness probe should not be ready before initial delay and never restart,Random-Liu,0
Probing container with readiness probe that fails should never be ready and never restart,Random-Liu,0
Proxy version v1 should proxy logs on node,girishkalele,1
Proxy version v1 should proxy logs on node using proxy subresource,karlkfi,1
Proxy version v1 should proxy logs on node with explicit kubelet port,mwielgus,1
Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource,bgrant0607,1
Proxy version v1 should proxy through a service and a pod,mml,1
Proxy version v1 should proxy to cadvisor,rmmh,1
Proxy version v1 should proxy to cadvisor using proxy subresource,deads2k,1
Proxy * should proxy logs on node,karlkfi,1
Proxy * should proxy logs on node using proxy subresource,hurf,1
Proxy * should proxy logs on node with explicit kubelet port,ixdy,1
Proxy * should proxy logs on node with explicit kubelet port using proxy subresource,dchen1107,1
Proxy * should proxy through a service and a pod,karlkfi,1
Proxy * should proxy to cadvisor,jszczepkowski,1
Proxy * should proxy to cadvisor using proxy subresource,roberthbailey,1
Reboot each node by dropping all inbound packets for a while and ensure they function afterwards,quinton-hoole,0
Reboot each node by dropping all outbound packets for a while and ensure they function afterwards,quinton-hoole,0
Reboot each node by ordering clean reboot and ensure they function upon restart,quinton-hoole,0
@ -401,9 +335,7 @@ ReplicaSet should serve a basic image on each replica with a public image,krouse
ReplicationController should serve a basic image on each replica with a private image,jbeda,1
ReplicationController should serve a basic image on each replica with a public image,krousey,1
Rescheduler should ensure that critical pod is scheduled in case there is no resources available,mtaufen,1
Resource-usage regular resource usage tracking resource tracking for 10 pods per node,janetkuo,1
Resource-usage regular resource usage tracking resource tracking for 105 pods per node,jszczepkowski,1
Resource-usage regular resource usage tracking resource tracking for 35 pods per node,jdef,1
Resource-usage regular resource usage tracking resource tracking for * pods per node,janetkuo,1
ResourceQuota should create a ResourceQuota and capture the life of a configMap.,timstclair,1
ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim.,bgrant0607,1
ResourceQuota should create a ResourceQuota and capture the life of a pod.,pmorie,1
@ -441,8 +373,9 @@ SchedulerPredicates validates that taints-tolerations is respected if not matchi
Secret should create a pod that reads a secret,luxas,1
Secrets should be consumable from pods in env vars,mml,1
Secrets should be consumable from pods in volume,ghodss,1
Secrets should be consumable from pods in volume with Mode set in the item,madhusudancs,1
Secrets should be consumable from pods in volume with defaultMode set,derekwaynecarr,1
Secrets should be consumable from pods in volume with mappings,freehan,1
Secrets should be consumable from pods in volume with mappings and Item Mode set,kevin-wangzefeng,1
Secrets should be consumable in multiple volumes in a pod,girishkalele,1
Security Context should support container.SecurityContext.RunAsUser,alex-mohr,1
Security Context should support pod.Spec.SecurityContext.RunAsUser,bgrant0607,1
@ -460,7 +393,6 @@ ServiceAccounts should mount an API token into pods,liggitt,0
ServiceLoadBalancer should support simple GET on Ingress ips,bprashanth,0
Services should be able to change the type and ports of a service,bprashanth,0
Services should be able to create a functioning NodePort service,bprashanth,0
Services should be able to create services of type LoadBalancer and externalTraffic=localOnly,jlowdermilk,1
Services should be able to up and down services,bprashanth,0
Services should check NodePort out-of-range,bprashanth,0
Services should create endpoints for unready pods,maisem,0
@ -473,16 +405,16 @@ Services should serve multiport endpoints from pods,bprashanth,0
Services should use same NodePort with same port but different protocols,timothysc,1
Services should work after restarting apiserver,bprashanth,0
Services should work after restarting kube-proxy,bprashanth,0
SimpleMount should be able to mount an emptydir on a container,jdef,1
"Spark should start spark master, driver and workers",girishkalele,1
"Staging client repo client should create pods, delete pods, watch pods",jbeda,1
"Storm should create and stop Zookeeper, Nimbus and Storm worker servers",mtaufen,1
Summary API when querying /stats/summary should report resource usage through the stats api,apelisse,1
"Sysctls should not launch unsafe, but not explicitly enabled sysctls on the node",madhusudancs,1
Sysctls should reject invalid sysctls,davidopp,1
Sysctls should support sysctls,Random-Liu,1
Sysctls should support unsafe sysctls which are actually whitelisted,deads2k,1
ThirdParty resources Simple Third Party creating/deleting thirdparty objects works,luxas,1
Ubernetes Lite should spread the pods of a replication controller across zones,quinton-hoole,0
Ubernetes Lite should spread the pods of a service across zones,quinton-hoole,0
Upgrade cluster upgrade should maintain a functioning cluster,girishkalele,1
Upgrade cluster upgrade should maintain responsive services,mikedanese,1
Upgrade master upgrade should maintain responsive services,mikedanese,1
@ -506,47 +438,36 @@ Volumes GlusterFS should be mountable,eparis,1
Volumes NFS should be mountable,andyzheng0831,1
Volumes PD should be mountable,caesarxuchao,1
Volumes iSCSI should be mountable,jsafrane,1
download_gcloud,hurf,1
hostPath should give a volume the correct mode,roberthbailey,1
hostPath should support r/w,roberthbailey,1
hostPath should support subPath,krousey,1
install_gcloud,roberthbailey,1
k8s.io/kubernetes/cluster/addons/dns/kube2sky,zmerlynn,1
k8s.io/kubernetes/cmd/genutils,rmmh,1
k8s.io/kubernetes/cmd/hyperkube,jbeda,0
k8s.io/kubernetes/cmd/kube-apiserver/app,nikhiljindal,0
k8s.io/kubernetes/cmd/kube-apiserver/app/options,nikhiljindal,0
k8s.io/kubernetes/cmd/kube-discovery/app,vulpecula,1
k8s.io/kubernetes/cmd/kube-proxy/app,luxas,1
k8s.io/kubernetes/cmd/kubeadm/app/images,davidopp,1
k8s.io/kubernetes/cmd/kubeadm/app/util,krousey,1
k8s.io/kubernetes/cmd/kubelet/app,hurf,1
k8s.io/kubernetes/cmd/kubernetes-discovery/discoverysummarizer,thockin,1
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset,bgrant0607,1
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset/typed/testgroup.k8s.io/unversioned,eparis,1
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/testgroup/unversioned,wojtek-t,1
k8s.io/kubernetes/cmd/libs/go2idl/deepcopy-gen/generators,mwielgus,1
k8s.io/kubernetes/cmd/libs/go2idl/generator,ixdy,1
k8s.io/kubernetes/cmd/libs/go2idl/import-boss/generators,kevin-wangzefeng,1
k8s.io/kubernetes/cmd/libs/go2idl/namer,luxas,1
k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen/generators,davidopp,1
k8s.io/kubernetes/cmd/libs/go2idl/parser,lavalamp,1
k8s.io/kubernetes/cmd/libs/go2idl/types,mikedanese,1
k8s.io/kubernetes/cmd/mungedocs,mwielgus,1
k8s.io/kubernetes/examples,Random-Liu,0
k8s.io/kubernetes/examples/apiserver,nikhiljindal,0
k8s.io/kubernetes/federation/apis/federation/install,nikhiljindal,0
k8s.io/kubernetes/federation/apis/federation/validation,nikhiljindal,0
k8s.io/kubernetes/federation/cmd/federation-apiserver/app,dchen1107,1
k8s.io/kubernetes/federation/pkg/dnsprovider,sttts,1
k8s.io/kubernetes/federation/pkg/dnsprovider/providers/aws/route53,cjcullen,1
k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns,jsafrane,1
k8s.io/kubernetes/federation/pkg/federation-controller/cluster,nikhiljindal,0
k8s.io/kubernetes/federation/pkg/federation-controller/daemonset,madhusudancs,1
k8s.io/kubernetes/federation/pkg/federation-controller/deployment,childsb,1
k8s.io/kubernetes/federation/pkg/federation-controller/ingress,vishh,1
k8s.io/kubernetes/federation/pkg/federation-controller/namespace,hurf,1
k8s.io/kubernetes/federation/pkg/federation-controller/replicaset,roberthbailey,1
k8s.io/kubernetes/federation/pkg/federation-controller/replicaset/planner,dchen1107,1
k8s.io/kubernetes/federation/pkg/federation-controller/secret,apelisse,1
k8s.io/kubernetes/federation/pkg/federation-controller/service,pmorie,1
k8s.io/kubernetes/federation/pkg/federation-controller/util,bgrant0607,1
k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink,luxas,1
k8s.io/kubernetes/federation/pkg/federation-controller/util/planner,jszczepkowski,1
k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer,yujuhong,1
k8s.io/kubernetes/federation/registry/cluster,nikhiljindal,0
k8s.io/kubernetes/federation/registry/cluster/etcd,nikhiljindal,0
k8s.io/kubernetes/hack/cmd/teststale,thockin,1
@ -554,6 +475,7 @@ k8s.io/kubernetes/pkg/admission,dchen1107,1
k8s.io/kubernetes/pkg/api,Q-Lee,1
k8s.io/kubernetes/pkg/api/endpoints,cjcullen,1
k8s.io/kubernetes/pkg/api/errors,yifan-gu,1
k8s.io/kubernetes/pkg/api/events,caesarxuchao,1
k8s.io/kubernetes/pkg/api/install,timothysc,1
k8s.io/kubernetes/pkg/api/meta,fabioy,1
k8s.io/kubernetes/pkg/api/pod,piosz,1
@ -576,28 +498,27 @@ k8s.io/kubernetes/pkg/apis/autoscaling/validation,mtaufen,1
k8s.io/kubernetes/pkg/apis/batch/v1,vishh,1
k8s.io/kubernetes/pkg/apis/batch/v2alpha1,jlowdermilk,1
k8s.io/kubernetes/pkg/apis/batch/validation,erictune,0
k8s.io/kubernetes/pkg/apis/certificates/install,zmerlynn,1
k8s.io/kubernetes/pkg/apis/componentconfig,jbeda,1
k8s.io/kubernetes/pkg/apis/componentconfig/install,pmorie,1
k8s.io/kubernetes/pkg/apis/extensions,bgrant0607,1
k8s.io/kubernetes/pkg/apis/extensions/install,thockin,1
k8s.io/kubernetes/pkg/apis/extensions/v1beta1,madhusudancs,1
k8s.io/kubernetes/pkg/apis/extensions/validation,nikhiljindal,1
k8s.io/kubernetes/pkg/apis/policy/validation,deads2k,1
k8s.io/kubernetes/pkg/apis/rbac/install,ixdy,1
k8s.io/kubernetes/pkg/apis/rbac/validation,erictune,0
k8s.io/kubernetes/pkg/apis/storage/validation,caesarxuchao,1
k8s.io/kubernetes/pkg/apiserver,nikhiljindal,0
k8s.io/kubernetes/pkg/apiserver/audit,caesarxuchao,1
k8s.io/kubernetes/pkg/apiserver/filters,yifan-gu,1
k8s.io/kubernetes/pkg/apiserver/request,luxas,1
k8s.io/kubernetes/pkg/auth/authenticator/bearertoken,liggitt,0
k8s.io/kubernetes/pkg/auth/authorizer/abac,liggitt,0
k8s.io/kubernetes/pkg/auth/authorizer/union,liggitt,0
k8s.io/kubernetes/pkg/auth/group,cjcullen,1
k8s.io/kubernetes/pkg/auth/handlers,liggitt,0
k8s.io/kubernetes/pkg/client/cache,xiang90,1
k8s.io/kubernetes/pkg/client/chaosclient,deads2k,1
k8s.io/kubernetes/pkg/client/leaderelection,xiang90,1
k8s.io/kubernetes/pkg/client/record,karlkfi,1
k8s.io/kubernetes/pkg/client/restclient,kargakis,1
k8s.io/kubernetes/pkg/client/retry,jsafrane,1
k8s.io/kubernetes/pkg/client/testing/cache,mikedanese,1
k8s.io/kubernetes/pkg/client/testing/core,maisem,1
k8s.io/kubernetes/pkg/client/transport,eparis,1
@ -625,14 +546,11 @@ k8s.io/kubernetes/pkg/controller/deployment,asalkeld,0
k8s.io/kubernetes/pkg/controller/deployment/util,saad-ali,1
k8s.io/kubernetes/pkg/controller/disruption,fabioy,1
k8s.io/kubernetes/pkg/controller/endpoint,mwielgus,1
k8s.io/kubernetes/pkg/controller/framework,karlkfi,1
k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1
k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly,cjcullen,1
k8s.io/kubernetes/pkg/controller/gc,jdef,1
k8s.io/kubernetes/pkg/controller/job,soltysh,0
k8s.io/kubernetes/pkg/controller/namespace,karlkfi,1
k8s.io/kubernetes/pkg/controller/node,gmarek,0
k8s.io/kubernetes/pkg/controller/persistentvolume,jsafrane,0
k8s.io/kubernetes/pkg/controller/petset,fgrzadkowski,1
k8s.io/kubernetes/pkg/controller/podautoscaler,piosz,0
k8s.io/kubernetes/pkg/controller/podautoscaler/metrics,piosz,0
@ -647,7 +565,7 @@ k8s.io/kubernetes/pkg/controller/serviceaccount,liggitt,0
k8s.io/kubernetes/pkg/controller/volume/attachdetach,luxas,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache,hurf,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler,jsafrane,1
k8s.io/kubernetes/pkg/controller/volume/persistentvolume,apelisse,1
k8s.io/kubernetes/pkg/controller/volume/persistentvolume,jsafrane,0
k8s.io/kubernetes/pkg/conversion,ixdy,1
k8s.io/kubernetes/pkg/conversion/queryparams,caesarxuchao,1
k8s.io/kubernetes/pkg/credentialprovider,justinsb,1
@ -658,12 +576,16 @@ k8s.io/kubernetes/pkg/fieldpath,childsb,1
k8s.io/kubernetes/pkg/fields,jsafrane,1
k8s.io/kubernetes/pkg/genericapiserver,nikhiljindal,0
k8s.io/kubernetes/pkg/genericapiserver/authorizer,lavalamp,1
k8s.io/kubernetes/pkg/genericapiserver/filters,dchen1107,1
k8s.io/kubernetes/pkg/genericapiserver/mux,zmerlynn,1
k8s.io/kubernetes/pkg/genericapiserver/openapi,davidopp,1
k8s.io/kubernetes/pkg/genericapiserver/routes,girishkalele,1
k8s.io/kubernetes/pkg/healthz,thockin,1
k8s.io/kubernetes/pkg/httplog,mtaufen,1
k8s.io/kubernetes/pkg/kubectl,madhusudancs,1
k8s.io/kubernetes/pkg/kubectl/cmd,rmmh,1
k8s.io/kubernetes/pkg/kubectl/cmd/config,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/set,brendandburns,1
k8s.io/kubernetes/pkg/kubectl/cmd/util,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/util/editor,jdef,1
k8s.io/kubernetes/pkg/kubectl/resource,caesarxuchao,1
@ -692,7 +614,6 @@ k8s.io/kubernetes/pkg/kubelet/pod,alex-mohr,1
k8s.io/kubernetes/pkg/kubelet/prober,alex-mohr,1
k8s.io/kubernetes/pkg/kubelet/prober/results,krousey,1
k8s.io/kubernetes/pkg/kubelet/qos,vishh,0
k8s.io/kubernetes/pkg/kubelet/qos/util,vishh,0
k8s.io/kubernetes/pkg/kubelet/rkt,apelisse,1
k8s.io/kubernetes/pkg/kubelet/rktshim,mml,1
k8s.io/kubernetes/pkg/kubelet/server,timstclair,0
@ -717,83 +638,81 @@ k8s.io/kubernetes/pkg/proxy/iptables,freehan,0
k8s.io/kubernetes/pkg/proxy/userspace,luxas,1
k8s.io/kubernetes/pkg/quota,sttts,1
k8s.io/kubernetes/pkg/quota/evaluator/core,yifan-gu,1
k8s.io/kubernetes/pkg/registry/certificates,ncdc,1
k8s.io/kubernetes/pkg/registry/componentstatus,yifan-gu,1
k8s.io/kubernetes/pkg/registry/configmap,timothysc,1
k8s.io/kubernetes/pkg/registry/configmap/etcd,janetkuo,1
k8s.io/kubernetes/pkg/registry/controller,jdef,1
k8s.io/kubernetes/pkg/registry/controller/etcd,spxtr,1
k8s.io/kubernetes/pkg/registry/daemonset,ghodss,1
k8s.io/kubernetes/pkg/registry/daemonset/etcd,pmorie,1
k8s.io/kubernetes/pkg/registry/deployment,timothysc,1
k8s.io/kubernetes/pkg/registry/deployment/etcd,fgrzadkowski,1
k8s.io/kubernetes/pkg/registry/endpoint,smarterclayton,1
k8s.io/kubernetes/pkg/registry/endpoint/etcd,timstclair,1
k8s.io/kubernetes/pkg/registry/event,girishkalele,1
k8s.io/kubernetes/pkg/registry/event/etcd,karlkfi,1
k8s.io/kubernetes/pkg/registry/experimental/controller/etcd,smarterclayton,1
k8s.io/kubernetes/pkg/registry/generic,eparis,1
k8s.io/kubernetes/pkg/registry/generic/etcd,fabioy,1
k8s.io/kubernetes/pkg/registry/apps/petset,madhusudancs,1
k8s.io/kubernetes/pkg/registry/apps/petset/etcd,wojtek-t,1
k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler,jszczepkowski,0
k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/etcd,jszczepkowski,0
k8s.io/kubernetes/pkg/registry/batch/job,soltysh,0
k8s.io/kubernetes/pkg/registry/batch/job/etcd,soltysh,0
k8s.io/kubernetes/pkg/registry/batch/scheduledjob,soltysh,0
k8s.io/kubernetes/pkg/registry/batch/scheduledjob/etcd,soltysh,0
k8s.io/kubernetes/pkg/registry/certificates/certificates,cjcullen,1
k8s.io/kubernetes/pkg/registry/core/componentstatus,jsafrane,1
k8s.io/kubernetes/pkg/registry/core/configmap,bgrant0607,1
k8s.io/kubernetes/pkg/registry/core/configmap/etcd,ghodss,1
k8s.io/kubernetes/pkg/registry/core/controller,luxas,1
k8s.io/kubernetes/pkg/registry/core/controller/etcd,mwielgus,1
k8s.io/kubernetes/pkg/registry/core/endpoint,yujuhong,1
k8s.io/kubernetes/pkg/registry/core/endpoint/etcd,bgrant0607,1
k8s.io/kubernetes/pkg/registry/core/event,thockin,1
k8s.io/kubernetes/pkg/registry/core/event/etcd,krousey,1
k8s.io/kubernetes/pkg/registry/core/limitrange,bgrant0607,1
k8s.io/kubernetes/pkg/registry/core/limitrange/etcd,mikedanese,1
k8s.io/kubernetes/pkg/registry/core/namespace,pmorie,1
k8s.io/kubernetes/pkg/registry/core/namespace/etcd,yifan-gu,1
k8s.io/kubernetes/pkg/registry/core/node,karlkfi,1
k8s.io/kubernetes/pkg/registry/core/node/etcd,saad-ali,1
k8s.io/kubernetes/pkg/registry/core/persistentvolume,lavalamp,1
k8s.io/kubernetes/pkg/registry/core/persistentvolume/etcd,hurf,1
k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim,wojtek-t,1
k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd,maisem,1
k8s.io/kubernetes/pkg/registry/core/pod,spxtr,1
k8s.io/kubernetes/pkg/registry/core/pod/etcd,hurf,1
k8s.io/kubernetes/pkg/registry/core/pod/rest,mml,1
k8s.io/kubernetes/pkg/registry/core/podtemplate,eparis,1
k8s.io/kubernetes/pkg/registry/core/podtemplate/etcd,yujuhong,1
k8s.io/kubernetes/pkg/registry/core/resourcequota,ixdy,1
k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd,gmarek,1
k8s.io/kubernetes/pkg/registry/core/secret,smarterclayton,1
k8s.io/kubernetes/pkg/registry/core/secret/etcd,thockin,1
k8s.io/kubernetes/pkg/registry/core/service,madhusudancs,1
k8s.io/kubernetes/pkg/registry/core/service/allocator,justinsb,1
k8s.io/kubernetes/pkg/registry/core/service/allocator/etcd,childsb,1
k8s.io/kubernetes/pkg/registry/core/service/etcd,zmerlynn,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator,yifan-gu,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller,justinsb,1
k8s.io/kubernetes/pkg/registry/core/service/ipallocator/etcd,dchen1107,1
k8s.io/kubernetes/pkg/registry/core/service/portallocator,pmorie,1
k8s.io/kubernetes/pkg/registry/core/serviceaccount,liggitt,0
k8s.io/kubernetes/pkg/registry/core/serviceaccount/etcd,liggitt,0
k8s.io/kubernetes/pkg/registry/extensions/controller/etcd,madhusudancs,1
k8s.io/kubernetes/pkg/registry/extensions/daemonset,andyzheng0831,1
k8s.io/kubernetes/pkg/registry/extensions/daemonset/etcd,roberthbailey,1
k8s.io/kubernetes/pkg/registry/extensions/deployment,jszczepkowski,1
k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd,vulpecula,1
k8s.io/kubernetes/pkg/registry/extensions/ingress,girishkalele,1
k8s.io/kubernetes/pkg/registry/extensions/ingress/etcd,wojtek-t,1
k8s.io/kubernetes/pkg/registry/extensions/networkpolicy,bgrant0607,1
k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/etcd,brendandburns,1
k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/etcd,justinsb,1
k8s.io/kubernetes/pkg/registry/extensions/replicaset,jdef,1
k8s.io/kubernetes/pkg/registry/extensions/replicaset/etcd,deads2k,1
k8s.io/kubernetes/pkg/registry/extensions/rest,spxtr,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource,lavalamp,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd,Q-Lee,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata,pmorie,1
k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/etcd,davidopp,1
k8s.io/kubernetes/pkg/registry/generic/registry,jsafrane,1
k8s.io/kubernetes/pkg/registry/generic/rest,smarterclayton,1
k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler,jszczepkowski,0
k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler/etcd,jszczepkowski,0
k8s.io/kubernetes/pkg/registry/ingress,jsafrane,1
k8s.io/kubernetes/pkg/registry/ingress/etcd,vulpecula,1
k8s.io/kubernetes/pkg/registry/job,soltysh,0
k8s.io/kubernetes/pkg/registry/job/etcd,soltysh,0
k8s.io/kubernetes/pkg/registry/limitrange,apelisse,1
k8s.io/kubernetes/pkg/registry/limitrange/etcd,zmerlynn,1
k8s.io/kubernetes/pkg/registry/namespace,roberthbailey,1
k8s.io/kubernetes/pkg/registry/namespace/etcd,justinsb,1
k8s.io/kubernetes/pkg/registry/networkpolicy,xiang90,1
k8s.io/kubernetes/pkg/registry/networkpolicy/etcd,xiang90,1
k8s.io/kubernetes/pkg/registry/node,krousey,1
k8s.io/kubernetes/pkg/registry/node/etcd,kevin-wangzefeng,1
k8s.io/kubernetes/pkg/registry/persistentvolume,kevin-wangzefeng,1
k8s.io/kubernetes/pkg/registry/persistentvolume/etcd,andyzheng0831,1
k8s.io/kubernetes/pkg/registry/persistentvolumeclaim,bgrant0607,1
k8s.io/kubernetes/pkg/registry/persistentvolumeclaim/etcd,deads2k,1
k8s.io/kubernetes/pkg/registry/petset,rmmh,1
k8s.io/kubernetes/pkg/registry/petset/etcd,dchen1107,1
k8s.io/kubernetes/pkg/registry/pod,alex-mohr,1
k8s.io/kubernetes/pkg/registry/pod/etcd,justinsb,1
k8s.io/kubernetes/pkg/registry/pod/rest,childsb,1
k8s.io/kubernetes/pkg/registry/poddisruptionbudget,jsafrane,1
k8s.io/kubernetes/pkg/registry/poddisruptionbudget/etcd,alex-mohr,1
k8s.io/kubernetes/pkg/registry/podsecuritypolicy/etcd,yifan-gu,1
k8s.io/kubernetes/pkg/registry/podtemplate,jsafrane,1
k8s.io/kubernetes/pkg/registry/podtemplate/etcd,mtaufen,1
k8s.io/kubernetes/pkg/registry/replicaset,mwielgus,1
k8s.io/kubernetes/pkg/registry/replicaset/etcd,deads2k,1
k8s.io/kubernetes/pkg/registry/resourcequota,maisem,1
k8s.io/kubernetes/pkg/registry/resourcequota/etcd,cjcullen,1
k8s.io/kubernetes/pkg/registry/scheduledjob,soltysh,0
k8s.io/kubernetes/pkg/registry/scheduledjob/etcd,soltysh,1
k8s.io/kubernetes/pkg/registry/secret,spxtr,1
k8s.io/kubernetes/pkg/registry/secret/etcd,brendandburns,1
k8s.io/kubernetes/pkg/registry/service,thockin,1
k8s.io/kubernetes/pkg/registry/service/allocator,pmorie,1
k8s.io/kubernetes/pkg/registry/service/allocator/etcd,caesarxuchao,1
k8s.io/kubernetes/pkg/registry/service/etcd,alex-mohr,1
k8s.io/kubernetes/pkg/registry/service/ipallocator,jlowdermilk,1
k8s.io/kubernetes/pkg/registry/service/ipallocator/controller,lavalamp,1
k8s.io/kubernetes/pkg/registry/service/ipallocator/etcd,andyzheng0831,1
k8s.io/kubernetes/pkg/registry/service/portallocator,karlkfi,1
k8s.io/kubernetes/pkg/registry/serviceaccount,liggitt,0
k8s.io/kubernetes/pkg/registry/serviceaccount/etcd,liggitt,0
k8s.io/kubernetes/pkg/registry/storageclass,ixdy,1
k8s.io/kubernetes/pkg/registry/storageclass/etcd,jbeda,1
k8s.io/kubernetes/pkg/registry/thirdpartyresource,vulpecula,1
k8s.io/kubernetes/pkg/registry/thirdpartyresource/etcd,hurf,1
k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata,childsb,1
k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd,lavalamp,1
k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget,dchen1107,1
k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/etcd,kevin-wangzefeng,1
k8s.io/kubernetes/pkg/registry/storage/storageclass,jdef,1
k8s.io/kubernetes/pkg/registry/storage/storageclass/etcd,vulpecula,1
k8s.io/kubernetes/pkg/runtime,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/json,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/protobuf,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/recognizer,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/recognizer/testing,piosz,1
k8s.io/kubernetes/pkg/runtime/serializer/recognizer/testing,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/streaming,wojtek-t,0
k8s.io/kubernetes/pkg/runtime/serializer/versioning,wojtek-t,0
k8s.io/kubernetes/pkg/security/apparmor,bgrant0607,1
@ -801,6 +720,7 @@ k8s.io/kubernetes/pkg/security/podsecuritypolicy,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor,vulpecula,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/group,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp,mml,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux,erictune,0
k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl,andyzheng0831,1
k8s.io/kubernetes/pkg/security/podsecuritypolicy/user,erictune,0
@ -815,7 +735,6 @@ k8s.io/kubernetes/pkg/storage/etcd3,xiang90,0
k8s.io/kubernetes/pkg/storage/storagebackend/factory,maisem,1
k8s.io/kubernetes/pkg/util,jbeda,1
k8s.io/kubernetes/pkg/util/async,spxtr,1
k8s.io/kubernetes/pkg/util/atomic,kargakis,1
k8s.io/kubernetes/pkg/util/bandwidth,thockin,1
k8s.io/kubernetes/pkg/util/cache,thockin,1
k8s.io/kubernetes/pkg/util/cert,karlkfi,1
@ -823,9 +742,7 @@ k8s.io/kubernetes/pkg/util/clock,zmerlynn,1
k8s.io/kubernetes/pkg/util/config,girishkalele,1
k8s.io/kubernetes/pkg/util/configz,ixdy,1
k8s.io/kubernetes/pkg/util/dbus,roberthbailey,1
k8s.io/kubernetes/pkg/util/deployment,asalkeld,0
k8s.io/kubernetes/pkg/util/diff,piosz,1
k8s.io/kubernetes/pkg/util/ebtables,thockin,1
k8s.io/kubernetes/pkg/util/env,asalkeld,0
k8s.io/kubernetes/pkg/util/errors,jlowdermilk,1
k8s.io/kubernetes/pkg/util/exec,krousey,1
@ -858,6 +775,7 @@ k8s.io/kubernetes/pkg/util/sets,quinton-hoole,0
k8s.io/kubernetes/pkg/util/slice,quinton-hoole,0
k8s.io/kubernetes/pkg/util/strategicpatch,brendandburns,1
k8s.io/kubernetes/pkg/util/strings,quinton-hoole,0
k8s.io/kubernetes/pkg/util/term,wojtek-t,1
k8s.io/kubernetes/pkg/util/testing,jlowdermilk,1
k8s.io/kubernetes/pkg/util/threading,roberthbailey,1
k8s.io/kubernetes/pkg/util/validation,Q-Lee,1
@ -885,7 +803,6 @@ k8s.io/kubernetes/pkg/volume/glusterfs,timstclair,1
k8s.io/kubernetes/pkg/volume/host_path,jbeda,1
k8s.io/kubernetes/pkg/volume/iscsi,cjcullen,1
k8s.io/kubernetes/pkg/volume/nfs,justinsb,1
k8s.io/kubernetes/pkg/volume/persistent_claim,krousey,1
k8s.io/kubernetes/pkg/volume/quobyte,yujuhong,1
k8s.io/kubernetes/pkg/volume/rbd,piosz,1
k8s.io/kubernetes/pkg/volume/secret,rmmh,1
@ -893,13 +810,13 @@ k8s.io/kubernetes/pkg/volume/util,saad-ali,0
k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations,freehan,1
k8s.io/kubernetes/pkg/volume/vsphere_volume,deads2k,1
k8s.io/kubernetes/pkg/watch,mwielgus,1
k8s.io/kubernetes/pkg/watch/json,thockin,1
k8s.io/kubernetes/pkg/watch/versioned,childsb,1
k8s.io/kubernetes/plugin/pkg/admission/admit,piosz,1
k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages,kargakis,1
k8s.io/kubernetes/plugin/pkg/admission/antiaffinity,timothysc,1
k8s.io/kubernetes/plugin/pkg/admission/deny,eparis,1
k8s.io/kubernetes/plugin/pkg/admission/exec,deads2k,1
k8s.io/kubernetes/plugin/pkg/admission/gc,Q-Lee,1
k8s.io/kubernetes/plugin/pkg/admission/imagepolicy,apelisse,1
k8s.io/kubernetes/plugin/pkg/admission/initialresources,piosz,0
k8s.io/kubernetes/plugin/pkg/admission/limitranger,ncdc,1
@ -907,6 +824,7 @@ k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/namespace/exists,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle,derekwaynecarr,0
k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label,jdef,1
k8s.io/kubernetes/plugin/pkg/admission/podnodeselector,yifan-gu,1
k8s.io/kubernetes/plugin/pkg/admission/resourcequota,fabioy,1
k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy,maisem,1
k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny,vulpecula,1
@ -914,17 +832,19 @@ k8s.io/kubernetes/plugin/pkg/admission/serviceaccount,liggitt,0
k8s.io/kubernetes/plugin/pkg/admission/storageclass/default,pmorie,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/allow,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/passwordfile,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/anonymous,ixdy,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/basicauth,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/union,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/x509,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/anytoken,timstclair,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc,brendandburns,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook,ghodss,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,hurf,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy,jszczepkowski,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook,hurf,1
k8s.io/kubernetes/plugin/pkg/client/auth/oidc,cjcullen,1
k8s.io/kubernetes/plugin/pkg/scheduler,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/algorithm,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider,fgrzadkowski,0
@ -932,7 +852,9 @@ k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/api/validation,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/factory,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache,fgrzadkowski,0
k8s.io/kubernetes/test/integration,lavalamp,1
k8s.io/kubernetes/test/e2e,kevin-wangzefeng,1
k8s.io/kubernetes/test/e2e/chaosmonkey,pmorie,1
k8s.io/kubernetes/test/e2e_node,mml,1
k8s.io/kubernetes/test/integration/auth,jbeda,1
k8s.io/kubernetes/test/integration/client,Q-Lee,1
k8s.io/kubernetes/test/integration/configmap,Q-Lee,1
@ -951,26 +873,10 @@ k8s.io/kubernetes/test/integration/quota,alex-mohr,1
k8s.io/kubernetes/test/integration/replicaset,janetkuo,1
k8s.io/kubernetes/test/integration/replicationcontroller,jbeda,1
k8s.io/kubernetes/test/integration/scheduler,mikedanese,1
k8s.io/kubernetes/test/integration/scheduler_perf,roberthbailey,1
k8s.io/kubernetes/test/integration/secrets,rmmh,1
k8s.io/kubernetes/test/integration/serviceaccount,deads2k,1
k8s.io/kubernetes/test/integration/storageclasses,andyzheng0831,1
k8s.io/kubernetes/third_party/forked/reflect,yifan-gu,1
k8s.io/kubernetes/third_party/golang/expansion,dchen1107,1
k8s.io/kubernetes/third_party/golang/go/ast,mml,1
k8s.io/kubernetes/third_party/golang/go/build,pmorie,1
k8s.io/kubernetes/third_party/golang/go/constant,Q-Lee,1
k8s.io/kubernetes/third_party/golang/go/doc,fabioy,1
k8s.io/kubernetes/third_party/golang/go/parser,davidopp,1
k8s.io/kubernetes/third_party/golang/go/printer,madhusudancs,1
k8s.io/kubernetes/third_party/golang/go/scanner,hurf,1
k8s.io/kubernetes/third_party/golang/go/token,mml,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/discovery,hurf,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/dynamic,smarterclayton,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/rest,yujuhong,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/auth,luxas,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/cache,mikedanese,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/clientcmd,spxtr,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/clientcmd/api,pmorie,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/record,yifan-gu,1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/transport,jszczepkowski,1
kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.,yujuhong,0
k8s.io/kubernetes/test/integration/thirdparty,ixdy,1
k8s.io/kubernetes/test/list,pmorie,1
kubelet Clean up pods on node kubelet should be able to delete * pods per node in *.,yujuhong,0
1 name owner auto-assigned
2 DEFAULT rmmh/spxtr/ixdy/apelisse/fejta 0
3 Addon update should propagate add-on file changes eparis 1
4 AppArmor when running with AppArmor should enforce a permissive profile brendandburns yujuhong 1
5 AppArmor when running with AppArmor should enforce a profile blocking writes timstclair freehan 1
6 AppArmor when running with AppArmor should reject an unloaded profile eparis kargakis 1
7 AppArmor when running without AppArmor should reject a pod with an AppArmor profile childsb vulpecula 1
BeforeSuite ixdy 0
8 Cadvisor should be healthy on every node. vishh 0
9 Cassandra should create and scale cassandra fabioy 1
10 CassandraPetSet should create petset erictune 1
Celery-RabbitMQ should create and stop celery+rabbitmq servers luxas 1
11 Cluster level logging using Elasticsearch should check that logs from containers are ingested into Elasticsearch crassirostris 0
12 Cluster level logging using GCL should check that logs from containers are ingested in GCL crassirostris 0
Cluster size autoscaling Should scale cluster size based on cpu reservation dchen1107 1
Cluster size autoscaling Should scale cluster size based on cpu utilization ixdy 1
Cluster size autoscaling Should scale cluster size based on memory reservation ghodss 1
Cluster size autoscaling Should scale cluster size based on memory utilization childsb 1
13 Cluster size autoscaling should add node to the particular mig spxtr 1
14 Cluster size autoscaling should correctly scale down after a node is not needed pmorie 1
15 Cluster size autoscaling should correctly scale down after a node is not needed when there is non autoscaled pool krousey 1
31 ConfigMap should be consumable in multiple volumes in the same pod caesarxuchao 1
32 ConfigMap should be consumable via environment variable ncdc 1
33 ConfigMap updates should be reflected in volume kevin-wangzefeng 1
34 Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull from private registry with secret Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute poststart exec hook properly mikedanese dchen1107 1
35 Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from docker hub Container Lifecycle Hook when create a pod with lifecycle hook when it is exec hook should execute prestop exec hook properly girishkalele ixdy 1
36 Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from gcr.io Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute poststart http hook properly Random-Liu jszczepkowski 0 1
37 Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull from private registry without secret Container Lifecycle Hook when create a pod with lifecycle hook when it is http hook should execute prestop http hook properly yifan-gu janetkuo 1
38 Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull image from invalid registry Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image * janetkuo Random-Liu 1 0
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull non-existing image from gcr.io kevin-wangzefeng 1
39 Container Runtime Conformance Test container runtime conformance blackbox test when starting a container that exits it should run with the expected status luxas 1
40 Container Runtime Conformance Test container runtime conformance blackbox test when starting a container that exits should report termination message if TerminationMessagePath is set timothysc 1
41 DNS should provide DNS for ExternalName services rmmh 1
48 DaemonRestart Controller Manager should not create/delete replicas across restart vulpecula 1
49 DaemonRestart Kubelet should not restart containers across restart madhusudancs 1
50 DaemonRestart Scheduler should continue assigning pods to nodes across restart lavalamp 1
51 Density create a batch of pods latency/resource should be within limit when create 10 pods with 0 interval Density create a batch of pods latency/resource should be within limit when create * pods with * interval zmerlynn apelisse 1
52 Density create a batch of pods latency/resource should be within limit when create 10 pods with 100ms interval Density create a batch of pods with higher API QPS latency/resource should be within limit when create * pods with * interval (QPS *) jlowdermilk 1
53 Density create a batch of pods latency/resource should be within limit when create 10 pods with 300ms interval Density create a sequence of pods latency/resource should be within limit when create * pods with * background pods jsafrane wojtek-t 1
54 Density create a batch of pods latency/resource should be within limit when create 105 pods with 0 interval Density should allow running maximum capacity pods on nodes bgrant0607 wojtek-t 1 0
55 Density create a batch of pods latency/resource should be within limit when create 105 pods with 100ms interval Density should allow starting * pods per node derekwaynecarr gmarek 1 0
Density create a batch of pods latency/resource should be within limit when create 105 pods with 300ms interval saad-ali 1
Density create a batch of pods latency/resource should be within limit when create 35 pods with 0 interval davidopp 1
Density create a batch of pods latency/resource should be within limit when create 35 pods with 100ms interval kargakis 1
Density create a batch of pods latency/resource should be within limit when create 35 pods with 300ms interval justinsb 1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create 105 pods with 0 interval (QPS 60) roberthbailey 1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create 105 pods with 100ms interval (QPS 60) kevin-wangzefeng 1
Density create a batch of pods with higher API QPS latency/resource should be within limit when create 105 pods with 300ms interval (QPS 60) apelisse 1
Density create a sequence of pods latency/resource should be within limit when create 10 pods with 50 background pods janetkuo 1
Density create a sequence of pods latency/resource should be within limit when create 30 pods with 50 background pods alex-mohr 1
Density create a sequence of pods latency/resource should be within limit when create 50 pods with 50 background pods yifan-gu 1
Density should allow running maximum capacity pods on nodes smarterclayton 1
Density should allow starting 100 pods per node wojtek-t 0
Density should allow starting 3 pods per node gmarek 0
Density should allow starting 30 pods per node gmarek 0
Density should allow starting 50 pods per node wojtek-t 0
Density should allow starting 95 pods per node wojtek-t 0
56 Deployment RecreateDeployment should delete old pods and create new ones pwittrock 0
57 Deployment RollingUpdateDeployment should delete old pods and create new ones pwittrock 0
58 Deployment RollingUpdateDeployment should scale up and down in the right order pwittrock 0
59 Deployment deployment reaping should cascade to its replica sets and pods kargakis 1
60 Deployment deployment should create new pods pwittrock 0
61 Deployment deployment should delete old replica sets pwittrock 0
62 Deployment deployment should label adopted RSs and pods pwittrock 0
67 Deployment paused deployment should be able to scale kargakis 1
68 Deployment paused deployment should be ignored by the controller kargakis 0
69 Deployment scaled rollout deployment should not block on annotation check kargakis 1
70 DisruptionController evictions: enough pods, absolute => should allow an eviction DisruptionController evictions: * => * madhusudancs jszczepkowski 1
DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction nikhiljindal 1
DisruptionController evictions: no PDB => should allow an eviction krousey 1
DisruptionController evictions: too few pods, absolute => should not allow an eviction mml 1
DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction mml 1
DisruptionController should allow an eviction when enough pods mml 1
DisruptionController should allow an eviction when there is no PDB Q-Lee 1
71 DisruptionController should create a PodDisruptionBudget quinton-hoole 1
DisruptionController should not allow an eviction when too few pods lavalamp 1
72 DisruptionController should update PodDisruptionBudget status bgrant0607 1
73 Docker Containers should be able to override the image's default arguments (docker cmd) maisem 0
74 Docker Containers should be able to override the image's default command and arguments maisem 0
75 Docker Containers should be able to override the image's default commmand (docker entrypoint) maisem 0
76 Docker Containers should use the image defaults if command and args are blank vishh 0
Does nothing janetkuo 1
77 Downward API should create a pod that prints his name and namespace nhlfr 0
78 Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars deads2k 1
79 Downward API should provide default limits.cpu/memory from node allocatable derekwaynecarr 0
Downward API should provide default limits.cpu/memory from node capacity mml 1
80 Downward API should provide pod IP as an env var nhlfr 0
81 Downward API should provide pod name and namespace as env vars nhlfr 0
82 Downward API volume should provide container's cpu limit smarterclayton 1
94 Dynamic provisioning DynamicProvisioner Alpha should create and delete alpha persistent volumes andyzheng0831 1
95 Dynamic provisioning DynamicProvisioner should create and delete persistent volumes jsafrane 0
96 DynamicKubeletConfiguration When a configmap called `kubelet-<node-name>` is added to the `kube-system` namespace The Kubelet on that node should restart to take up the new config mwielgus 1
97 ESIPP should handle updates to source ip annotation thockin 1
98 ESIPP should only target nodes with endpoints yifan-gu 1
99 ESIPP should work for type=LoadBalancer kargakis 1
100 ESIPP should work for type=NodePort yujuhong 1
101 ESIPP should work from pods dchen1107 1
102 Empty does nothing alex-mohr 1
103 EmptyDir volumes should support (non-root,0644,default) timstclair 1
104 EmptyDir volumes should support (non-root,0644,tmpfs) spxtr 1
105 EmptyDir volumes should support (non-root,0666,default) dchen1107 1
119 EmptyDir volumes when FSGroup is specified new files should be created with FSGroup ownership when container is root childsb 1
120 EmptyDir volumes when FSGroup is specified volume on default medium should have the correct mode using FSGroup eparis 1
121 EmptyDir volumes when FSGroup is specified volume on tmpfs should have the correct mode using FSGroup timothysc 1
EmptyDir wrapper volumes should becomes running justinsb 1
122 EmptyDir wrapper volumes should not cause race condition when used for configmaps bprashanth 1
123 EmptyDir wrapper volumes should not cause race condition when used for git_repo girishkalele 1
124 EmptyDir wrapper volumes should not conflict deads2k 1
131 Federated Services Service creation should create matching services in underlying clusters jbeda 1
132 Federated Services Service creation should succeed rmmh 1
133 Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to connect to a federated ingress via its load balancer rmmh 1
134 Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to discover a federated ingress service Federated ingresses Federated Ingresses should be created and deleted successfully bgrant0607 kevin-wangzefeng 1
Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to discover a federated ingress service via DNS smarterclayton 1
135 Federated ingresses Federated Ingresses should create and update matching ingresses in underlying clusters ghodss 1
Federated ingresses Ingress objects should be created and deleted successfully madhusudancs 1
136 Federation API server authentication should accept cluster resources when the client has right authentication credentials davidopp 1
137 Federation API server authentication should not accept cluster resources when the client has invalid authentication credentials yujuhong 1
138 Federation API server authentication should not accept cluster resources when the client has no authentication credentials nikhiljindal 1
139 Federation apiserver Admission control should not be able to create resources if namespace does not exist alex-mohr 1
140 Federation apiserver Cluster objects should be created and deleted successfully ghodss 1
141 Federation events Event objects should be created and deleted successfully karlkfi 1
Federation ingresses Ingress objects should be created and deleted successfully bprashanth 1
142 Federation namespace Namespace objects should be created and deleted successfully xiang90 1
143 Federation replicasets Federated ReplicaSet should create and update matching replicasets in underling clusters childsb 1
144 Federation replicasets ReplicaSet objects should be created and deleted successfully apelisse 1
145 Federation secrets Secret objects should be created and deleted successfully pmorie 1
GCE L7 LoadBalancer Controller should create GCE L7 loadbalancers and verify Ingress bprashanth 0
146 GKE local SSD should write and read from node local SSD fabioy 0
147 GKE node pools should create a cluster with multiple node pools fabioy 1
148 Garbage collector should delete pods created by rc when not orphaning justinsb 1
Garbage collector should handle the creation of 1000 pods xiang90 1
149 Garbage collector should orphan pods created by rc if delete options say so fabioy 1
150 Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil zmerlynn 1
Generated release_1_2 clientset should create pods, delete pods, watch pods ncdc 1
Generated release_1_3 clientset should create pods, delete pods, watch pods krousey 1
151 Generated release_1_5 clientset should create pods, delete pods, watch pods ghodss 1
152 HA-master pods survive addition/removal roberthbailey 1
153 Hazelcast should create and scale hazelcast mikedanese 1
154 Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5 jszczepkowski 0
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability jszczepkowski 0
155 Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 5 pods to 3 pods and from 3 to 1 jszczepkowski 0
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability jszczepkowski 0
156 Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5 jszczepkowski 0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability jszczepkowski 0
157 Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1 jszczepkowski 0
158 Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability Horizontal pod autoscaling (scale resource: CPU) ReplicationController * jszczepkowski 0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability jszczepkowski 0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability jszczepkowski 0
159 Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods jszczepkowski 0
160 Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 2 pods to 1 pod jszczepkowski 0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 2 pods to 1 pod using HPA version v1 jszczepkowski 0
161 HostPath should give a volume the correct mode thockin 1
162 HostPath should support r/w luxas 1
163 HostPath should support subPath sttts 1
164 Image Container Conformance Test image conformance blackbox test when testing image that does not exist should ignore pull failures jsafrane 1
165 Image Container Conformance Test image conformance blackbox test when testing images that exist It should present successfully yujuhong 1
166 Image Container Conformance Test image conformance blackbox test when testing images that exist should list pulled images maisem 1
167 ImageID should be set to the manifest digest (from RepoDigests) when available mikedanese 1
168 InitContainer should invoke init containers on a RestartAlways pod saad-ali 1
169 InitContainer should invoke init containers on a RestartNever pod vulpecula 1
170 InitContainer should not start app containers and fail the pod if init containers fail on a RestartNever pod timothysc maisem 1 0
171 InitContainer should not start app containers if init containers fail on a RestartAlways pod davidopp maisem 1 0
172 Initial Resources should set initial resources based on historical data piosz 0
173 Job should delete a job soltysh 0
174 Job should fail a job soltysh 0
179 Job should scale a job down soltysh 0
180 Job should scale a job up soltysh 0
181 Kibana Logging Instances Is Alive should check that the Kibana logging instance is alive swagiaal 0
182 KubeProxy should test kube-proxy Kubectl alpha client Kubectl run ScheduledJob should create a ScheduledJob vulpecula yujuhong 1
183 Kubectl client Guestbook application should create and stop a working application pwittrock 0
184 Kubectl client Kubectl api-versions should check if v1 is in available api versions pwittrock 0
185 Kubectl client Kubectl apply should apply a new configuration to an existing RC pwittrock 0
195 Kubectl client Kubectl patch should add annotations for pods in rc janetkuo 0
196 Kubectl client Kubectl replace should update a single-container pod's image karlkfi 1
197 Kubectl client Kubectl rolling-update should support rolling-update to same image janetkuo 0
Kubectl client Kubectl run --quiet job should not output anything except the command when --quiet is set soltysh 1
198 Kubectl client Kubectl run --rm job should create a job from an image, then delete the job soltysh 0
Kubectl client Kubectl run ScheduledJob should create a ScheduledJob soltysh 0
199 Kubectl client Kubectl run default should create an rc or deployment from an image janetkuo 0
200 Kubectl client Kubectl run deployment should create a deployment from an image janetkuo 0
Kubectl client Kubectl run job should create a job from an image when restart is Never soltysh 0
201 Kubectl client Kubectl run job should create a job from an image when restart is OnFailure soltysh 0
202 Kubectl client Kubectl run pod should create a pod from an image when restart is Never janetkuo 0
203 Kubectl client Kubectl run rc should create an rc from an image janetkuo 0
220 Kubelet Container Manager Validate OOM score adjustments once the node is setup docker daemon's oom-score-adj should be -999 thockin 1
221 Kubelet Container Manager Validate OOM score adjustments once the node is setup guaranteed container's oom-score-adj should be -998 kargakis 1
222 Kubelet Container Manager Validate OOM score adjustments once the node is setup pod infra containers oom-score-adj should be -998 and best effort container's should be 1000 timothysc 1
Kubelet Container Manager oom score adjusting when scheduling a busybox command that always fails in a pod should be possible to delete jbeda 1
Kubelet Container Manager oom score adjusting when scheduling a busybox command that always fails in a pod should have an error terminated reason vulpecula 1
223 Kubelet Eviction Manager hard eviction test pod using the most disk space gets evicted when the node disk usage is above the eviction hard threshold should evict the pod using the most disk space karlkfi 1
224 Kubelet experimental resource usage tracking for 100 pods per node over 20m0s Kubelet experimental resource usage tracking resource tracking for * pods per node yujuhong 0
225 Kubelet experimental resource usage tracking resource tracking for 100 pods per node Kubelet regular resource usage tracking resource tracking for * pods per node ghodss yujuhong 0
Kubelet metrics api when querying /stats/summary it should report resource usage through the stats api fabioy 1
Kubelet regular resource usage tracking for 0 pods per node over 20m0s yujuhong 0
Kubelet regular resource usage tracking for 100 pods per node over 20m0s yujuhong 0
Kubelet regular resource usage tracking for 35 pods per node over 20m0s yujuhong 0
Kubelet regular resource usage tracking resource tracking for 0 pods per node pmorie 1
Kubelet regular resource usage tracking resource tracking for 100 pods per node justinsb 1
Kubelet regular resource usage tracking resource tracking for 35 pods per node madhusudancs 1
226 Kubelet when scheduling a busybox command in a pod it should print the output to logs ixdy 1
227 Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete smarterclayton 1
228 Kubelet when scheduling a busybox command that always fails in a pod should have an error terminated reason deads2k 1
231 Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive wonderfly 0
232 LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. cjcullen 1
233 Liveness liveness pods should be automatically restarted andyzheng0831 1
234 Load capacity should be able to handle 3 pods per node Load capacity should be able to handle * pods per node gmarek 0
235 Load capacity should be able to handle 30 pods per node Loadbalancing: L7 GCE shoud create ingress with given static-ip wojtek-t bprashanth 0
236 Loadbalancing: L7 GCE shoud create ingress with given static-ip Loadbalancing: L7 GCE should conform to Ingress spec vulpecula bprashanth 1 0
237 Loadbalancing: L7 GCE should conform to Ingress spec Loadbalancing: L7 Nginx should conform to Ingress spec andyzheng0831 bprashanth 1 0
238 Loadbalancing: L7 Nginx should conform to Ingress spec Logging soak should survive logging 1KB every * seconds, for a duration of *, scaling up to * pods per node ncdc justinsb 1
239 MemoryEviction when there is memory pressure should evict pods in the correct order (besteffort first, then burstable, then guaranteed) ixdy 1
240 Mesos applies slave attributes as labels justinsb 1
241 Mesos schedules pods annotated with roles on correct slaves timstclair 1
254 Namespaces should delete fast enough (90 percent of 100 namespaces in 150 seconds) kevin-wangzefeng 1
255 Namespaces should ensure that all pods are removed when a namespace is deleted. xiang90 1
256 Namespaces should ensure that all services are removed when a namespace is deleted. pmorie 1
257 Networking Granular Checks should function for pod communication between nodes Networking Granular Checks: Pods should function for intra-pod communication: http freehan stts 0
258 Networking Granular Checks should function for pod communication on a single node Networking Granular Checks: Pods should function for intra-pod communication: udp freehan 0
Networking Granular Checks: Pods should function for intra-pod communication: http spxtr 1
Networking Granular Checks: Pods should function for intra-pod communication: udp alex-mohr 1
259 Networking Granular Checks: Pods should function for node-pod communication: http spxtr 1
260 Networking Granular Checks: Pods should function for node-pod communication: udp wojtek-t 1
261 Networking Granular Checks: Services should function for endpoint-Service: http bgrant0607 1
268 Networking Granular Checks: Services should update endpoints: udp freehan 1
269 Networking Granular Checks: Services should update nodePort: http nikhiljindal 1
270 Networking Granular Checks: Services should update nodePort: udp smarterclayton 1
271 Networking IPerf should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients) Networking IPerf should transfer ~ 1GB onto the service endpoint * servers (maximum of * clients) ghodss fabioy 1
272 Networking should check kube-proxy urls lavalamp 1
Networking should function for intra-pod communication sttts 0
273 Networking should provide Internet connection for containers sttts 0
274 Networking should provide unchanging, static URL paths for kubernetes api services freehan 0
275 NodeOutOfDisk runs out of disk space vishh 0
276 NodeProblemDetector KernelMonitor should generate node condition and events for corresponding errors Random-Liu 0
277 Nodes Network when a node becomes unreachable All pods on the unreachable node should be marked as NotReady upon the node turn NotReady AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout Nodes Network when a node becomes unreachable * freehan alex-mohr 0 1
Nodes Network when a node becomes unreachable recreates pods scheduled on the unreachable node AND allows scheduling of pods on a node after it rejoins the cluster madhusudancs 1
278 Nodes Resize should be able to add nodes piosz 1
279 Nodes Resize should be able to delete nodes zmerlynn 1
PersistentVolumes NFS volume can be created, bound, retrieved, unbound, and used by a pod jsafrane 0
280 PersistentVolumes create a PV and a pre-bound PVC: test write access caesarxuchao 1
281 PersistentVolumes create a PVC and a pre-bound PV: test write access lavalamp 1
282 PersistentVolumes create a PVC and non-pre-bound PV: test write access mwielgus 1
PersistentVolumes should create a PersistentVolume, Claim, and a client Pod that will test the read/write access of the volume ixdy 1
283 PersistentVolumes should create a non-pre-bound PV and PVC: test write access andyzheng0831 1
284 Pet Store should scale to persist a nominal number ( 50 ) of transactions in 1m0s seconds Pet Store should scale to persist a nominal number ( * ) of transactions in * seconds timstclair xiang90 1
285 Pet set recreate should recreate evicted petset hurf 1
286 PetSet Basic PetSet functionality should handle healthy pet restarts during scale kevin-wangzefeng 1
287 PetSet Basic PetSet functionality should provide basic identity girishkalele 1
288 PetSet Deploy clustered applications should creating a working mysql cluster piosz 1
289 PetSet Deploy clustered applications should creating a working redis cluster mtaufen 1
290 PetSet Deploy clustered applications should creating a working zookeeper cluster rmmh 1
291 Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host alex-mohr saad-ali 1 0
292 Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully. ghodss saad-ali 1 0
293 Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession saad-ali 0
Pod Disks should schedule a pod w/ a RW PD, remove it, then schedule it on another host saad-ali 0
294 Pod Disks should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host mml 1
295 Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully. saad-ali 1
Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both. saad-ali 0
296 Pod Disks should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession saad-ali 0
297 Pod garbage collector should handle the creation of 1000 pods wojtek-t 1
Pods should *not* be restarted with a /healthz http liveness probe cjcullen 1
Pods should *not* be restarted with a docker exec "cat /tmp/health" liveness probe vishh 0
298 Pods should allow activeDeadlineSeconds to be updated derekwaynecarr 0
Pods should be restarted with a /healthz http liveness probe girishkalele 1
Pods should be restarted with a docker exec "cat /tmp/health" liveness probe bgrant0607 1
Pods should be schedule with cpu and memory limits vishh 0
299 Pods should be submitted and removed davidopp 1
300 Pods should be updated derekwaynecarr 1
301 Pods should cap back-off at MaxContainerBackOff maisem 1
302 Pods should contain environment variables for services jlowdermilk 1
303 Pods should get a host IP xiang90 1
Pods should have monotonically increasing restart count apelisse 1
304 Pods should have their auto-restart back-off timer reset on image update mikedanese 1
Pods should invoke init containers on a RestartAlways pod cjcullen 1
Pods should invoke init containers on a RestartNever pod justinsb 1
Pods should not start app containers and fail the pod if init containers fail on a RestartNever pod maisem 0
Pods should not start app containers if init containers fail on a RestartAlways pod dchen1107 1
305 Pods should support remote command execution over websockets madhusudancs 1
306 Pods should support retrieving logs from the container over websockets vishh 0
307 Port forwarding With a server that expects a client request should support a client that connects, sends data, and disconnects sttts 0
316 Probing container should have monotonically increasing restart count Random-Liu 0
317 Probing container with readiness probe should not be ready before initial delay and never restart Random-Liu 0
318 Probing container with readiness probe that fails should never be ready and never restart Random-Liu 0
319 Proxy version v1 should proxy logs on node Proxy * should proxy logs on node girishkalele karlkfi 1
320 Proxy version v1 should proxy logs on node using proxy subresource Proxy * should proxy logs on node using proxy subresource karlkfi hurf 1
321 Proxy version v1 should proxy logs on node with explicit kubelet port Proxy * should proxy logs on node with explicit kubelet port mwielgus ixdy 1
322 Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource Proxy * should proxy logs on node with explicit kubelet port using proxy subresource bgrant0607 dchen1107 1
323 Proxy version v1 should proxy through a service and a pod Proxy * should proxy through a service and a pod mml karlkfi 1
324 Proxy version v1 should proxy to cadvisor Proxy * should proxy to cadvisor rmmh jszczepkowski 1
325 Proxy version v1 should proxy to cadvisor using proxy subresource Proxy * should proxy to cadvisor using proxy subresource deads2k roberthbailey 1
326 Reboot each node by dropping all inbound packets for a while and ensure they function afterwards quinton-hoole 0
327 Reboot each node by dropping all outbound packets for a while and ensure they function afterwards quinton-hoole 0
328 Reboot each node by ordering clean reboot and ensure they function upon restart quinton-hoole 0
335 ReplicationController should serve a basic image on each replica with a private image jbeda 1
336 ReplicationController should serve a basic image on each replica with a public image krousey 1
337 Rescheduler should ensure that critical pod is scheduled in case there is no resources available mtaufen 1
338 Resource-usage regular resource usage tracking resource tracking for 10 pods per node Resource-usage regular resource usage tracking resource tracking for * pods per node janetkuo 1
Resource-usage regular resource usage tracking resource tracking for 105 pods per node jszczepkowski 1
Resource-usage regular resource usage tracking resource tracking for 35 pods per node jdef 1
339 ResourceQuota should create a ResourceQuota and capture the life of a configMap. timstclair 1
340 ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim. bgrant0607 1
341 ResourceQuota should create a ResourceQuota and capture the life of a pod. pmorie 1
373 Secret should create a pod that reads a secret luxas 1
374 Secrets should be consumable from pods in env vars mml 1
375 Secrets should be consumable from pods in volume ghodss 1
Secrets should be consumable from pods in volume with Mode set in the item madhusudancs 1
376 Secrets should be consumable from pods in volume with defaultMode set derekwaynecarr 1
377 Secrets should be consumable from pods in volume with mappings freehan 1
378 Secrets should be consumable from pods in volume with mappings and Item Mode set kevin-wangzefeng 1
379 Secrets should be consumable in multiple volumes in a pod girishkalele 1
380 Security Context should support container.SecurityContext.RunAsUser alex-mohr 1
381 Security Context should support pod.Spec.SecurityContext.RunAsUser bgrant0607 1
393 ServiceLoadBalancer should support simple GET on Ingress ips bprashanth 0
394 Services should be able to change the type and ports of a service bprashanth 0
395 Services should be able to create a functioning NodePort service bprashanth 0
Services should be able to create services of type LoadBalancer and externalTraffic=localOnly jlowdermilk 1
396 Services should be able to up and down services bprashanth 0
397 Services should check NodePort out-of-range bprashanth 0
398 Services should create endpoints for unready pods maisem 0
405 Services should use same NodePort with same port but different protocols timothysc 1
406 Services should work after restarting apiserver bprashanth 0
407 Services should work after restarting kube-proxy bprashanth 0
408 SimpleMount should be able to mount an emptydir on a container jdef 1
409 Spark should start spark master, driver and workers girishkalele 1
410 Staging client repo client should create pods, delete pods, watch pods jbeda 1
411 Storm should create and stop Zookeeper, Nimbus and Storm worker servers mtaufen 1
412 Summary API when querying /stats/summary should report resource usage through the stats api apelisse 1
413 Sysctls should not launch unsafe, but not explicitly enabled sysctls on the node madhusudancs 1
414 Sysctls should reject invalid sysctls davidopp 1
415 Sysctls should support sysctls Random-Liu 1
416 Sysctls should support unsafe sysctls which are actually whitelisted deads2k 1
417 ThirdParty resources Simple Third Party creating/deleting thirdparty objects works luxas 1
Ubernetes Lite should spread the pods of a replication controller across zones quinton-hoole 0
Ubernetes Lite should spread the pods of a service across zones quinton-hoole 0
418 Upgrade cluster upgrade should maintain a functioning cluster girishkalele 1
419 Upgrade cluster upgrade should maintain responsive services mikedanese 1
420 Upgrade master upgrade should maintain responsive services mikedanese 1
438 Volumes NFS should be mountable andyzheng0831 1
439 Volumes PD should be mountable caesarxuchao 1
440 Volumes iSCSI should be mountable jsafrane 1
download_gcloud hurf 1
hostPath should give a volume the correct mode roberthbailey 1
hostPath should support r/w roberthbailey 1
hostPath should support subPath krousey 1
install_gcloud roberthbailey 1
k8s.io/kubernetes/cluster/addons/dns/kube2sky zmerlynn 1
441 k8s.io/kubernetes/cmd/genutils rmmh 1
442 k8s.io/kubernetes/cmd/hyperkube jbeda 0
443 k8s.io/kubernetes/cmd/kube-apiserver/app nikhiljindal 0
444 k8s.io/kubernetes/cmd/kube-apiserver/app/options nikhiljindal 0
445 k8s.io/kubernetes/cmd/kube-discovery/app vulpecula 1
446 k8s.io/kubernetes/cmd/kube-proxy/app luxas 1
447 k8s.io/kubernetes/cmd/kubeadm/app/images davidopp 1
448 k8s.io/kubernetes/cmd/kubeadm/app/util krousey 1
449 k8s.io/kubernetes/cmd/kubelet/app hurf 1
k8s.io/kubernetes/cmd/kubernetes-discovery/discoverysummarizer thockin 1
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset bgrant0607 1
450 k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/clientset_generated/test_internalclientset/typed/testgroup.k8s.io/unversioned eparis 1
k8s.io/kubernetes/cmd/libs/go2idl/client-gen/testoutput/testgroup/unversioned wojtek-t 1
k8s.io/kubernetes/cmd/libs/go2idl/deepcopy-gen/generators mwielgus 1
k8s.io/kubernetes/cmd/libs/go2idl/generator ixdy 1
k8s.io/kubernetes/cmd/libs/go2idl/import-boss/generators kevin-wangzefeng 1
k8s.io/kubernetes/cmd/libs/go2idl/namer luxas 1
451 k8s.io/kubernetes/cmd/libs/go2idl/openapi-gen/generators davidopp 1
k8s.io/kubernetes/cmd/libs/go2idl/parser lavalamp 1
k8s.io/kubernetes/cmd/libs/go2idl/types mikedanese 1
452 k8s.io/kubernetes/cmd/mungedocs mwielgus 1
453 k8s.io/kubernetes/examples Random-Liu 0
k8s.io/kubernetes/examples/apiserver nikhiljindal 0
454 k8s.io/kubernetes/federation/apis/federation/install nikhiljindal 0
455 k8s.io/kubernetes/federation/apis/federation/validation nikhiljindal 0
k8s.io/kubernetes/federation/cmd/federation-apiserver/app dchen1107 1
456 k8s.io/kubernetes/federation/pkg/dnsprovider sttts 1
457 k8s.io/kubernetes/federation/pkg/dnsprovider/providers/aws/route53 cjcullen 1
458 k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns jsafrane 1
459 k8s.io/kubernetes/federation/pkg/federation-controller/cluster nikhiljindal 0
460 k8s.io/kubernetes/federation/pkg/federation-controller/daemonset madhusudancs 1
461 k8s.io/kubernetes/federation/pkg/federation-controller/deployment childsb 1
462 k8s.io/kubernetes/federation/pkg/federation-controller/ingress vishh 1
463 k8s.io/kubernetes/federation/pkg/federation-controller/namespace hurf 1
464 k8s.io/kubernetes/federation/pkg/federation-controller/replicaset roberthbailey 1
k8s.io/kubernetes/federation/pkg/federation-controller/replicaset/planner dchen1107 1
465 k8s.io/kubernetes/federation/pkg/federation-controller/secret apelisse 1
466 k8s.io/kubernetes/federation/pkg/federation-controller/service pmorie 1
467 k8s.io/kubernetes/federation/pkg/federation-controller/util bgrant0607 1
468 k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink luxas 1
469 k8s.io/kubernetes/federation/pkg/federation-controller/util/planner jszczepkowski 1
470 k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer yujuhong 1
471 k8s.io/kubernetes/federation/registry/cluster nikhiljindal 0
472 k8s.io/kubernetes/federation/registry/cluster/etcd nikhiljindal 0
473 k8s.io/kubernetes/hack/cmd/teststale thockin 1
475 k8s.io/kubernetes/pkg/api Q-Lee 1
476 k8s.io/kubernetes/pkg/api/endpoints cjcullen 1
477 k8s.io/kubernetes/pkg/api/errors yifan-gu 1
478 k8s.io/kubernetes/pkg/api/events caesarxuchao 1
479 k8s.io/kubernetes/pkg/api/install timothysc 1
480 k8s.io/kubernetes/pkg/api/meta fabioy 1
481 k8s.io/kubernetes/pkg/api/pod piosz 1
498 k8s.io/kubernetes/pkg/apis/batch/v1 vishh 1
499 k8s.io/kubernetes/pkg/apis/batch/v2alpha1 jlowdermilk 1
500 k8s.io/kubernetes/pkg/apis/batch/validation erictune 0
k8s.io/kubernetes/pkg/apis/certificates/install zmerlynn 1
501 k8s.io/kubernetes/pkg/apis/componentconfig jbeda 1
k8s.io/kubernetes/pkg/apis/componentconfig/install pmorie 1
502 k8s.io/kubernetes/pkg/apis/extensions bgrant0607 1
k8s.io/kubernetes/pkg/apis/extensions/install thockin 1
503 k8s.io/kubernetes/pkg/apis/extensions/v1beta1 madhusudancs 1
504 k8s.io/kubernetes/pkg/apis/extensions/validation nikhiljindal 1
505 k8s.io/kubernetes/pkg/apis/policy/validation deads2k 1
k8s.io/kubernetes/pkg/apis/rbac/install ixdy 1
506 k8s.io/kubernetes/pkg/apis/rbac/validation erictune 0
507 k8s.io/kubernetes/pkg/apis/storage/validation caesarxuchao 1
508 k8s.io/kubernetes/pkg/apiserver nikhiljindal 0
509 k8s.io/kubernetes/pkg/apiserver/audit k8s.io/kubernetes/pkg/apiserver/filters caesarxuchao yifan-gu 1
510 k8s.io/kubernetes/pkg/apiserver/request luxas 1
511 k8s.io/kubernetes/pkg/auth/authenticator/bearertoken liggitt 0
512 k8s.io/kubernetes/pkg/auth/authorizer/abac liggitt 0
513 k8s.io/kubernetes/pkg/auth/authorizer/union liggitt 0
514 k8s.io/kubernetes/pkg/auth/group cjcullen 1
515 k8s.io/kubernetes/pkg/auth/handlers liggitt 0
516 k8s.io/kubernetes/pkg/client/cache xiang90 1
517 k8s.io/kubernetes/pkg/client/chaosclient deads2k 1
518 k8s.io/kubernetes/pkg/client/leaderelection xiang90 1
519 k8s.io/kubernetes/pkg/client/record karlkfi 1
520 k8s.io/kubernetes/pkg/client/restclient kargakis 1
521 k8s.io/kubernetes/pkg/client/retry jsafrane 1
522 k8s.io/kubernetes/pkg/client/testing/cache mikedanese 1
523 k8s.io/kubernetes/pkg/client/testing/core maisem 1
524 k8s.io/kubernetes/pkg/client/transport eparis 1
546 k8s.io/kubernetes/pkg/controller/deployment/util saad-ali 1
547 k8s.io/kubernetes/pkg/controller/disruption fabioy 1
548 k8s.io/kubernetes/pkg/controller/endpoint mwielgus 1
k8s.io/kubernetes/pkg/controller/framework karlkfi 1
549 k8s.io/kubernetes/pkg/controller/garbagecollector rmmh 1
550 k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly cjcullen 1
k8s.io/kubernetes/pkg/controller/gc jdef 1
551 k8s.io/kubernetes/pkg/controller/job soltysh 0
552 k8s.io/kubernetes/pkg/controller/namespace karlkfi 1
553 k8s.io/kubernetes/pkg/controller/node gmarek 0
k8s.io/kubernetes/pkg/controller/persistentvolume jsafrane 0
554 k8s.io/kubernetes/pkg/controller/petset fgrzadkowski 1
555 k8s.io/kubernetes/pkg/controller/podautoscaler piosz 0
556 k8s.io/kubernetes/pkg/controller/podautoscaler/metrics piosz 0
565 k8s.io/kubernetes/pkg/controller/volume/attachdetach luxas 1
566 k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache hurf 1
567 k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler jsafrane 1
568 k8s.io/kubernetes/pkg/controller/volume/persistentvolume apelisse jsafrane 1 0
569 k8s.io/kubernetes/pkg/conversion ixdy 1
570 k8s.io/kubernetes/pkg/conversion/queryparams caesarxuchao 1
571 k8s.io/kubernetes/pkg/credentialprovider justinsb 1
576 k8s.io/kubernetes/pkg/fields jsafrane 1
577 k8s.io/kubernetes/pkg/genericapiserver nikhiljindal 0
578 k8s.io/kubernetes/pkg/genericapiserver/authorizer lavalamp 1
579 k8s.io/kubernetes/pkg/genericapiserver/filters dchen1107 1
580 k8s.io/kubernetes/pkg/genericapiserver/mux zmerlynn 1
581 k8s.io/kubernetes/pkg/genericapiserver/openapi davidopp 1
582 k8s.io/kubernetes/pkg/genericapiserver/routes girishkalele 1
583 k8s.io/kubernetes/pkg/healthz thockin 1
584 k8s.io/kubernetes/pkg/httplog mtaufen 1
585 k8s.io/kubernetes/pkg/kubectl madhusudancs 1
586 k8s.io/kubernetes/pkg/kubectl/cmd rmmh 1
587 k8s.io/kubernetes/pkg/kubectl/cmd/config asalkeld 0
588 k8s.io/kubernetes/pkg/kubectl/cmd/set brendandburns 1
589 k8s.io/kubernetes/pkg/kubectl/cmd/util asalkeld 0
590 k8s.io/kubernetes/pkg/kubectl/cmd/util/editor jdef 1
591 k8s.io/kubernetes/pkg/kubectl/resource caesarxuchao 1
614 k8s.io/kubernetes/pkg/kubelet/prober alex-mohr 1
615 k8s.io/kubernetes/pkg/kubelet/prober/results krousey 1
616 k8s.io/kubernetes/pkg/kubelet/qos vishh 0
k8s.io/kubernetes/pkg/kubelet/qos/util vishh 0
617 k8s.io/kubernetes/pkg/kubelet/rkt apelisse 1
618 k8s.io/kubernetes/pkg/kubelet/rktshim mml 1
619 k8s.io/kubernetes/pkg/kubelet/server timstclair 0
638 k8s.io/kubernetes/pkg/proxy/userspace luxas 1
639 k8s.io/kubernetes/pkg/quota sttts 1
640 k8s.io/kubernetes/pkg/quota/evaluator/core yifan-gu 1
k8s.io/kubernetes/pkg/registry/certificates ncdc 1
641 k8s.io/kubernetes/pkg/registry/apps/petset madhusudancs 1
k8s.io/kubernetes/pkg/registry/componentstatus yifan-gu 1
642 k8s.io/kubernetes/pkg/registry/apps/petset/etcd wojtek-t 1
k8s.io/kubernetes/pkg/registry/configmap timothysc 1
643 k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler jszczepkowski 0
k8s.io/kubernetes/pkg/registry/configmap/etcd janetkuo 1
644 k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/etcd jszczepkowski 0
k8s.io/kubernetes/pkg/registry/controller jdef 1
645 k8s.io/kubernetes/pkg/registry/batch/job soltysh 0
k8s.io/kubernetes/pkg/registry/controller/etcd spxtr 1
646 k8s.io/kubernetes/pkg/registry/batch/job/etcd soltysh 0
k8s.io/kubernetes/pkg/registry/daemonset ghodss 1
647 k8s.io/kubernetes/pkg/registry/batch/scheduledjob soltysh 0
k8s.io/kubernetes/pkg/registry/daemonset/etcd pmorie 1
648 k8s.io/kubernetes/pkg/registry/batch/scheduledjob/etcd soltysh 0
k8s.io/kubernetes/pkg/registry/deployment timothysc 1
649 k8s.io/kubernetes/pkg/registry/certificates/certificates cjcullen 1
k8s.io/kubernetes/pkg/registry/deployment/etcd fgrzadkowski 1
650 k8s.io/kubernetes/pkg/registry/core/componentstatus jsafrane 1
k8s.io/kubernetes/pkg/registry/endpoint smarterclayton 1
651 k8s.io/kubernetes/pkg/registry/core/configmap bgrant0607 1
k8s.io/kubernetes/pkg/registry/endpoint/etcd timstclair 1
652 k8s.io/kubernetes/pkg/registry/core/configmap/etcd ghodss 1
k8s.io/kubernetes/pkg/registry/event girishkalele 1
653 k8s.io/kubernetes/pkg/registry/core/controller luxas 1
k8s.io/kubernetes/pkg/registry/event/etcd karlkfi 1
654 k8s.io/kubernetes/pkg/registry/core/controller/etcd mwielgus 1
k8s.io/kubernetes/pkg/registry/experimental/controller/etcd smarterclayton 1
655 k8s.io/kubernetes/pkg/registry/core/endpoint yujuhong 1
k8s.io/kubernetes/pkg/registry/generic eparis 1
656 k8s.io/kubernetes/pkg/registry/core/endpoint/etcd bgrant0607 1
k8s.io/kubernetes/pkg/registry/generic/etcd fabioy 1
657 k8s.io/kubernetes/pkg/registry/core/event thockin 1
658 k8s.io/kubernetes/pkg/registry/core/event/etcd krousey 1
659 k8s.io/kubernetes/pkg/registry/core/limitrange bgrant0607 1
660 k8s.io/kubernetes/pkg/registry/core/limitrange/etcd mikedanese 1
661 k8s.io/kubernetes/pkg/registry/core/namespace pmorie 1
662 k8s.io/kubernetes/pkg/registry/core/namespace/etcd yifan-gu 1
663 k8s.io/kubernetes/pkg/registry/core/node karlkfi 1
664 k8s.io/kubernetes/pkg/registry/core/node/etcd saad-ali 1
665 k8s.io/kubernetes/pkg/registry/core/persistentvolume lavalamp 1
666 k8s.io/kubernetes/pkg/registry/core/persistentvolume/etcd hurf 1
667 k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim wojtek-t 1
668 k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/etcd maisem 1
669 k8s.io/kubernetes/pkg/registry/core/pod spxtr 1
670 k8s.io/kubernetes/pkg/registry/core/pod/etcd hurf 1
671 k8s.io/kubernetes/pkg/registry/core/pod/rest mml 1
672 k8s.io/kubernetes/pkg/registry/core/podtemplate eparis 1
673 k8s.io/kubernetes/pkg/registry/core/podtemplate/etcd yujuhong 1
674 k8s.io/kubernetes/pkg/registry/core/resourcequota ixdy 1
675 k8s.io/kubernetes/pkg/registry/core/resourcequota/etcd gmarek 1
676 k8s.io/kubernetes/pkg/registry/core/secret smarterclayton 1
677 k8s.io/kubernetes/pkg/registry/core/secret/etcd thockin 1
678 k8s.io/kubernetes/pkg/registry/core/service madhusudancs 1
679 k8s.io/kubernetes/pkg/registry/core/service/allocator justinsb 1
680 k8s.io/kubernetes/pkg/registry/core/service/allocator/etcd childsb 1
681 k8s.io/kubernetes/pkg/registry/core/service/etcd zmerlynn 1
682 k8s.io/kubernetes/pkg/registry/core/service/ipallocator yifan-gu 1
683 k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller justinsb 1
684 k8s.io/kubernetes/pkg/registry/core/service/ipallocator/etcd dchen1107 1
685 k8s.io/kubernetes/pkg/registry/core/service/portallocator pmorie 1
686 k8s.io/kubernetes/pkg/registry/core/serviceaccount liggitt 0
687 k8s.io/kubernetes/pkg/registry/core/serviceaccount/etcd liggitt 0
688 k8s.io/kubernetes/pkg/registry/extensions/controller/etcd madhusudancs 1
689 k8s.io/kubernetes/pkg/registry/extensions/daemonset andyzheng0831 1
690 k8s.io/kubernetes/pkg/registry/extensions/daemonset/etcd roberthbailey 1
691 k8s.io/kubernetes/pkg/registry/extensions/deployment jszczepkowski 1
692 k8s.io/kubernetes/pkg/registry/extensions/deployment/etcd vulpecula 1
693 k8s.io/kubernetes/pkg/registry/extensions/ingress girishkalele 1
694 k8s.io/kubernetes/pkg/registry/extensions/ingress/etcd wojtek-t 1
695 k8s.io/kubernetes/pkg/registry/extensions/networkpolicy bgrant0607 1
696 k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/etcd brendandburns 1
697 k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/etcd justinsb 1
698 k8s.io/kubernetes/pkg/registry/extensions/replicaset jdef 1
699 k8s.io/kubernetes/pkg/registry/extensions/replicaset/etcd deads2k 1
700 k8s.io/kubernetes/pkg/registry/extensions/rest spxtr 1
701 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource lavalamp 1
702 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/etcd Q-Lee 1
703 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata pmorie 1
704 k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/etcd davidopp 1
705 k8s.io/kubernetes/pkg/registry/generic/registry jsafrane 1
706 k8s.io/kubernetes/pkg/registry/generic/rest smarterclayton 1
k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler jszczepkowski 0
707 k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget dchen1107 1
k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler/etcd jszczepkowski 0
708 k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/etcd kevin-wangzefeng 1
k8s.io/kubernetes/pkg/registry/ingress jsafrane 1
709 k8s.io/kubernetes/pkg/registry/storage/storageclass jdef 1
k8s.io/kubernetes/pkg/registry/ingress/etcd vulpecula 1
710 k8s.io/kubernetes/pkg/registry/storage/storageclass/etcd vulpecula 1
k8s.io/kubernetes/pkg/registry/job soltysh 0
k8s.io/kubernetes/pkg/registry/job/etcd soltysh 0
k8s.io/kubernetes/pkg/registry/limitrange apelisse 1
k8s.io/kubernetes/pkg/registry/limitrange/etcd zmerlynn 1
k8s.io/kubernetes/pkg/registry/namespace roberthbailey 1
k8s.io/kubernetes/pkg/registry/namespace/etcd justinsb 1
k8s.io/kubernetes/pkg/registry/networkpolicy xiang90 1
k8s.io/kubernetes/pkg/registry/networkpolicy/etcd xiang90 1
k8s.io/kubernetes/pkg/registry/node krousey 1
k8s.io/kubernetes/pkg/registry/node/etcd kevin-wangzefeng 1
k8s.io/kubernetes/pkg/registry/persistentvolume kevin-wangzefeng 1
k8s.io/kubernetes/pkg/registry/persistentvolume/etcd andyzheng0831 1
k8s.io/kubernetes/pkg/registry/persistentvolumeclaim bgrant0607 1
k8s.io/kubernetes/pkg/registry/persistentvolumeclaim/etcd deads2k 1
k8s.io/kubernetes/pkg/registry/petset rmmh 1
k8s.io/kubernetes/pkg/registry/petset/etcd dchen1107 1
k8s.io/kubernetes/pkg/registry/pod alex-mohr 1
k8s.io/kubernetes/pkg/registry/pod/etcd justinsb 1
k8s.io/kubernetes/pkg/registry/pod/rest childsb 1
k8s.io/kubernetes/pkg/registry/poddisruptionbudget jsafrane 1
k8s.io/kubernetes/pkg/registry/poddisruptionbudget/etcd alex-mohr 1
k8s.io/kubernetes/pkg/registry/podsecuritypolicy/etcd yifan-gu 1
k8s.io/kubernetes/pkg/registry/podtemplate jsafrane 1
k8s.io/kubernetes/pkg/registry/podtemplate/etcd mtaufen 1
k8s.io/kubernetes/pkg/registry/replicaset mwielgus 1
k8s.io/kubernetes/pkg/registry/replicaset/etcd deads2k 1
k8s.io/kubernetes/pkg/registry/resourcequota maisem 1
k8s.io/kubernetes/pkg/registry/resourcequota/etcd cjcullen 1
k8s.io/kubernetes/pkg/registry/scheduledjob soltysh 0
k8s.io/kubernetes/pkg/registry/scheduledjob/etcd soltysh 1
k8s.io/kubernetes/pkg/registry/secret spxtr 1
k8s.io/kubernetes/pkg/registry/secret/etcd brendandburns 1
k8s.io/kubernetes/pkg/registry/service thockin 1
k8s.io/kubernetes/pkg/registry/service/allocator pmorie 1
k8s.io/kubernetes/pkg/registry/service/allocator/etcd caesarxuchao 1
k8s.io/kubernetes/pkg/registry/service/etcd alex-mohr 1
k8s.io/kubernetes/pkg/registry/service/ipallocator jlowdermilk 1
k8s.io/kubernetes/pkg/registry/service/ipallocator/controller lavalamp 1
k8s.io/kubernetes/pkg/registry/service/ipallocator/etcd andyzheng0831 1
k8s.io/kubernetes/pkg/registry/service/portallocator karlkfi 1
k8s.io/kubernetes/pkg/registry/serviceaccount liggitt 0
k8s.io/kubernetes/pkg/registry/serviceaccount/etcd liggitt 0
k8s.io/kubernetes/pkg/registry/storageclass ixdy 1
k8s.io/kubernetes/pkg/registry/storageclass/etcd jbeda 1
k8s.io/kubernetes/pkg/registry/thirdpartyresource vulpecula 1
k8s.io/kubernetes/pkg/registry/thirdpartyresource/etcd hurf 1
k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata childsb 1
k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd lavalamp 1
711 k8s.io/kubernetes/pkg/runtime wojtek-t 0
712 k8s.io/kubernetes/pkg/runtime/serializer wojtek-t 0
713 k8s.io/kubernetes/pkg/runtime/serializer/json wojtek-t 0
714 k8s.io/kubernetes/pkg/runtime/serializer/protobuf wojtek-t 0
k8s.io/kubernetes/pkg/runtime/serializer/recognizer wojtek-t 0
715 k8s.io/kubernetes/pkg/runtime/serializer/recognizer/testing wojtek-t 0
k8s.io/kubernetes/pkg/runtime/serializer/recognizer/testing piosz 1
716 k8s.io/kubernetes/pkg/runtime/serializer/streaming wojtek-t 0
717 k8s.io/kubernetes/pkg/runtime/serializer/versioning wojtek-t 0
718 k8s.io/kubernetes/pkg/security/apparmor bgrant0607 1
720 k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor vulpecula 1
721 k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities erictune 0
722 k8s.io/kubernetes/pkg/security/podsecuritypolicy/group erictune 0
723 k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp mml 1
724 k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux erictune 0
725 k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl andyzheng0831 1
726 k8s.io/kubernetes/pkg/security/podsecuritypolicy/user erictune 0
735 k8s.io/kubernetes/pkg/storage/storagebackend/factory maisem 1
736 k8s.io/kubernetes/pkg/util jbeda 1
737 k8s.io/kubernetes/pkg/util/async spxtr 1
k8s.io/kubernetes/pkg/util/atomic kargakis 1
738 k8s.io/kubernetes/pkg/util/bandwidth thockin 1
739 k8s.io/kubernetes/pkg/util/cache thockin 1
740 k8s.io/kubernetes/pkg/util/cert karlkfi 1
742 k8s.io/kubernetes/pkg/util/config girishkalele 1
743 k8s.io/kubernetes/pkg/util/configz ixdy 1
744 k8s.io/kubernetes/pkg/util/dbus roberthbailey 1
k8s.io/kubernetes/pkg/util/deployment asalkeld 0
745 k8s.io/kubernetes/pkg/util/diff piosz 1
k8s.io/kubernetes/pkg/util/ebtables thockin 1
746 k8s.io/kubernetes/pkg/util/env asalkeld 0
747 k8s.io/kubernetes/pkg/util/errors jlowdermilk 1
748 k8s.io/kubernetes/pkg/util/exec krousey 1
775 k8s.io/kubernetes/pkg/util/slice quinton-hoole 0
776 k8s.io/kubernetes/pkg/util/strategicpatch brendandburns 1
777 k8s.io/kubernetes/pkg/util/strings quinton-hoole 0
778 k8s.io/kubernetes/pkg/util/term wojtek-t 1
779 k8s.io/kubernetes/pkg/util/testing jlowdermilk 1
780 k8s.io/kubernetes/pkg/util/threading roberthbailey 1
781 k8s.io/kubernetes/pkg/util/validation Q-Lee 1
803 k8s.io/kubernetes/pkg/volume/host_path jbeda 1
804 k8s.io/kubernetes/pkg/volume/iscsi cjcullen 1
805 k8s.io/kubernetes/pkg/volume/nfs justinsb 1
k8s.io/kubernetes/pkg/volume/persistent_claim krousey 1
806 k8s.io/kubernetes/pkg/volume/quobyte yujuhong 1
807 k8s.io/kubernetes/pkg/volume/rbd piosz 1
808 k8s.io/kubernetes/pkg/volume/secret rmmh 1
810 k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations freehan 1
811 k8s.io/kubernetes/pkg/volume/vsphere_volume deads2k 1
812 k8s.io/kubernetes/pkg/watch mwielgus 1
k8s.io/kubernetes/pkg/watch/json thockin 1
813 k8s.io/kubernetes/pkg/watch/versioned childsb 1
814 k8s.io/kubernetes/plugin/pkg/admission/admit piosz 1
815 k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages kargakis 1
816 k8s.io/kubernetes/plugin/pkg/admission/antiaffinity timothysc 1
817 k8s.io/kubernetes/plugin/pkg/admission/deny eparis 1
818 k8s.io/kubernetes/plugin/pkg/admission/exec deads2k 1
819 k8s.io/kubernetes/plugin/pkg/admission/gc Q-Lee 1
820 k8s.io/kubernetes/plugin/pkg/admission/imagepolicy apelisse 1
821 k8s.io/kubernetes/plugin/pkg/admission/initialresources piosz 0
822 k8s.io/kubernetes/plugin/pkg/admission/limitranger ncdc 1
824 k8s.io/kubernetes/plugin/pkg/admission/namespace/exists derekwaynecarr 0
825 k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle derekwaynecarr 0
826 k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label jdef 1
827 k8s.io/kubernetes/plugin/pkg/admission/podnodeselector yifan-gu 1
828 k8s.io/kubernetes/plugin/pkg/admission/resourcequota fabioy 1
829 k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy maisem 1
830 k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny vulpecula 1
832 k8s.io/kubernetes/plugin/pkg/admission/storageclass/default pmorie 1
833 k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/allow liggitt 0
834 k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/passwordfile liggitt 0
835 k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/anonymous ixdy 1
836 k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/basicauth liggitt 0
837 k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/union liggitt 0
838 k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/x509 liggitt 0
839 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/anytoken timstclair 1
840 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc brendandburns 1
841 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile liggitt 0
842 k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook ghodss 1
843 k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac hurf 1
844 k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy jszczepkowski 1
845 k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook hurf 1
846 k8s.io/kubernetes/plugin/pkg/client/auth/oidc cjcullen 1
847 k8s.io/kubernetes/plugin/pkg/scheduler fgrzadkowski 0
k8s.io/kubernetes/plugin/pkg/scheduler/algorithm fgrzadkowski 0
848 k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates fgrzadkowski 0
849 k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities fgrzadkowski 0
850 k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider fgrzadkowski 0
852 k8s.io/kubernetes/plugin/pkg/scheduler/api/validation fgrzadkowski 0
853 k8s.io/kubernetes/plugin/pkg/scheduler/factory fgrzadkowski 0
854 k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache fgrzadkowski 0
k8s.io/kubernetes/test/integration lavalamp 1
855 k8s.io/kubernetes/test/e2e kevin-wangzefeng 1
856 k8s.io/kubernetes/test/e2e/chaosmonkey pmorie 1
857 k8s.io/kubernetes/test/e2e_node mml 1
858 k8s.io/kubernetes/test/integration/auth jbeda 1
859 k8s.io/kubernetes/test/integration/client Q-Lee 1
860 k8s.io/kubernetes/test/integration/configmap Q-Lee 1
873 k8s.io/kubernetes/test/integration/replicaset janetkuo 1
874 k8s.io/kubernetes/test/integration/replicationcontroller jbeda 1
875 k8s.io/kubernetes/test/integration/scheduler mikedanese 1
876 k8s.io/kubernetes/test/integration/scheduler_perf roberthbailey 1
877 k8s.io/kubernetes/test/integration/secrets rmmh 1
878 k8s.io/kubernetes/test/integration/serviceaccount deads2k 1
879 k8s.io/kubernetes/test/integration/storageclasses andyzheng0831 1
k8s.io/kubernetes/third_party/forked/reflect yifan-gu 1
880 k8s.io/kubernetes/test/integration/thirdparty ixdy 1
k8s.io/kubernetes/third_party/golang/expansion dchen1107 1
881 k8s.io/kubernetes/test/list pmorie 1
k8s.io/kubernetes/third_party/golang/go/ast mml 1
882 kubelet Clean up pods on node kubelet should be able to delete * pods per node in *. yujuhong 0
k8s.io/kubernetes/third_party/golang/go/build pmorie 1
k8s.io/kubernetes/third_party/golang/go/constant Q-Lee 1
k8s.io/kubernetes/third_party/golang/go/doc fabioy 1
k8s.io/kubernetes/third_party/golang/go/parser davidopp 1
k8s.io/kubernetes/third_party/golang/go/printer madhusudancs 1
k8s.io/kubernetes/third_party/golang/go/scanner hurf 1
k8s.io/kubernetes/third_party/golang/go/token mml 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/discovery hurf 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/dynamic smarterclayton 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/rest yujuhong 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/auth luxas 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/cache mikedanese 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/clientcmd spxtr 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/clientcmd/api pmorie 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/tools/record yifan-gu 1
k8s.io/kubernetes/vendor/k8s.io/client-go/1.4/transport jszczepkowski 1
kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s. yujuhong 0