mirror of https://github.com/k3s-io/k3s

Address PR comments, randomly assign owners for new tests.

parent 3d485098c3
commit 616e938662
@@ -14,21 +14,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import csv
import re
import json
import os
import random
import sys
import time
import urllib2
import zlib

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
OWNERS_PATH = os.path.abspath(
    os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
    'aronchick', 'bgrant0607-nocc', 'goltermann', 'sarahnovotny'}


def get_test_history():
    url = time.strftime('https://storage.googleapis.com/kubernetes-test-history/logs/%Y-%m-%d.json')
    url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json')
    resp = urllib2.urlopen(url)
    content = resp.read()
    if resp.headers.get('content-encoding') == 'gzip':
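The hunk cuts off at the gzip check, so the decompression itself is not shown here. As a reading aid, a minimal sketch of how a gzip-encoded response body is typically inflated and parsed; the helper name and its body are assumptions, not lines from this file:

import json
import zlib

def parse_history_response(resp, content):
    # Hypothetical completion of the elided branch: passing
    # 16 + zlib.MAX_WBITS tells zlib to expect a gzip header
    # on the compressed stream before inflating it.
    if resp.headers.get('content-encoding') == 'gzip':
        content = zlib.decompress(content, 16 + zlib.MAX_WBITS)
    return json.loads(content)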
@@ -44,42 +49,47 @@ def normalize(name):


def load_owners(fname):
    owners = {}
    for n, (name, owner, random_assignment) in enumerate(csv.reader(open(fname))):
        if n == 0:
            continue  # header
        owners[normalize(name)] = (owner, int(random_assignment))
    return owners
    with open(fname) as f:
        for n, (name, owner, random_assignment) in enumerate(csv.reader(f)):
            if n == 0:
                continue  # header
            owners[normalize(name)] = (owner, int(random_assignment))
    return owners


def write_owners(fname, owners):
    out = csv.writer(open(fname, 'w'))
    out.writerow(['name', 'owner', 'auto-assigned'])
    for name, (owner, random_assignment) in sorted(owners.items()):
        out.writerow([name, owner, int(random_assignment)])
    with open(fname, 'w') as f:
        out = csv.writer(f, lineterminator='\n')
        out.writerow(['name', 'owner', 'auto-assigned'])
        for name, (owner, random_assignment) in sorted(owners.items()):
            out.writerow([name, owner, int(random_assignment)])
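A small usage sketch of the two rewritten helpers, round-tripping the CSV through the new context-manager versions; the test name and owners below are invented for illustration:

owners = load_owners(OWNERS_PATH)            # {'some test name': ('alice', 1), ...}
owners['a brand new test'] = ('bob', True)   # owner plus auto-assigned flag
write_owners(OWNERS_PATH + '.new', owners)   # header row, then rows sorted by name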


def get_maintainers():
    # Github doesn't seem to support team membership listing without org admin privileges.
    # Instead, we do it manually:
    # Github doesn't seem to support team membership listing without a key with
    # org admin privileges. Instead, we do it manually:
    # Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
    # Run this in the js console:
    # [].slice.call(document.querySelectorAll('.team-member-username a')).map(e => e.textContent.trim())
    return ["a-robinson", "alex-mohr", "amygdala", "andyzheng0831", "apelisse", "aronchick",
            "ArtfulCoder", "bgrant0607", "bgrant0607-nocc", "bprashanth", "brendandburns",
            "caesarxuchao", "childsb", "cjcullen", "david-mcmahon", "davidopp", "dchen1107",
            "deads2k", "derekwaynecarr", "dubstack", "eparis", "erictune", "fabioy",
            "fejta", "fgrzadkowski", "freehan", "ghodss", "girishkalele", "gmarek",
            "goltermann", "grodrigues3", "hurf", "ingvagabund", "ixdy",
            "jackgr", "janetkuo", "jbeda", "jdef", "jingxu97", "jlowdermilk",
            "jsafrane", "jszczepkowski", "justinsb", "kargakis", "karlkfi",
            "kelseyhightower", "kevin-wangzefeng", "krousey", "lavalamp",
            "liggitt", "luxas", "madhusudancs", "maisem", "mansoorj",
            "matchstick", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
            "nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
            "Random-Liu", "rmmh", "roberthbailey", "ronnielai", "saad-ali", "sarahnovotny",
            "smarterclayton", "soltysh", "spxtr", "sttts", "swagiaal", "thockin",
            "timothysc", "timstclair", "tmrts", "vishh", "vulpecula", "wojtek-t", "xiang90",
            "yifan-gu", "yujuhong", "zmerlynn"]
    # [].slice.call(document.querySelectorAll('.team-member-username a')).map(
    #     e => e.textContent.trim())
    ret = {"a-robinson", "alex-mohr", "amygdala", "andyzheng0831", "apelisse",
           "aronchick", "ArtfulCoder", "bgrant0607", "bgrant0607-nocc",
           "bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
           "david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
           "dubstack", "eparis", "erictune", "fabioy", "fejta", "fgrzadkowski",
           "freehan", "ghodss", "girishkalele", "gmarek", "goltermann",
           "grodrigues3", "hurf", "ingvagabund", "ixdy",
           "jackgr", "janetkuo", "jbeda", "jdef", "jingxu97", "jlowdermilk",
           "jsafrane", "jszczepkowski", "justinsb", "kargakis", "karlkfi",
           "kelseyhightower", "kevin-wangzefeng", "krousey", "lavalamp",
           "liggitt", "luxas", "madhusudancs", "maisem", "mansoorj", "matchstick",
           "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc", "nikhiljindal",
           "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole", "Random-Liu",
           "rmmh", "roberthbailey", "ronnielai", "saad-ali", "sarahnovotny",
           "smarterclayton", "soltysh", "spxtr", "sttts", "swagiaal", "thockin",
           "timothysc", "timstclair", "tmrts", "vishh", "vulpecula", "wojtek-t",
           "xiang90", "yifan-gu", "yujuhong", "zmerlynn"}
    return sorted(ret - SKIP_MAINTAINERS)
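Switching from a bare list to a set makes the SKIP_MAINTAINERS filter a plain set difference; a toy example with invented names:

# Toy illustration of the new set-difference filter (names invented):
skip = {'goltermann', 'sarahnovotny'}
team = {'alice', 'goltermann', 'bob', 'sarahnovotny'}
print sorted(team - skip)  # ['alice', 'bob'] -- skipped accounts drop out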


def main():
@@ -99,10 +109,25 @@ def main():
    for name in outdated_tests:
        owners.pop(name)

    print '# UNEXPECTED MAINTAINERS (randomly assigned, not in kubernetes-maintainers)'
    print '# UNEXPECTED MAINTAINERS ',
    print '(randomly assigned, but not in kubernetes-maintainers)'
    for name, (owner, random_assignment) in sorted(owners.iteritems()):
        if random_assignment and owner not in maintainers:
            print '%-16s %s' % (owner, name)
            owners.pop(name)
    print

    owner_counts = collections.Counter(
        owner for name, (owner, random) in owners.iteritems()
        if owner in maintainers)
    for test_name in set(test_names) - set(owners):
        new_owner, _count = random.choice(owner_counts.most_common()[-4:])
        owner_counts[new_owner] += 1
        owners[test_name] = (new_owner, True)

    print '# Tests per maintainer:'
    for owner, count in owner_counts.most_common():
        print '%-20s %3d' % (owner, count)

    write_owners(OWNERS_PATH + '.new', owners)
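This loop is the randomized assignment the commit message describes: each still-unowned test goes to one of the four least-loaded maintainers, and the counter is bumped so later picks stay balanced. A toy run with invented counts:

import collections
import random

owner_counts = collections.Counter(
    {'alice': 9, 'bob': 2, 'carol': 3, 'dan': 1, 'erin': 5})
# most_common() sorts descending, so [-4:] is the four least-loaded owners.
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
owner_counts[new_owner] += 1  # 'alice' (most loaded) can never be chosen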

@@ -1,5 +1,6 @@
test name,owner,auto-assigned
name,owner,auto-assigned
Addon update should propagate add-on file changes,eparis,1
AfterSuite,deads2k,1
Autoscaling should scale cluster size based on cpu reservation,davidopp,1
Autoscaling should scale cluster size based on cpu utilization,thockin,1
Autoscaling should scale cluster size based on memory reservation,hurf,1
@@ -10,11 +11,19 @@ Cassandra should create and scale cassandra,fabioy,1
Celery-RabbitMQ should create and stop celery+rabbitmq servers,luxas,1
Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.,lavalamp,1
Cluster level logging using Elasticsearch should check that logs from pods on all nodes are ingested into Elasticsearch,david-mcmahon,1
Cluster size autoscaling Should correctly handle pending pods,jszczepkowski,0
Cluster size autoscaling Should scale cluster size based on cpu reservation,dchen1107,1
Cluster size autoscaling Should scale cluster size based on cpu utilization,a-robinson,1
Cluster size autoscaling Should scale cluster size based on memory reservation,ghodss,1
Cluster size autoscaling Should scale cluster size based on memory utilization,childsb,1
Cluster size autoscaling should add node to the particular mig,spxtr,1
Cluster size autoscaling should correctly scale down after a node is not needed,pmorie,1
Cluster size autoscaling should correctly scale down after a node is not needed when there is non autoscaled pool,krousey,1
Cluster size autoscaling should disable node pool autoscaling,Q-Lee,1
Cluster size autoscaling should increase cluster size if pending pods are small,a-robinson,1
Cluster size autoscaling should increase cluster size if pending pods are small and there is another node pool that is not autoscaled,apelisse,1
Cluster size autoscaling should increase cluster size if pods are pending due to host port conflict,brendandburns,1
Cluster size autoscaling should scale up correct target pool,mikedanese,1
Cluster size autoscaling shouldn't increase cluster size if pending pod is too large,karlkfi,1
ClusterDns should create pod that uses dns,sttts,0
ConfigMap should be consumable from pods in volume,alex-mohr,1
ConfigMap should be consumable from pods in volume as non-root,ArtfulCoder,1
@@ -22,9 +31,20 @@ ConfigMap should be consumable from pods in volume as non-root with FSGroup,robe
ConfigMap should be consumable from pods in volume with mappings,karlkfi,1
ConfigMap should be consumable from pods in volume with mappings as non-root,apelisse,1
ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup,zmerlynn,1
ConfigMap should be consumable from pods in volumpe,mwielgus,1
ConfigMap should be consumable via environment variable,a-robinson,1
ConfigMap updates should be reflected in volume,kevin-wangzefeng,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from gcr.io ,Random-Liu,0
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull from private registry with secret,mikedanese,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from docker hub,girishkalele,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should be able to pull image from gcr.io,Random-Liu,0
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull from private registry without secret,yifan-gu,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull image from invalid registry,janetkuo,1
Container Runtime Conformance Test container runtime conformance blackbox test when running a container with a new image should not be able to pull non-existing image from gcr.io,kevin-wangzefeng,1
Container Runtime Conformance Test container runtime conformance blackbox test when starting a container that exits it should run with the expected status,luxas,1
Container Runtime Conformance Test container runtime conformance blackbox test when starting a container that exits should report termination message if TerminationMessagePath is set,timothysc,1
DNS should provide DNS for pods for Hostname and Subdomain Annotation,mtaufen,1
DNS should provide DNS for services,roberthbailey,1
DNS should provide DNS for the cluster,roberthbailey,1
Daemon set should launch a daemon pod on every node of the cluster,andyzheng0831,1
Daemon set should run and stop complex daemon,ArtfulCoder,1
Daemon set should run and stop simple daemon,mtaufen,1
@@ -36,6 +56,9 @@ Density should allow starting 3 pods per node,gmarek,0
Density should allow starting 30 pods per node,gmarek,0
Density should allow starting 50 pods per node,wojtek-t,0
Density should allow starting 95 pods per node,wojtek-t,0
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
Deployment deployment should create new pods,pwittrock,0
Deployment deployment should delete old pods and create new ones,nikhiljindal,0
Deployment deployment should delete old replica sets,pwittrock,0
@@ -44,21 +67,21 @@ Deployment deployment should scale up and down in the right order,pwittrock,0
Deployment deployment should support rollback,pwittrock,0
Deployment deployment should support rollback when there's replica set with no revision,pwittrock,0
Deployment deployment should support rollover,pwittrock,0
Deployment paused deployment should be able to scale,janetkuo,1
Deployment paused deployment should be ignored by the controller,kargakis,0
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
DNS should provide DNS for pods for Hostname and Subdomain Annotation,mtaufen,1
DNS should provide DNS for services,roberthbailey,1
DNS should provide DNS for the cluster,roberthbailey,1
Docker Containers should be able to override the image's default arguments (docker cmd),maisem,0
Docker Containers should be able to override the image's default command and arguments,maisem,0
Docker Containers should be able to override the image's default commmand (docker entrypoint),maisem,0
Docker Containers should use the image defaults if command and args are blank,vishh,0
Downward API should create a pod that prints his name and namespace,nhlfr,0
Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars,deads2k,1
Downward API should provide default limits.cpu/memory from node capacity,derekwaynecarr,0
Downward API should provide pod IP as an env var,nhlfr,0
Downward API should provide pod name and namespace as env vars,nhlfr,0
Downward API volume should provide container's cpu limit,smarterclayton,1
Downward API volume should provide container's cpu request,krousey,1
Downward API volume should provide container's memory limit,david-mcmahon,1
Downward API volume should provide container's memory request,mikedanese,1
Downward API volume should provide labels and annotations files,nhlfr,0
Downward API volume should provide podname as non-root with fsgroup,karlkfi,1
Downward API volume should provide podname only,mwielgus,1
@@ -81,13 +104,14 @@ EmptyDir volumes should have the correct mode,mikedanese,1
EmptyDir volumes should support r/w,davidopp,1
EmptyDir volumes volume on default medium should have the correct mode,girishkalele,1
EmptyDir volumes volume on tmpfs should have the correct mode,mwielgus,1
"EmptyDir volumes when FSGroup is specified files with FSGroup ownership should support (root,0644,tmpfs)",justinsb,1
EmptyDir volumes when FSGroup is specified new files should be created with FSGroup ownership when container is non-root,brendandburns,1
EmptyDir volumes when FSGroup is specified new files should be created with FSGroup ownership when container is root,childsb,1
EmptyDir volumes when FSGroup is specified volume on default medium should have the correct mode using FSGroup,ArtfulCoder,1
EmptyDir volumes when FSGroup is specified volume on tmpfs should have the correct mode using FSGroup,timothysc,1
EmptyDir wrapper volumes should becomes running,deads2k,1
Etcd failure should recover from network partition with master,justinsb,1
Etcd failure should recover from SIGKILL,pmorie,1
Etcd failure should recover from network partition with master,justinsb,1
Events should be sent by kubelets and the scheduler about pods scheduling and running,zmerlynn,1
Examples e2e Cassandra should create and scale cassandra,bgrant0607,1
Examples e2e Celery-RabbitMQ should create and stop celery+rabbitmq servers,apelisse,1
@@ -101,28 +125,37 @@ Examples e2e Secret should create a pod that reads a secret,alex-mohr,1
"Examples e2e Spark should start spark master, driver and workers",maisem,1
"Examples e2e Storm should create and stop Zookeeper, Nimbus and Storm worker servers",mwielgus,1
Federated Services DNS non-local federated service missing local service should never find DNS entries for a missing local service,mml,0
Garbage collector should handle the creation of 1000 pods,wonderfly,1
Federated Services DNS non-local federated service should be able to discover a non-local federated service,jlowdermilk,1
Federated Services DNS should be able to discover a federated service,derekwaynecarr,1
Federated Services Service creation should create matching services in underlying clusters,jbeda,1
Federated Services Service creation should succeed,rmmh,1
Federation apiserver Cluster objects should be created and deleted successfully,ghodss,1
Federation apiserver should allow creation of cluster api objects,mtaufen,1
GCE L7 LoadBalancer Controller should create GCE L7 loadbalancers and verify Ingress,bprashanth,0
GKE local SSD should write and read from node local SSD,fabioy,0
GKE node pools should create a cluster with multiple node pools,kargakis,1
Garbage collector should handle the creation of 1000 pods,xiang90,1
"Generated release_1_2 clientset should create pods, delete pods, watch pods",ncdc,1
"Generated release_1_3 clientset should create pods, delete pods, watch pods",krousey,1
GKE local SSD should write and read from node local SSD,fabioy,0
Hazelcast should create and scale hazelcast,mikedanese,1
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5,jlowdermilk,1
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 5 pods to 3 pods and from 3 to 1,bgrant0607,1
Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5,ghodss,1
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1,davidopp,1
Horizontal pod autoscaling (scale resource: CPU) ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 2 pods to 1 pod,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 2 pods to 1 pod using HPA version v1,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling (scale resource: CPU) ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability,jszczepkowski,0
Horizontal pod autoscaling should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU),jszczepkowski,0
Horizontal pod autoscaling should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU),jszczepkowski,0
hostDir should give a volume the correct mode,bgrant0607,1
hostDir should support r/w on tmpfs,erictune,0
hostPath should give a volume the correct mode,roberthbailey,1
hostPath should support r/w,roberthbailey,1
hostPath should support subPath,krousey,1
Image Container Conformance Test image conformance blackbox test when testing image that does not exist should ignore pull failures,jsafrane,1
Image Container Conformance Test image conformance blackbox test when testing images that exist It should present successfully,yujuhong,1
Image Container Conformance Test image conformance blackbox test when testing images that exist should list pulled images,maisem,1
Initial Resources should set initial resources based on historical data,piosz,0
Job should delete a job,soltysh,0
Job should fail a job,soltysh,0
@@ -133,6 +166,260 @@ Job should run a job to completion when tasks succeed,soltysh,0
Job should scale a job down,soltysh,0
Job should scale a job up,soltysh,0
Job should stop a job,soltysh,0
Kibana Logging Instances Is Alive should check that the Kibana logging instance is alive,swagiaal,0
KubeProxy should test kube-proxy,vulpecula,1
Kubectl client Guestbook application should create and stop a working application,pwittrock,0
Kubectl client Kubectl api-versions should check if v1 is in available api versions,pwittrock,0
Kubectl client Kubectl apply should apply a new configuration to an existing RC,pwittrock,0
Kubectl client Kubectl apply should reuse nodePort when apply to an existing SVC,pwittrock,0
Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info,pwittrock,0
Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods,pwittrock,0
Kubectl client Kubectl expose should create services for rc,pwittrock,0
Kubectl client Kubectl label should update the label on a resource,pwittrock,0
Kubectl client Kubectl logs should be able to retrieve and filter logs,jlowdermilk,0
Kubectl client Kubectl patch should add annotations for pods in rc,janetkuo,0
Kubectl client Kubectl rolling-update should support rolling-update to same image,janetkuo,0
"Kubectl client Kubectl run --rm job should create a job from an image, then delete the job",soltysh,0
Kubectl client Kubectl run default should create an rc or deployment from an image,janetkuo,0
Kubectl client Kubectl run deployment should create a deployment from an image,janetkuo,0
Kubectl client Kubectl run job should create a job from an image when restart is Never,soltysh,0
Kubectl client Kubectl run job should create a job from an image when restart is OnFailure,soltysh,0
Kubectl client Kubectl run pod should create a pod from an image when restart is Never,janetkuo,0
Kubectl client Kubectl run pod should create a pod from an image when restart is OnFailure,janetkuo,0
Kubectl client Kubectl run rc should create an rc from an image,janetkuo,0
Kubectl client Kubectl taint should update the taint on a node,pwittrock,0
Kubectl client Kubectl version should check is all data is printed,janetkuo,0
Kubectl client Proxy server should support --unix-socket=/path,zmerlynn,1
Kubectl client Proxy server should support proxy with --port 0,ncdc,1
Kubectl client Simple pod should support exec,ncdc,0
Kubectl client Simple pod should support exec through an HTTP proxy,ncdc,0
Kubectl client Simple pod should support inline execution and attach,ncdc,0
Kubectl client Simple pod should support port-forward,ncdc,0
Kubectl client Update Demo should create and stop a replication controller,sttts,0
Kubectl client Update Demo should do a rolling update of a replication controller,sttts,0
Kubectl client Update Demo should scale a replication controller,sttts,0
Kubelet Container Manager oom score adjusting when scheduling a busybox command that always fails in a pod should be possible to delete,jbeda,1
Kubelet Container Manager oom score adjusting when scheduling a busybox command that always fails in a pod should have an error terminated reason,vulpecula,1
Kubelet experimental resource usage tracking for 100 pods per node over 20m0s,yujuhong,0
Kubelet experimental resource usage tracking over 30m0s with 50 pods per node.,yujuhong,0
Kubelet experimental resource usage tracking resource tracking for 100 pods per node,ghodss,0
Kubelet metrics api when querying /stats/summary it should report resource usage through the stats api,fabioy,1
Kubelet regular resource usage tracking for 0 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking for 100 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking for 35 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking over 30m0s with 0 pods per node.,yujuhong,0
Kubelet regular resource usage tracking over 30m0s with 35 pods per node.,yujuhong,0
Kubelet regular resource usage tracking resource tracking for 0 pods per node,pmorie,1
Kubelet regular resource usage tracking resource tracking for 100 pods per node,justinsb,1
Kubelet regular resource usage tracking resource tracking for 35 pods per node,madhusudancs,1
Kubelet when scheduling a busybox command in a pod it should print the output to logs,ixdy,1
Kubelet when scheduling a read only busybox container it should not write to root filesystem,timothysc,1
KubeletManagedEtcHosts should test kubelet managed /etc/hosts file,ArtfulCoder,1
Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive,wonderfly,0
LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied.,cjcullen,1
Liveness liveness pods should be automatically restarted,andyzheng0831,1
Load capacity should be able to handle 3 pods per node,gmarek,0
Load capacity should be able to handle 30 pods per node,wojtek-t,0
Loadbalancing: L7 GCE shoud create ingress with given static-ip,vulpecula,1
Loadbalancing: L7 GCE should conform to Ingress spec,andyzheng0831,1
MasterCerts should have all expected certs on the master,apelisse,1
MaxPods Validates MaxPods limit number of pods that are allowed to run.,gmarek,0
Mesos applies slave attributes as labels,justinsb,1
Mesos schedules pods annotated with roles on correct slaves,timstclair,1
Mesos starts static pods on every node in the mesos cluster,lavalamp,1
MetricsGrabber should grab all metrics from API server.,gmarek,0
MetricsGrabber should grab all metrics from a ControllerManager.,gmarek,0
MetricsGrabber should grab all metrics from a Kubelet.,gmarek,0
MetricsGrabber should grab all metrics from a Scheduler.,gmarek,0
MirrorPod when create a mirror pod should be recreated when mirror pod forcibly deleted,roberthbailey,1
MirrorPod when create a mirror pod should be recreated when mirror pod gracefully deleted,justinsb,1
MirrorPod when create a mirror pod should be updated when static pod updated,saad-ali,1
Monitoring should verify monitoring pods and all cluster nodes are available on influxdb using heapster.,piosz,0
Namespaces Delete 90 percent of 100 namespace in 150 seconds,caesarxuchao,1
Namespaces Delete ALL of 100 namespace in 150 seconds,luxas,1
Namespaces should always delete fast (ALL of 100 namespaces in 150 seconds),rmmh,1
Namespaces should delete fast enough (90 percent of 100 namespaces in 150 seconds),kevin-wangzefeng,1
Namespaces should ensure that all pods are removed when a namespace is deleted.,xiang90,1
Namespaces should ensure that all services are removed when a namespace is deleted.,pmorie,1
Networking Granular Checks should function for pod communication between nodes,freehan,0
Networking Granular Checks should function for pod communication on a single node,freehan,0
Networking IPerf should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients),ghodss,1
Networking should function for intra-pod communication,sttts,0
Networking should provide Internet connection for containers,sttts,0
"Networking should provide unchanging, static URL paths for kubernetes api services",freehan,0
"Networking should provide unchanging, static URL paths for kubernetes api services.",madhusudancs,1
NodeOutOfDisk runs out of disk space,vishh,0
NodeProblemDetector KernelMonitor should generate node condition and events for corresponding errors,Random-Liu,0
Nodes Network when a minion node becomes unreachable recreates pods scheduled on the unreachable minion node AND allows scheduling of pods on a minion after it rejoins the cluster,childsb,1
Nodes Network when a node becomes unreachable All pods on the unreachable node should be marked as NotReady upon the node turn NotReady AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout,freehan,0
Nodes Network when a node becomes unreachable recreates pods scheduled on the unreachable node AND allows scheduling of pods on a node after it rejoins the cluster,madhusudancs,1
Nodes Resize should be able to add nodes,piosz,1
Nodes Resize should be able to delete nodes,zmerlynn,1
"PersistentVolumes NFS volume can be created, bound, retrieved, unbound, and used by a pod",jsafrane,0
"PersistentVolumes should create a PersistentVolume, Claim, and a client Pod that will test the read/write access of the volume",ArtfulCoder,1
Pet Store should scale to persist a nominal number ( 50 ) of transactions in 1m0s seconds,timstclair,1
PetSet Basic PetSet functionality should handle healthy pet restarts during scale,kevin-wangzefeng,1
PetSet Basic PetSet functionality should provide basic identity,girishkalele,1
PetSet Deploy clustered applications should creating a working mysql cluster,piosz,1
PetSet Deploy clustered applications should creating a working redis cluster,mtaufen,1
PetSet Deploy clustered applications should creating a working zookeeper cluster,rmmh,1
"Pod Disks Should schedule a pod w/ a RW PD, gracefully remove it, then schedule it on another host",alex-mohr,1
"Pod Disks Should schedule a pod w/ a readonly PD on two hosts, then remove both gracefully.",ghodss,1
"Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD, remove it, then schedule it on another host",saad-ali,0
"Pod Disks should schedule a pod w/ a RW PD, ungracefully remove it, then schedule it on another host",mml,1
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both ungracefully.",kargakis,1
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both.",saad-ali,0
"Pod Disks should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession",saad-ali,0
Pods should *not* be restarted with a /healthz http liveness probe,cjcullen,1
"Pods should *not* be restarted with a docker exec ""cat /tmp/health"" liveness probe",vishh,0
Pods should allow activeDeadlineSeconds to be updated,derekwaynecarr,0
Pods should be restarted with a /healthz http liveness probe,girishkalele,1
"Pods should be restarted with a docker exec ""cat /tmp/health"" liveness probe",bgrant0607,1
Pods should be schedule with cpu and memory limits,vishh,0
Pods should be submitted and removed,davidopp,1
Pods should be updated,derekwaynecarr,1
Pods should cap back-off at MaxContainerBackOff,maisem,1
Pods should contain environment variables for services,jlowdermilk,1
Pods should get a host IP,xiang90,1
Pods should have monotonically increasing restart count,Random-Liu,1
Pods should have their auto-restart back-off timer reset on image update,mikedanese,1
Pods should invoke init containers on a RestartAlways pod,cjcullen,1
Pods should invoke init containers on a RestartNever pod,justinsb,1
Pods should not start app containers and fail the pod if init containers fail on a RestartNever pod,maisem,0
Pods should not start app containers if init containers fail on a RestartAlways pod,david-mcmahon,1
Pods should support remote command execution over websockets,madhusudancs,1
Pods should support retrieving logs from the container over websockets,vishh,0
"Port forwarding With a server that expects a client request should support a client that connects, sends data, and disconnects",sttts,0
"Port forwarding With a server that expects a client request should support a client that connects, sends no data, and disconnects",sttts,0
"Port forwarding With a server that expects no client request should support a client that connects, sends no data, and disconnects",sttts,0
PreStop should call prestop when killing a pod,ncdc,1
PrivilegedPod should test privileged pod,vishh,0
Probing container with readiness probe should not be ready before initial delay and never restart,smarterclayton,1
Probing container with readiness probe that fails should never be ready and never restart,mtaufen,1
Proxy version v1 should proxy logs on node,girishkalele,1
Proxy version v1 should proxy logs on node using proxy subresource,karlkfi,1
Proxy version v1 should proxy logs on node with explicit kubelet port,mwielgus,1
Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource,bgrant0607,1
Proxy version v1 should proxy through a service and a pod,mml,1
Proxy version v1 should proxy to cadvisor,Random-Liu,1
Proxy version v1 should proxy to cadvisor using proxy subresource,deads2k,1
Reboot each node by dropping all inbound packets for a while and ensure they function afterwards,quinton-hoole,0
Reboot each node by dropping all outbound packets for a while and ensure they function afterwards,quinton-hoole,0
Reboot each node by ordering clean reboot and ensure they function upon restart,quinton-hoole,0
Reboot each node by ordering unclean reboot and ensure they function upon restart,quinton-hoole,0
Reboot each node by switching off the network interface and ensure they function upon switch on,quinton-hoole,0
Reboot each node by triggering kernel panic and ensure they function upon restart,quinton-hoole,0
Redis should create and stop redis servers,timstclair,1
ReplicaSet should serve a basic image on each replica with a private image,pmorie,1
ReplicaSet should serve a basic image on each replica with a public image,krousey,0
ReplicationController should serve a basic image on each replica with a private image,jbeda,1
ReplicationController should serve a basic image on each replica with a public image,a-robinson,1
Resource usage of system containers should not exceed expected amount.,ncdc,1
ResourceQuota should create a ResourceQuota and capture the life of a configMap.,david-mcmahon,1
ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim.,bgrant0607,1
ResourceQuota should create a ResourceQuota and capture the life of a pod.,pmorie,1
ResourceQuota should create a ResourceQuota and capture the life of a replication controller.,jdef,1
ResourceQuota should create a ResourceQuota and capture the life of a secret.,ncdc,1
ResourceQuota should create a ResourceQuota and capture the life of a service.,timstclair,1
ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated.,krousey,1
ResourceQuota should verify ResourceQuota with best effort scope.,mml,1
ResourceQuota should verify ResourceQuota with terminating scopes.,ncdc,1
Restart should restart all nodes and ensure all nodes and pods recover,andyzheng0831,1
RethinkDB should create and stop rethinkdb servers,kargakis,1
SSH should SSH to all nodes and run commands,quinton-hoole,0
"Scale should be able to launch 500 pods, 10 per minion, in 25 rcs/thread.",roberthbailey,1
"Scale should be able to launch 500 pods, 10 per minion, in 5 rcs/thread.",bgrant0607,1
SchedulerPredicates validates MaxPods limit number of pods that are allowed to run,gmarek,0
SchedulerPredicates validates resource limits of pods that are allowed to run,gmarek,0
SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching,hurf,1
SchedulerPredicates validates that InterPod Affinity and AntiAffinity is respected if matching,girishkalele,1
SchedulerPredicates validates that InterPodAffinity is respected if matching,kevin-wangzefeng,1
SchedulerPredicates validates that InterPodAffinity is respected if matching with multiple Affinities,caesarxuchao,1
SchedulerPredicates validates that InterPodAntiAffinity is respected if matching 2,sttts,0
SchedulerPredicates validates that NodeAffinity is respected if not matching,fgrzadkowski,0
SchedulerPredicates validates that NodeSelector is respected,fgrzadkowski,0
SchedulerPredicates validates that NodeSelector is respected if matching,gmarek,0
SchedulerPredicates validates that NodeSelector is respected if not matching,gmarek,0
SchedulerPredicates validates that a pod with an invalid NodeAffinity is rejected,deads2k,1
SchedulerPredicates validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid,smarterclayton,1
SchedulerPredicates validates that embedding the JSON NodeAffinity setting as a string in the annotation value work,yifan-gu,1
SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work,hurf,1
SchedulerPredicates validates that required NodeAffinity setting is respected if matching,ArtfulCoder,1
SchedulerPredicates validates that taints-tolerations is respected if matching,jlowdermilk,1
SchedulerPredicates validates that taints-tolerations is respected if not matching,derekwaynecarr,1
Secret should create a pod that reads a secret,luxas,1
Secrets should be consumable from pods,Random-Liu,1
Secrets should be consumable from pods in env vars,ArtfulCoder,1
Secrets should be consumable from pods in volume,ghodss,1
Security Context should support container.SecurityContext.RunAsUser,alex-mohr,1
Security Context should support pod.Spec.SecurityContext.RunAsUser,bgrant0607,1
Security Context should support pod.Spec.SecurityContext.SupplementalGroups,andyzheng0831,1
Security Context should support seccomp alpha docker/default annotation,freehan,1
Security Context should support seccomp alpha unconfined annotation on the container,childsb,1
Security Context should support seccomp alpha unconfined annotation on the pod,krousey,1
Security Context should support seccomp default which is unconfined,lavalamp,1
Security Context should support volume SELinux relabeling,thockin,1
Security Context should support volume SELinux relabeling when using hostIPC,alex-mohr,1
Security Context should support volume SELinux relabeling when using hostPID,dchen1107,1
Service endpoints latency should not be very high,cjcullen,1
ServiceAccounts should ensure a single API token exists,liggitt,0
ServiceAccounts should mount an API token into pods,liggitt,0
ServiceLoadBalancer should support simple GET on Ingress ips,bprashanth,0
Services should be able to change the type and nodeport settings of a service,bprashanth,0
Services should be able to change the type and ports of a service,bprashanth,0
Services should be able to create a functioning NodePort service,bprashanth,0
Services should be able to create a functioning external load balancer,bprashanth,0
Services should be able to up and down services,bprashanth,0
Services should check NodePort out-of-range,bprashanth,0
Services should correctly serve identically named services in different namespaces on different external IP addresses,bprashanth,0
Services should create endpoints for unready pods,maisem,0
Services should prevent NodePort collisions,bprashanth,0
Services should provide secure master service,bprashanth,0
Services should release NodePorts on delete,bprashanth,0
Services should release the load balancer when Type goes from LoadBalancer -> NodePort,karlkfi,1
Services should serve a basic endpoint from pods,bprashanth,0
Services should serve identically named services in different namespaces on different load-balancers,davidopp,1
Services should serve multiport endpoints from pods,bprashanth,0
Services should work after restarting apiserver,bprashanth,0
Services should work after restarting kube-proxy,bprashanth,0
Shell should pass tests for services.sh,mtaufen,1
Skipped Cluster upgrade kube-push of master should maintain responsive services,a-robinson,1
Skipped Cluster upgrade upgrade-cluster should maintain a functioning cluster,smarterclayton,1
Skipped Cluster upgrade upgrade-master should maintain responsive services,vulpecula,1
"Spark should start spark master, driver and workers",girishkalele,1
"Storm should create and stop Zookeeper, Nimbus and Storm worker servers",swagiaal,1
ThirdParty resources Simple Third Party creating/deleting thirdparty objects works,luxas,1
Ubernetes Lite should spread the pods of a replication controller across zones,quinton-hoole,0
Ubernetes Lite should spread the pods of a service across zones,quinton-hoole,0
Upgrade cluster upgrade should maintain a functioning cluster,girishkalele,1
Upgrade cluster upgrade should maintain responsive services,david-mcmahon,1
Upgrade master upgrade should maintain responsive services,mikedanese,1
Upgrade node upgrade should maintain a functioning cluster,zmerlynn,1
Upgrade node upgrade should maintain responsive services,childsb,1
V1Job should delete a job,soltysh,0
V1Job should fail a job,soltysh,0
V1Job should keep restarting failed pods,soltysh,0
V1Job should run a job to completion when tasks sometimes fail and are locally restarted,soltysh,0
V1Job should run a job to completion when tasks sometimes fail and are not locally restarted,soltysh,0
V1Job should run a job to completion when tasks succeed,soltysh,0
V1Job should scale a job down,soltysh,0
V1Job should scale a job up,soltysh,0
Variable Expansion should allow composing env vars into new env vars,ghodss,1
Variable Expansion should allow substituting values in a container's args,dchen1107,1
Variable Expansion should allow substituting values in a container's command,mml,1
Volumes Ceph RBD should be mountable,fabioy,1
Volumes CephFS should be mountable,Q-Lee,1
Volumes Cinder should be mountable,cjcullen,1
Volumes GlusterFS should be mountable,eparis,1
Volumes NFS should be mountable,andyzheng0831,1
Volumes PD should be mountable,caesarxuchao,1
Volumes iSCSI should be mountable,david-mcmahon,1
hostDir should give a volume the correct mode,bgrant0607,1
hostDir should support r/w on tmpfs,erictune,0
hostPath should give a volume the correct mode,roberthbailey,1
hostPath should support r/w,roberthbailey,1
hostPath should support subPath,krousey,1
k8s.io/kubernetes/cluster/addons/dns/kube2sky,zmerlynn,1
k8s.io/kubernetes/cmd/genutils,rmmh,1
k8s.io/kubernetes/cmd/hyperkube,jbeda,0
@@ -153,7 +440,7 @@ k8s.io/kubernetes/cmd/mungedocs,mwielgus,1
k8s.io/kubernetes/contrib/mesos/cmd/km,brendandburns,1
k8s.io/kubernetes/contrib/mesos/pkg/archive,kargakis,1
k8s.io/kubernetes/contrib/mesos/pkg/election,vulpecula,1
k8s.io/kubernetes/contrib/mesos/pkg/executor,mhrgoog,1
k8s.io/kubernetes/contrib/mesos/pkg/executor,brendandburns,1
k8s.io/kubernetes/contrib/mesos/pkg/minion/tasks,hurf,1
k8s.io/kubernetes/contrib/mesos/pkg/node,jdef,1
k8s.io/kubernetes/contrib/mesos/pkg/offers,david-mcmahon,1
@@ -169,7 +456,7 @@ k8s.io/kubernetes/contrib/mesos/pkg/scheduler/config,dchen1107,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/constraint,dchen1107,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/executorinfo,luxas,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/integration,yifan-gu,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask,mhrgoog,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask,dchen1107,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask/hostport,mml,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resources,ixdy,1
k8s.io/kubernetes/contrib/mesos/pkg/scheduler/service,madhusudancs,1
@@ -180,11 +467,14 @@ k8s.io/kubernetes/examples,Random-Liu,0
k8s.io/kubernetes/examples/apiserver,nikhiljindal,0
k8s.io/kubernetes/federation/apis/federation/install,nikhiljindal,0
k8s.io/kubernetes/federation/apis/federation/validation,nikhiljindal,0
k8s.io/kubernetes/federation/cmd/federated-apiserver/app,nikhiljindal,0
k8s.io/kubernetes/federation/cmd/federation-apiserver/app,dchen1107,1
k8s.io/kubernetes/federation/pkg/dnsprovider/providers/aws/route53,cjcullen,1
k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns,jsafrane,1
k8s.io/kubernetes/federation/pkg/federation-controller/cluster,nikhiljindal,0
k8s.io/kubernetes/federation/pkg/federation-controller/service,pmorie,1
k8s.io/kubernetes/federation/registry/cluster,nikhiljindal,0
k8s.io/kubernetes/federation/registry/cluster/etcd,nikhiljindal,0
k8s.io/kubernetes/hack/cmd/teststale,mhrgoog,1
k8s.io/kubernetes/hack/cmd/teststale,thockin,1
k8s.io/kubernetes/pkg/admission,dchen1107,1
k8s.io/kubernetes/pkg/api,Q-Lee,1
k8s.io/kubernetes/pkg/api/endpoints,swagiaal,1
@@ -196,7 +486,7 @@ k8s.io/kubernetes/pkg/api/pod,piosz,1
k8s.io/kubernetes/pkg/api/resource,smarterclayton,1
k8s.io/kubernetes/pkg/api/rest,ixdy,1
k8s.io/kubernetes/pkg/api/service,spxtr,1
k8s.io/kubernetes/pkg/api/testapi,wonderfly,1
k8s.io/kubernetes/pkg/api/testapi,caesarxuchao,1
k8s.io/kubernetes/pkg/api/unversioned,kevin-wangzefeng,1
k8s.io/kubernetes/pkg/api/unversioned/validation,brendandburns,1
k8s.io/kubernetes/pkg/api/util,ghodss,1
@@ -208,6 +498,7 @@ k8s.io/kubernetes/pkg/apis/apps/validation,derekwaynecarr,1
k8s.io/kubernetes/pkg/apis/authorization/validation,erictune,0
k8s.io/kubernetes/pkg/apis/autoscaling/validation,Random-Liu,1
k8s.io/kubernetes/pkg/apis/batch/validation,erictune,0
k8s.io/kubernetes/pkg/apis/certificates/install,zmerlynn,1
k8s.io/kubernetes/pkg/apis/componentconfig,jbeda,1
k8s.io/kubernetes/pkg/apis/componentconfig/install,pmorie,1
k8s.io/kubernetes/pkg/apis/extensions,jbeda,1
@@ -215,6 +506,7 @@ k8s.io/kubernetes/pkg/apis/extensions/install,thockin,1
k8s.io/kubernetes/pkg/apis/extensions/v1beta1,madhusudancs,1
k8s.io/kubernetes/pkg/apis/extensions/validation,Random-Liu,1
k8s.io/kubernetes/pkg/apis/policy/validation,deads2k,1
k8s.io/kubernetes/pkg/apis/rbac/install,ixdy,1
k8s.io/kubernetes/pkg/apis/rbac/validation,erictune,0
k8s.io/kubernetes/pkg/apiserver,nikhiljindal,0
k8s.io/kubernetes/pkg/auth/authenticator/bearertoken,liggitt,0
@@ -226,13 +518,14 @@ k8s.io/kubernetes/pkg/client/chaosclient,a-robinson,1
k8s.io/kubernetes/pkg/client/leaderelection,xiang90,1
k8s.io/kubernetes/pkg/client/record,karlkfi,1
k8s.io/kubernetes/pkg/client/restclient,kargakis,1
k8s.io/kubernetes/pkg/client/testing/core,maisem,1
k8s.io/kubernetes/pkg/client/transport,eparis,1
k8s.io/kubernetes/pkg/client/typed/discovery,ihmccreery,1
k8s.io/kubernetes/pkg/client/typed/discovery,fabioy,1
k8s.io/kubernetes/pkg/client/typed/dynamic,luxas,1
k8s.io/kubernetes/pkg/client/unversioned,justinsb,1
k8s.io/kubernetes/pkg/client/unversioned/auth,swagiaal,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd,yifan-gu,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api,mhrgoog,1
k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api,thockin,1
k8s.io/kubernetes/pkg/client/unversioned/portforward,lavalamp,1
k8s.io/kubernetes/pkg/client/unversioned/remotecommand,andyzheng0831,1
k8s.io/kubernetes/pkg/client/unversioned/testclient,brendandburns,1
@@ -248,23 +541,30 @@ k8s.io/kubernetes/pkg/controller,mikedanese,1
k8s.io/kubernetes/pkg/controller/daemon,Q-Lee,1
k8s.io/kubernetes/pkg/controller/deployment,asalkeld,0
k8s.io/kubernetes/pkg/controller/endpoint,Random-Liu,1
k8s.io/kubernetes/pkg/controller/framework,mhrgoog,1
k8s.io/kubernetes/pkg/controller/framework,smarterclayton,1
k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1
k8s.io/kubernetes/pkg/controller/gc,jdef,1
k8s.io/kubernetes/pkg/controller/job,soltysh,0
k8s.io/kubernetes/pkg/controller/namespace,ihmccreery,1
k8s.io/kubernetes/pkg/controller/namespace,karlkfi,1
k8s.io/kubernetes/pkg/controller/node,gmarek,0
k8s.io/kubernetes/pkg/controller/persistentvolume,jsafrane,0
k8s.io/kubernetes/pkg/controller/petset,david-mcmahon,1
k8s.io/kubernetes/pkg/controller/podautoscaler,piosz,0
k8s.io/kubernetes/pkg/controller/podautoscaler/metrics,piosz,0
k8s.io/kubernetes/pkg/controller/podgc,jdef,1
k8s.io/kubernetes/pkg/controller/replicaset,fgrzadkowski,0
k8s.io/kubernetes/pkg/controller/replication,fgrzadkowski,0
k8s.io/kubernetes/pkg/controller/resourcequota,ghodss,1
k8s.io/kubernetes/pkg/controller/route,gmarek,0
k8s.io/kubernetes/pkg/controller/service,asalkeld,0
k8s.io/kubernetes/pkg/controller/serviceaccount,liggitt,0
k8s.io/kubernetes/pkg/controller/volume,zmerlynn,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach,luxas,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache,hurf,1
k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler,jsafrane,1
k8s.io/kubernetes/pkg/controller/volume/cache,saad-ali,0
k8s.io/kubernetes/pkg/controller/volume/persistentvolume,apelisse,1
k8s.io/kubernetes/pkg/controller/volume/reconciler,xiang90,1
k8s.io/kubernetes/pkg/conversion,swagiaal,1
k8s.io/kubernetes/pkg/conversion/queryparams,caesarxuchao,1
k8s.io/kubernetes/pkg/credentialprovider,justinsb,1
@@ -276,8 +576,8 @@ k8s.io/kubernetes/pkg/fields,jsafrane,1
k8s.io/kubernetes/pkg/genericapiserver,nikhiljindal,0
k8s.io/kubernetes/pkg/healthz,thockin,1
k8s.io/kubernetes/pkg/httplog,mtaufen,1
k8s.io/kubernetes/pkg/kubectl,ihmccreery,1
k8s.io/kubernetes/pkg/kubectl/cmd,wonderfly,1
k8s.io/kubernetes/pkg/kubectl,madhusudancs,1
k8s.io/kubernetes/pkg/kubectl/cmd,rmmh,1
k8s.io/kubernetes/pkg/kubectl/cmd/config,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/util,asalkeld,0
k8s.io/kubernetes/pkg/kubectl/cmd/util/editor,jdef,1
@@ -297,6 +597,7 @@ k8s.io/kubernetes/pkg/kubelet/network,freehan,0
k8s.io/kubernetes/pkg/kubelet/network/cni,freehan,0
k8s.io/kubernetes/pkg/kubelet/network/exec,freehan,0
k8s.io/kubernetes/pkg/kubelet/network/hairpin,freehan,0
k8s.io/kubernetes/pkg/kubelet/network/hostport,erictune,1
k8s.io/kubernetes/pkg/kubelet/network/kubenet,freehan,0
k8s.io/kubernetes/pkg/kubelet/pleg,yujuhong,0
k8s.io/kubernetes/pkg/kubelet/pod,alex-mohr,1
@@ -312,24 +613,29 @@ k8s.io/kubernetes/pkg/kubelet/types,jlowdermilk,1
k8s.io/kubernetes/pkg/kubelet/util/cache,timothysc,1
k8s.io/kubernetes/pkg/kubelet/util/format,a-robinson,1
k8s.io/kubernetes/pkg/kubelet/util/queue,yujuhong,0
k8s.io/kubernetes/pkg/kubelet/volume/cache,swagiaal,1
k8s.io/kubernetes/pkg/kubelet/volume/reconciler,jdef,1
k8s.io/kubernetes/pkg/kubelet/volumemanager/cache,swagiaal,1
k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler,timstclair,1
k8s.io/kubernetes/pkg/labels,ixdy,1
k8s.io/kubernetes/pkg/master,fabioy,1
k8s.io/kubernetes/pkg/probe/exec,bgrant0607,1
k8s.io/kubernetes/pkg/probe/http,david-mcmahon,1
k8s.io/kubernetes/pkg/probe/tcp,mtaufen,1
k8s.io/kubernetes/pkg/proxy/config,wonderfly,1
k8s.io/kubernetes/pkg/proxy/config,ixdy,1
k8s.io/kubernetes/pkg/proxy/iptables,freehan,0
k8s.io/kubernetes/pkg/proxy/userspace,luxas,1
k8s.io/kubernetes/pkg/quota,ihmccreery,1
k8s.io/kubernetes/pkg/quota,sttts,1
k8s.io/kubernetes/pkg/quota/evaluator/core,yifan-gu,1
k8s.io/kubernetes/pkg/registry/componentstatus,yifan-gu,1
k8s.io/kubernetes/pkg/registry/configmap,timothysc,1
k8s.io/kubernetes/pkg/registry/configmap/etcd,ihmccreery,1
k8s.io/kubernetes/pkg/registry/configmap/etcd,janetkuo,1
k8s.io/kubernetes/pkg/registry/controller,jdef,1
k8s.io/kubernetes/pkg/registry/controller/etcd,spxtr,1
k8s.io/kubernetes/pkg/registry/daemonset,a-robinson,1
k8s.io/kubernetes/pkg/registry/daemonset/etcd,pmorie,1
k8s.io/kubernetes/pkg/registry/deployment,timothysc,1
k8s.io/kubernetes/pkg/registry/deployment/etcd,ihmccreery,1
k8s.io/kubernetes/pkg/registry/deployment/etcd,swagiaal,1
k8s.io/kubernetes/pkg/registry/endpoint,smarterclayton,1
k8s.io/kubernetes/pkg/registry/endpoint/etcd,a-robinson,1
k8s.io/kubernetes/pkg/registry/event,girishkalele,1
@ -349,6 +655,8 @@ k8s.io/kubernetes/pkg/registry/limitrange,apelisse,1
|
|||
k8s.io/kubernetes/pkg/registry/limitrange/etcd,zmerlynn,1
|
||||
k8s.io/kubernetes/pkg/registry/namespace,roberthbailey,1
|
||||
k8s.io/kubernetes/pkg/registry/namespace/etcd,justinsb,1
|
||||
k8s.io/kubernetes/pkg/registry/networkpolicy,david-mcmahon,1
|
||||
k8s.io/kubernetes/pkg/registry/networkpolicy/etcd,xiang90,1
|
||||
k8s.io/kubernetes/pkg/registry/node,krousey,1
|
||||
k8s.io/kubernetes/pkg/registry/node/etcd,kevin-wangzefeng,1
|
||||
k8s.io/kubernetes/pkg/registry/persistentvolume,kevin-wangzefeng,1
|
||||
|
@ -403,7 +711,7 @@ k8s.io/kubernetes/pkg/securitycontext,ArtfulCoder,1
|
|||
k8s.io/kubernetes/pkg/serviceaccount,liggitt,0
|
||||
k8s.io/kubernetes/pkg/ssh,jbeda,1
|
||||
k8s.io/kubernetes/pkg/storage,xiang90,0
|
||||
k8s.io/kubernetes/pkg/storage/etcd,mhrgoog,1
|
||||
k8s.io/kubernetes/pkg/storage/etcd,eparis,1
|
||||
k8s.io/kubernetes/pkg/storage/etcd/util,xiang90,0
|
||||
k8s.io/kubernetes/pkg/storage/etcd3,xiang90,0
|
||||
k8s.io/kubernetes/pkg/util,jbeda,1
|
||||
|
@ -414,6 +722,7 @@ k8s.io/kubernetes/pkg/util/config,girishkalele,1
k8s.io/kubernetes/pkg/util/configz,ixdy,1
k8s.io/kubernetes/pkg/util/dbus,roberthbailey,1
k8s.io/kubernetes/pkg/util/deployment,asalkeld,0
k8s.io/kubernetes/pkg/util/diff,piosz,1
k8s.io/kubernetes/pkg/util/env,asalkeld,0
k8s.io/kubernetes/pkg/util/errors,a-robinson,1
k8s.io/kubernetes/pkg/util/exec,krousey,1
@@ -468,7 +777,7 @@ k8s.io/kubernetes/pkg/volume/flexvolume,Q-Lee,1
k8s.io/kubernetes/pkg/volume/flocker,jbeda,1
k8s.io/kubernetes/pkg/volume/gce_pd,saad-ali,0
k8s.io/kubernetes/pkg/volume/git_repo,davidopp,1
k8s.io/kubernetes/pkg/volume/glusterfs,ihmccreery,1
k8s.io/kubernetes/pkg/volume/glusterfs,timstclair,1
k8s.io/kubernetes/pkg/volume/host_path,jbeda,1
k8s.io/kubernetes/pkg/volume/iscsi,cjcullen,1
k8s.io/kubernetes/pkg/volume/nfs,justinsb,1
@@ -504,6 +813,7 @@ k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/x509,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc,brendandburns,1
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile,liggitt,0
k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook,ghodss,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac,hurf,1
k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook,hurf,1
k8s.io/kubernetes/plugin/pkg/client/auth/oidc,cjcullen,1
k8s.io/kubernetes/plugin/pkg/scheduler,fgrzadkowski,0
@@ -516,7 +826,6 @@ k8s.io/kubernetes/plugin/pkg/scheduler/api/validation,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/factory,fgrzadkowski,0
k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache,fgrzadkowski,0
k8s.io/kubernetes/test/integration,lavalamp,1
k8s.io/kubernetes/test/integration TestServiceAccountAutoCreate,swagiaal,0
k8s.io/kubernetes/third_party/forked/reflect,yifan-gu,1
k8s.io/kubernetes/third_party/golang/expansion,dchen1107,1
k8s.io/kubernetes/third_party/golang/go/ast,mml,1
@@ -527,237 +836,6 @@ k8s.io/kubernetes/third_party/golang/go/parser,davidopp,1
k8s.io/kubernetes/third_party/golang/go/printer,madhusudancs,1
k8s.io/kubernetes/third_party/golang/go/scanner,hurf,1
k8s.io/kubernetes/third_party/golang/go/token,mml,1
Kibana Logging Instances Is Alive should check that the Kibana logging instance is alive,swagiaal,0
kube-ui should check that the kube-ui instance is alive,fabioy,1
Kubectl client Guestbook application should create and stop a working application,pwittrock,0
Kubectl client Kubectl api-versions should check if v1 is in available api versions,pwittrock,0
Kubectl client Kubectl apply should apply a new configuration to an existing RC,pwittrock,0
Kubectl client Kubectl apply should reuse nodePort when apply to an existing SVC,pwittrock,0
Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info,pwittrock,0
Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods,pwittrock,0
Kubectl client Kubectl expose should create services for rc,pwittrock,0
Kubectl client Kubectl label should update the label on a resource,pwittrock,0
Kubectl client Kubectl logs should be able to retrieve and filter logs,jlowdermilk,0
Kubectl client Kubectl patch should add annotations for pods in rc,janetkuo,0
Kubectl client Kubectl rolling-update should support rolling-update to same image,janetkuo,0
"Kubectl client Kubectl run --rm job should create a job from an image, then delete the job",soltysh,0
Kubectl client Kubectl run default should create an rc or deployment from an image,janetkuo,0
Kubectl client Kubectl run deployment should create a deployment from an image,janetkuo,0
Kubectl client Kubectl run job should create a job from an image when restart is Never,soltysh,0
Kubectl client Kubectl run job should create a job from an image when restart is OnFailure,soltysh,0
Kubectl client Kubectl run pod should create a pod from an image when restart is Never,janetkuo,0
Kubectl client Kubectl run pod should create a pod from an image when restart is OnFailure,janetkuo,0
Kubectl client Kubectl run rc should create an rc from an image,janetkuo,0
Kubectl client Kubectl taint should update the taint on a node,pwittrock,0
Kubectl client Kubectl version should check is all data is printed,janetkuo,0
Kubectl client Proxy server should support --unix-socket=/path,zmerlynn,1
Kubectl client Proxy server should support proxy with --port 0,ncdc,1
Kubectl client Simple pod should support exec,ncdc,0
Kubectl client Simple pod should support exec through an HTTP proxy,ncdc,0
Kubectl client Simple pod should support inline execution and attach,ncdc,0
Kubectl client Simple pod should support port-forward,ncdc,0
Kubectl client Update Demo should create and stop a replication controller,sttts,0
Kubectl client Update Demo should do a rolling update of a replication controller,sttts,0
Kubectl client Update Demo should scale a replication controller,sttts,0
kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.,bgrant0607,1
Kubelet experimental resource usage tracking for 100 pods per node over 20m0s,yujuhong,0
Kubelet experimental resource usage tracking over 30m0s with 50 pods per node.,yujuhong,0
Kubelet experimental resource usage tracking resource tracking for 100 pods per node,ghodss,0
Kubelet regular resource usage tracking for 0 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking for 100 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking for 35 pods per node over 20m0s,yujuhong,0
Kubelet regular resource usage tracking over 30m0s with 0 pods per node.,yujuhong,0
Kubelet regular resource usage tracking over 30m0s with 35 pods per node.,yujuhong,0
Kubelet regular resource usage tracking resource tracking for 0 pods per node,pmorie,1
Kubelet regular resource usage tracking resource tracking for 100 pods per node,justinsb,1
Kubelet regular resource usage tracking resource tracking for 35 pods per node,madhusudancs,1
KubeletManagedEtcHosts should test kubelet managed /etc/hosts file,ArtfulCoder,1
KubeProxy should test kube-proxy,vulpecula,1
Kubernetes Dashboard should check that the kubernetes-dashboard instance is alive,wonderfly,0
LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied.,cjcullen,1
Liveness liveness pods should be automatically restarted,andyzheng0831,1
Load capacity should be able to handle 3 pods per node,gmarek,0
Load capacity should be able to handle 30 pods per node,wojtek-t,0
MasterCerts should have all expected certs on the master,apelisse,1
MaxPods Validates MaxPods limit number of pods that are allowed to run.,gmarek,0
Mesos applies slave attributes as labels,justinsb,1
Mesos schedules pods annotated with roles on correct slaves,timstclair,1
Mesos starts static pods on every node in the mesos cluster,lavalamp,1
MetricsGrabber should grab all metrics from a ControllerManager.,gmarek,0
MetricsGrabber should grab all metrics from a Kubelet.,gmarek,0
MetricsGrabber should grab all metrics from a Scheduler.,gmarek,0
MetricsGrabber should grab all metrics from API server.,gmarek,0
Monitoring should verify monitoring pods and all cluster nodes are available on influxdb using heapster.,piosz,0
Namespaces Delete 90 percent of 100 namespace in 150 seconds,caesarxuchao,1
Namespaces Delete ALL of 100 namespace in 150 seconds,luxas,1
Namespaces should always delete fast (ALL of 100 namespaces in 150 seconds),rmmh,1
Namespaces should delete fast enough (90 percent of 100 namespaces in 150 seconds),kevin-wangzefeng,1
Namespaces should ensure that all pods are removed when a namespace is deleted.,xiang90,1
Namespaces should ensure that all services are removed when a namespace is deleted.,pmorie,1
Networking Granular Checks should function for pod communication between nodes,freehan,0
Networking Granular Checks should function for pod communication on a single node,freehan,0
Networking IPerf should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients),ghodss,1
Networking should function for intra-pod communication,sttts,0
Networking should provide Internet connection for containers,sttts,0
"Networking should provide unchanging, static URL paths for kubernetes api services",freehan,0
|
||||
"Networking should provide unchanging, static URL paths for kubernetes api services.",ihmccreery,1
|
||||
NodeOutOfDisk runs out of disk space,vishh,0
|
||||
NodeProblemDetector KernelMonitor should generate node condition and events for corresponding errors ,Random-Liu,0
|
||||
Nodes Network when a minion node becomes unreachable recreates pods scheduled on the unreachable minion node AND allows scheduling of pods on a minion after it rejoins the cluster,childsb,1
|
||||
Nodes Network when a node becomes unreachable All pods on the unreachable node should be marked as NotReady upon the node turn NotReady AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout,freehan,0
|
||||
Nodes Network when a node becomes unreachable recreates pods scheduled on the unreachable node AND allows scheduling of pods on a node after it rejoins the cluster,madhusudancs,1
|
||||
Nodes Resize should be able to add nodes,piosz,1
|
||||
Nodes Resize should be able to delete nodes,zmerlynn,1
|
||||
"PersistentVolumes NFS volume can be created, bound, retrieved, unbound, and used by a pod",jsafrane,0
|
||||
kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.,yujuhong,0
|
||||
persistentVolumes PersistentVolume,yifan-gu,1
|
||||
Pet Store should scale to persist a nominal number ( 50 ) of transactions in 1m0s seconds,timstclair,1
|
||||
PetSet provide basic identity,david-mcmahon,1
|
||||
PetSet should handle healthy pet restarts during scale,ixdy,1
|
||||
"Pod Disks should schedule a pod w/ a readonly PD on two hosts, then remove both.",saad-ali,0
|
||||
"Pod Disks should schedule a pod w/ a RW PD shared between multiple containers, write to PD, delete pod, verify contents, and repeat in rapid succession",saad-ali,0
|
||||
"Pod Disks should schedule a pod w/ a RW PD, remove it, then schedule it on another host",saad-ali,0
|
||||
"Pod Disks should schedule a pod w/two RW PDs both mounted to one container, write to PD, verify contents, delete pod, recreate pod, verify contents, and repeat in rapid succession",saad-ali,0
|
||||
Pods should *not* be restarted with a /healthz http liveness probe,cjcullen,1
|
||||
"Pods should *not* be restarted with a docker exec ""cat /tmp/health"" liveness probe",vishh,0
|
||||
Pods should allow activeDeadlineSeconds to be updated,derekwaynecarr,0
|
||||
Pods should be restarted with a /healthz http liveness probe,girishkalele,1
|
||||
"Pods should be restarted with a docker exec ""cat /tmp/health"" liveness probe",bgrant0607,1
|
||||
Pods should be schedule with cpu and memory limits,vishh,0
|
||||
Pods should be submitted and removed,wonderfly,1
|
||||
Pods should be updated,derekwaynecarr,1
|
||||
Pods should cap back-off at MaxContainerBackOff,maisem,1
|
||||
Pods should contain environment variables for services,jlowdermilk,1
|
||||
Pods should get a host IP,xiang90,1
|
||||
Pods should have monotonically increasing restart count,Random-Liu,1
|
||||
Pods should have their auto-restart back-off timer reset on image update,mikedanese,1
|
||||
Pods should invoke init containers on a RestartAlways pod,cjcullen,1
|
||||
Pods should invoke init containers on a RestartNever pod,justinsb,1
|
||||
Pods should not start app containers and fail the pod if init containers fail on a RestartNever pod,maisem,0
|
||||
Pods should not start app containers if init containers fail on a RestartAlways pod,david-mcmahon,1
|
||||
Pods should support remote command execution over websockets,madhusudancs,1
|
||||
Pods should support retrieving logs from the container over websockets,vishh,0
|
||||
"Port forwarding With a server that expects a client request should support a client that connects, sends data, and disconnects",sttts,0
|
||||
"Port forwarding With a server that expects a client request should support a client that connects, sends no data, and disconnects",sttts,0
|
||||
"Port forwarding With a server that expects no client request should support a client that connects, sends no data, and disconnects",sttts,0
|
||||
PreStop should call prestop when killing a pod,ncdc,1
|
||||
PrivilegedPod should test privileged pod,vishh,0
|
||||
Probing container with readiness probe should not be ready before initial delay and never restart,smarterclayton,1
|
||||
Probing container with readiness probe that fails should never be ready and never restart,mtaufen,1
|
||||
Proxy version v1 should proxy logs on node,girishkalele,1
|
||||
Proxy version v1 should proxy logs on node using proxy subresource,karlkfi,1
|
||||
Proxy version v1 should proxy logs on node with explicit kubelet port,mwielgus,1
|
||||
Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource,bgrant0607,1
|
||||
Proxy version v1 should proxy through a service and a pod,mml,1
|
||||
Proxy version v1 should proxy to cadvisor,Random-Liu,1
|
||||
Proxy version v1 should proxy to cadvisor using proxy subresource,deads2k,1
|
||||
Reboot each node by dropping all inbound packets for a while and ensure they function afterwards,quinton-hoole,0
|
||||
Reboot each node by dropping all outbound packets for a while and ensure they function afterwards,quinton-hoole,0
|
||||
Reboot each node by ordering clean reboot and ensure they function upon restart,quinton-hoole,0
|
||||
Reboot each node by ordering unclean reboot and ensure they function upon restart,quinton-hoole,0
|
||||
Reboot each node by switching off the network interface and ensure they function upon switch on,quinton-hoole,0
|
||||
Reboot each node by triggering kernel panic and ensure they function upon restart,quinton-hoole,0
|
||||
Redis should create and stop redis servers,timstclair,1
|
||||
ReplicaSet should serve a basic image on each replica with a private image,pmorie,1
|
||||
ReplicaSet should serve a basic image on each replica with a public image,krousey,0
|
||||
ReplicationController should serve a basic image on each replica with a private image,jbeda,1
|
||||
ReplicationController should serve a basic image on each replica with a public image,a-robinson,1
|
||||
Resource usage of system containers should not exceed expected amount.,ncdc,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a configMap.,mhrgoog,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a loadBalancer service.,ncdc,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a nodePort service updated to clusterIP.,ghodss,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a nodePort service updated to loadBalancer.,jdef,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a nodePort service.,thockin,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim.,bgrant0607,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a pod.,pmorie,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a replication controller.,ihmccreery,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a secret.,ncdc,1
|
||||
ResourceQuota should create a ResourceQuota and capture the life of a service.,timstclair,1
|
||||
ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated.,krousey,1
|
||||
ResourceQuota should verify ResourceQuota with best effort scope.,mml,1
|
||||
ResourceQuota should verify ResourceQuota with terminating scopes.,ncdc,1
Restart should restart all nodes and ensure all nodes and pods recover,andyzheng0831,1
RethinkDB should create and stop rethinkdb servers,kargakis,1
"Scale should be able to launch 500 pods, 10 per minion, in 25 rcs/thread.",wonderfly,1
"Scale should be able to launch 500 pods, 10 per minion, in 5 rcs/thread.",bgrant0607,1
SchedulerPredicates validates MaxPods limit number of pods that are allowed to run,gmarek,0
SchedulerPredicates validates resource limits of pods that are allowed to run,gmarek,0
SchedulerPredicates validates that a pod with an invalid NodeAffinity is rejected,deads2k,1
SchedulerPredicates validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid,smarterclayton,1
SchedulerPredicates validates that embedding the JSON NodeAffinity setting as a string in the annotation value work,yifan-gu,1
SchedulerPredicates validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work,hurf,1
SchedulerPredicates validates that Inter-pod-Affinity is respected if not matching,hurf,1
SchedulerPredicates validates that InterPod Affinity and AntiAffinity is respected if matching,girishkalele,1
SchedulerPredicates validates that InterPodAffinity is respected if matching,kevin-wangzefeng,1
SchedulerPredicates validates that InterPodAffinity is respected if matching with multiple Affinities,caesarxuchao,1
SchedulerPredicates validates that InterPodAntiAffinity is respected if matching 2,sttts,0
SchedulerPredicates validates that NodeAffinity is respected if not matching,fgrzadkowski,0
SchedulerPredicates validates that NodeSelector is respected,fgrzadkowski,0
SchedulerPredicates validates that NodeSelector is respected if matching,gmarek,0
SchedulerPredicates validates that NodeSelector is respected if not matching,gmarek,0
SchedulerPredicates validates that required NodeAffinity setting is respected if matching,ArtfulCoder,1
SchedulerPredicates validates that taints-tolerations is respected if matching,jlowdermilk,1
SchedulerPredicates validates that taints-tolerations is respected if not matching,derekwaynecarr,1
Secret should create a pod that reads a secret,luxas,1
Secrets should be consumable from pods,Random-Liu,1
Secrets should be consumable from pods in env vars,ArtfulCoder,1
Secrets should be consumable from pods in volume,ghodss,1
Security Context should support container.SecurityContext.RunAsUser,alex-mohr,1
Security Context should support pod.Spec.SecurityContext.RunAsUser,bgrant0607,1
Security Context should support pod.Spec.SecurityContext.SupplementalGroups,andyzheng0831,1
Security Context should support volume SELinux relabeling,wonderfly,1
Security Context should support volume SELinux relabeling when using hostIPC,alex-mohr,1
Security Context should support volume SELinux relabeling when using hostPID,dchen1107,1
Service endpoints latency should not be very high,cjcullen,1
ServiceAccounts should ensure a single API token exists,liggitt,0
ServiceAccounts should mount an API token into pods,liggitt,0
ServiceLoadBalancer should support simple GET on Ingress ips,bprashanth,0
Services should be able to change the type and nodeport settings of a service,bprashanth,0
Services should be able to change the type and ports of a service,bprashanth,0
Services should be able to create a functioning external load balancer,bprashanth,0
Services should be able to create a functioning NodePort service,bprashanth,0
Services should be able to up and down services,bprashanth,0
Services should check NodePort out-of-range,bprashanth,0
Services should correctly serve identically named services in different namespaces on different external IP addresses,bprashanth,0
Services should create endpoints for unready pods,maisem,0
Services should prevent NodePort collisions,bprashanth,0
Services should provide secure master service,bprashanth,0
Services should release NodePorts on delete,bprashanth,0
Services should release the load balancer when Type goes from LoadBalancer -> NodePort,karlkfi,1
Services should serve a basic endpoint from pods,bprashanth,0
Services should serve identically named services in different namespaces on different load-balancers,davidopp,1
Services should serve multiport endpoints from pods,bprashanth,0
Services should work after restarting apiserver,bprashanth,0
Services should work after restarting kube-proxy,bprashanth,0
Shell should pass tests for services.sh,mtaufen,1
Skipped Cluster upgrade kube-push of master should maintain responsive services,a-robinson,1
Skipped Cluster upgrade upgrade-cluster should maintain a functioning cluster,smarterclayton,1
Skipped Cluster upgrade upgrade-master should maintain responsive services,vulpecula,1
"Spark should start spark master, driver and workers",girishkalele,1
SSH should SSH to all nodes and run commands,quinton-hoole,0
"Storm should create and stop Zookeeper, Nimbus and Storm worker servers",swagiaal,1
ThirdParty resources Simple Third Party creating/deleting thirdparty objects works,luxas,1
Ubernetes Lite should spread the pods of a replication controller across zones,quinton-hoole,0
Ubernetes Lite should spread the pods of a service across zones,quinton-hoole,0
Upgrade cluster upgrade should maintain a functioning cluster,girishkalele,1
Upgrade cluster upgrade should maintain responsive services,david-mcmahon,1
Upgrade master upgrade should maintain responsive services,mikedanese,1
Upgrade node upgrade should maintain a functioning cluster,zmerlynn,1
Upgrade node upgrade should maintain responsive services,childsb,1
V1Job should delete a job,soltysh,0
V1Job should fail a job,soltysh,0
V1Job should keep restarting failed pods,soltysh,0
V1Job should run a job to completion when tasks sometimes fail and are locally restarted,soltysh,0
V1Job should run a job to completion when tasks sometimes fail and are not locally restarted,soltysh,0
V1Job should run a job to completion when tasks succeed,soltysh,0
V1Job should scale a job down,soltysh,0
V1Job should scale a job up,soltysh,0
Variable Expansion should allow composing env vars into new env vars,ghodss,1
Variable Expansion should allow substituting values in a container's args,dchen1107,1
Variable Expansion should allow substituting values in a container's command,mml,1
Volumes Ceph RBD should be mountable,fabioy,1
Volumes CephFS should be mountable,Q-Lee,1
Volumes Cinder should be mountable,cjcullen,1
Volumes GlusterFS should be mountable,eparis,1
Volumes iSCSI should be mountable,david-mcmahon,1
Volumes NFS should be mountable,andyzheng0831,1
Volumes PD should be mountable,caesarxuchao,1