Merge pull request #13020 from eparis/enforce-flag-checker

Make verify-flags-underscore.py return error on failure
Marek Grabowski 2015-08-25 09:16:31 +02:00
commit 74cbb4da2e
3 changed files with 9 additions and 3 deletions

@@ -330,7 +330,7 @@ Now, we can visit the running WordPress app.
 Use the external IP that you obtained above, and visit it on port 80:
 ```
-http://<external_ip>
+http://<external-ip>
 ```
 You should see the familiar WordPress init page.
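
The only change here renames the placeholder from `<external_ip>` to `<external-ip>`, presumably so that the underscore checker no longer reports this doc line as an underscore spelling of a flag. As a rough illustration of that kind of scan (hypothetical names; the real logic in verify-flags-underscore.py differs), given a set of known dash-style flag names one can search text for their underscore variants:

```
import re

# Hypothetical flag list; the real checker derives its list from the source tree.
KNOWN_FLAGS = {"external-ip", "retry-time", "file-content-in-loop"}

def find_underscore_spellings(relname, text):
    """Return (relname, line) pairs where a known flag appears spelled with underscores."""
    patterns = [re.compile(re.escape(flag.replace("-", "_"))) for flag in KNOWN_FLAGS]
    bad = []
    for line in text.splitlines():
        if any(p.search(line) for p in patterns):
            bad.append((relname, line))
    return bad

# The old placeholder is reported, the renamed one is not:
print(find_underscore_spellings("README.md", "http://<external_ip>\nhttp://<external-ip>"))
```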

@@ -236,6 +236,7 @@ def main():
     bad_lines.sort()
     for (relname, line) in bad_lines:
         print("%s:%s" % (relname, line))
+    return 1

 if __name__ == "__main__":
     sys.exit(main())
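
This one-line addition is what makes the script "return error on failure": when bad lines were found and printed, main() now returns 1, and sys.exit(main()) turns that return value into the process exit status, so callers (for example a CI verify step) see a failure. A minimal standalone sketch of the same pattern, not the checker itself:

```
import sys

def main():
    # Hypothetical findings; the real script collects these by scanning the tree.
    bad_lines = [("docs/example.md", "uses --retry_time instead of --retry-time")]
    bad_lines.sort()
    for (relname, line) in bad_lines:
        print("%s:%s" % (relname, line))
    if bad_lines:
        return 1  # non-zero exit status -> the verification fails
    return 0      # returning None would also exit 0, but being explicit is clearer

if __name__ == "__main__":
    # sys.exit(None) exits with status 0 and sys.exit(1) with status 1, so the
    # value returned by main() becomes the exit code a shell or CI job can check.
    sys.exit(main())
```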

@@ -1,4 +1,6 @@
 cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
+cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
+cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
 cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
 cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed
 cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \
@@ -32,8 +34,7 @@ cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
-cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP
-cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]}
+cluster/mesos/docker/common/bin/util-ssl.sh: local cluster_domain="cluster.local"
 cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
@@ -80,3 +81,7 @@ pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID =
 test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
 test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
 test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
+test/e2e/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),
+test/e2e/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
+test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
+test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")
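
The third file is the checker's exceptions list: each entry pairs a relative path with the exact line content that is allowed to keep its underscore spelling. The additions whitelist the registry proxy Dockerfile and the mount-tester's `retry_time` / `file_content_in_loop` flags, while two stale libvirt-coreos entries are replaced by a mesos one. A rough sketch of how such a list could be parsed and applied, assuming this "relname:line" format (the real script's handling may differ):

```
def load_exceptions(path):
    """Parse 'relname:line content' entries into a set of (relname, line) pairs."""
    exceptions = set()
    with open(path) as f:
        for entry in f:
            entry = entry.rstrip("\n")
            if not entry:
                continue
            relname, _, line = entry.partition(":")
            exceptions.add((relname, line))
    return exceptions

def filter_hits(hits, exceptions):
    """Keep only the (relname, line) hits that are not whitelisted."""
    return [hit for hit in hits if hit not in exceptions]
```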