Make verify-flags-underscore.py return error on failure
We were running it in Shippable and Travis, but since it didn't return an error it wasn't actually catching anything.
parent a945785409
commit 03f4e52812
````diff
@@ -330,7 +330,7 @@ Now, we can visit the running WordPress app.
 Use the external IP that you obtained above, and visit it on port 80:
 
 ```
-http://<external_ip>
+http://<external-ip>
 ```
 
 You should see the familiar WordPress init page.
````
```diff
@@ -236,6 +236,7 @@ def main():
         bad_lines.sort()
         for (relname, line) in bad_lines:
             print("%s:%s" % (relname, line))
+        return 1
 
 if __name__ == "__main__":
     sys.exit(main())
```
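Why this one-line change is enough: `sys.exit(main())` turns `main()`'s return value into the process exit status. A `main()` that falls off the end returns `None`, and `sys.exit(None)` exits with status 0, so Shippable and Travis saw every run as a success. Returning 1 from the failure path makes CI fail the build. A minimal sketch of the pattern, with a hypothetical `find_bad_lines()` standing in for the script's real scanning logic:

```python
import sys

def find_bad_lines():
    # Hypothetical stand-in for the real scan: return (filename, line)
    # pairs whose flags use underscores instead of hyphens.
    return [("cluster/example.sh", "--some_flag=1")]

def main():
    bad_lines = find_bad_lines()
    if len(bad_lines) != 0:
        bad_lines.sort()
        for (relname, line) in bad_lines:
            print("%s:%s" % (relname, line))
        return 1  # non-zero exit status: CI marks the run as failed
    # Falling off the end returns None; sys.exit(None) exits with status 0.

if __name__ == "__main__":
    sys.exit(main())
```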
```diff
@@ -1,4 +1,6 @@
 cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
+cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
+cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
 cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
 cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed
 cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \
```
```diff
@@ -32,8 +34,7 @@ cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP
 cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]}
 cluster/mesos/docker/common/bin/util-ssl.sh: local cluster_domain="cluster.local"
 cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
```
```diff
@@ -80,3 +81,7 @@ pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID =
 test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
 test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
 test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
+test/e2e/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),
+test/e2e/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
+test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
+test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")
```
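The hunks above update the exceptions list (`hack/verify-flags/exceptions.txt`), the whitelist of `path:line` entries that are allowed to keep underscores. A rough sketch of how hits can be filtered against such a list; the regex and helper names here are assumptions for illustration, not the script's actual logic:

```python
import re

# Assumed pattern for a flag name containing an underscore, e.g. --retry_time;
# the real script derives its patterns from the flags it finds in the tree.
UNDERSCORE_FLAG = re.compile(r"--?[A-Za-z0-9]+(?:_[A-Za-z0-9]+)+")

def load_exceptions(path):
    # One whitelisted "relative/path:offending line" entry per line,
    # matching the format of the hunks above.
    with open(path) as f:
        return set(line.rstrip("\n") for line in f)

def check_file(relname, text, exceptions):
    """Yield (relname, line) pairs that hit the regex and are not whitelisted."""
    for line in text.splitlines():
        if UNDERSCORE_FLAG.search(line) and "%s:%s" % (relname, line) not in exceptions:
            yield (relname, line)
```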