From 68716c7712354e3be027c4a7770e16c92880249a Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 14:56:12 -0400
Subject: [PATCH 01/10] verify-flags-underscore.py: Remove contrib/ exceptions

As contrib was removed.
---
 hack/verify-flags/exceptions.txt | 37 --------------------------------
 1 file changed, 37 deletions(-)

diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index d2fafd279f..93580c1603 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -23,43 +23,6 @@ contrib/mesos/docs/ha.md:- `--km_path` or else (`--executor_path` and `--proxy_p
 contrib/mesos/docs/ha.md:$ ./bin/km scheduler ... --mesos_master=zk://zk1:2181,zk2:2181/mesos --ha --km_path=hdfs:///km
 contrib/mesos/docs/ha.md:- `--auth_path`
 contrib/mesos/docs/ha.md:- `--km_path`
-contrib/prometheus/README.md:http://service_address:service_port/metrics.
-contrib/ansible/vagrant/Vagrantfile:$num_nodes = (ENV['NUM_NODES'] || 2).to_i
-contrib/ansible/vagrant/Vagrantfile: $num_nodes.times do |i|
-contrib/ansible/roles/flannel/tasks/config.yml: conf_loc: "/{{ cluster_name }}/network/config"
-contrib/ansible/roles/flannel/templates/flanneld.j2:FLANNEL_ETCD_KEY="/{{ cluster_name }}/network"
-contrib/ansible/roles/kubernetes-addons/files/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
-contrib/ansible/roles/node/templates/proxy.kubeconfig.j2:current-context: proxy-to-{{ cluster_name }}
-contrib/ansible/roles/node/templates/proxy.kubeconfig.j2: cluster: {{ cluster_name }}
-contrib/ansible/roles/node/templates/proxy.kubeconfig.j2: name: proxy-to-{{ cluster_name }}
-contrib/ansible/roles/node/templates/proxy.kubeconfig.j2: name: {{ cluster_name }}
-contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2:current-context: kubelet-to-{{ cluster_name }}
-contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2: name: {{ cluster_name }}
-contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2: cluster: {{ cluster_name }}
-contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2: name: kubelet-to-{{ cluster_name }}
-contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2:current-context: scheduler-to-{{ cluster_name }}
-contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2: name: {{ cluster_name }}
-contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2: cluster: {{ cluster_name }}
-contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2: name: scheduler-to-{{ cluster_name }}
-contrib/ansible/roles/master/templates/kubectl.kubeconfig.j2:current-context: kubectl-to-{{ cluster_name }}
-contrib/ansible/roles/master/templates/kubectl.kubeconfig.j2: name: {{ cluster_name }}
-contrib/ansible/roles/master/templates/kubectl.kubeconfig.j2: cluster: {{ cluster_name }}
-contrib/ansible/roles/master/templates/kubectl.kubeconfig.j2: name: kubectl-to-{{ cluster_name }}
-contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2:current-context: controller-manager-to-{{ cluster_name }}
-contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2: name: {{ cluster_name }}
-contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2: cluster: {{ cluster_name }}
-contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2: name: controller-manager-to-{{ cluster_name }}
-contrib/ansible/roles/kubernetes/defaults/main.yml:dns_domain: "{{ cluster_name }}"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:mkdir -p "$cert_dir"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p pki/ca.crt "${cert_dir}/ca.crt"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh:cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh: chgrp "${cert_group}" "${cert_dir}/${cert}"
-contrib/ansible/roles/kubernetes/files/make-ca-cert.sh: chmod 660 "${cert_dir}/${cert}"
 examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
 examples/elasticsearch/README.md: "cluster_name" : "mytunes-db",
 examples/elasticsearch/README.md: "cluster_name" : "mytunes-db",

From c560f41a4b30b947a8a6b52bf615ef1f537c07ea Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 14:40:48 -0400
Subject: [PATCH 02/10] Update docs which were incorrectly using _ in flag
 names

Each of these was verified against the actual flag declaration, which
correctly uses -.
---
 cluster/saltbase/salt/kubelet/default |   2 +-
 contrib/mesos/docs/ha.md              |   8 +-
 docs/admin/admission-controllers.md   |   2 +-
 docs/getting-started-guides/mesos.md  |   4 +-
 hack/verify-flags/exceptions.txt      | 245 ++++++++++++--------------
 5 files changed, 123 insertions(+), 138 deletions(-)

diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default
index 6a9dcdc7d2..0962711d3a 100644
--- a/cluster/saltbase/salt/kubelet/default
+++ b/cluster/saltbase/salt/kubelet/default
@@ -30,7 +30,7 @@
 # Unless given a specific directive, disable registration for the kubelet
 # running on the master.
 {% if grains.kubelet_api_servers is defined -%}
-  {% set api_servers_with_port = "--api_servers=https://" + grains.kubelet_api_servers -%}
+  {% set api_servers_with_port = "--api-servers=https://" + grains.kubelet_api_servers -%}
 {% else -%}
   {% set api_servers_with_port = "" -%}
 {% endif -%}
diff --git a/contrib/mesos/docs/ha.md b/contrib/mesos/docs/ha.md
index fc80bcbddb..967d89cf11 100644
--- a/contrib/mesos/docs/ha.md
+++ b/contrib/mesos/docs/ha.md
@@ -34,13 +34,13 @@ In this case, if there are problems launching a replacement scheduler process th
 ##### Command Line Arguments
 
 - `--ha` is required to enable scheduler HA and multi-scheduler leader election.
-- `--km_path` or else (`--executor_path` and `--proxy_path`) should reference non-local-file URI's and must be identical across schedulers.
+- `--km-path` or else (`--executor-path` and `--proxy-path`) should reference non-local-file URI's and must be identical across schedulers.
 
 If you have HDFS installed on your slaves then you can specify HDFS URI locations for the binaries:
 
 ```shell
 $ hdfs dfs -put -f bin/km hdfs:///km
-$ ./bin/km scheduler ... --mesos_master=zk://zk1:2181,zk2:2181/mesos --ha --km_path=hdfs:///km
+$ ./bin/km scheduler ... --mesos-master=zk://zk1:2181,zk2:2181/mesos --ha --km-path=hdfs:///km
 ```
 
 **IMPORTANT:** some command line parameters specified for the scheduler process are passed to the Kubelet-executor and so are subject to compatibility tests:
@@ -54,11 +54,11 @@ The command line parameters that affect the hash calculation are listed below.
 
 - `--allow-privileged`
 - `--api-servers`
-- `--auth_path`
+- `--auth-path`
 - `--cluster_*`
 - `--executor_*`
 - `--kubelet_*`
-- `--km_path`
+- `--km-path`
 - `--profiling`
 - `--proxy_path`
diff --git a/docs/admin/admission-controllers.md b/docs/admin/admission-controllers.md
index 89d7da6049..a0dda55a8d 100644
--- a/docs/admin/admission-controllers.md
+++ b/docs/admin/admission-controllers.md
@@ -79,7 +79,7 @@ support all the features you expect.
 
 ## How do I turn on an admission control plug-in?
 
-The Kubernetes API server supports a flag, `admission_control` that takes a comma-delimited,
+The Kubernetes API server supports a flag, `admission-control` that takes a comma-delimited,
 ordered list of admission control choices to invoke prior to modifying objects in the cluster.
 
 ## What does each plug-in do?
diff --git a/docs/getting-started-guides/mesos.md b/docs/getting-started-guides/mesos.md
index 7e4c94428d..4b88bf5599 100644
--- a/docs/getting-started-guides/mesos.md
+++ b/docs/getting-started-guides/mesos.md
@@ -132,7 +132,7 @@ Update your PATH to more easily run the Kubernetes-Mesos binaries:
 export PATH="$(pwd)/_output/local/go/bin:$PATH"
 ```
 
-Identify your Mesos master: depending on your Mesos installation this is either a `host:port` like `mesos_master:5050` or a ZooKeeper URL like `zk://zookeeper:2181/mesos`.
+Identify your Mesos master: depending on your Mesos installation this is either a `host:port` like `mesos-master:5050` or a ZooKeeper URL like `zk://zookeeper:2181/mesos`.
 In order to let Kubernetes survive Mesos master changes, the ZooKeeper URL is recommended for production environments.
 
 ```bash
@@ -208,7 +208,7 @@ kubernetes component=apiserver,provider=kubernetes 10.10.10.1
 ```
 
 Lastly, look for Kubernetes in the Mesos web GUI by pointing your browser to
 `http://`. Make sure you have an active VPN connection.
 Go to the Frameworks tab, and look for an active framework named "Kubernetes".
 
 ## Spin up a pod
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index 93580c1603..ee3fef334b 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -1,116 +1,22 @@
-test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
-test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
-test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
-pkg/kubelet/qos/memory_policy_test.go: lowOomScoreAdj int // The max oom_score_adj score the container should be assigned.
-pkg/kubelet/qos/memory_policy_test.go: highOomScoreAdj int // The min oom_score_adj score the container should be assigned.
-pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
-pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj.
PID = 0 means self -pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid) -pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj") -pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to read oom_score_adj: %v", readErr) -pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to set oom_score_adj to %d: %v", oomScoreAdj, writeErr) -pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj for all processes in cgroup cgroupName. -pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`) -test/images/mount-tester/mt.go: flag.StringVar(&fsTypePath, "fs_type", "", "Path to print the fs type for") -test/images/mount-tester/mt.go: flag.StringVar(&fileModePath, "file_mode", "", "Path to print the mode bits of") -test/images/mount-tester/mt.go: flag.StringVar(&filePermPath, "file_perm", "", "Path to print the perms of") -test/images/mount-tester/mt.go: flag.StringVar(&readFileContentPath, "file_content", "", "Path to read the file content from") -test/images/mount-tester/mt.go: flag.StringVar(&newFilePath0644, "new_file_0644", "", "Path to write to and read from with perm 0644") -test/images/mount-tester/mt.go: flag.StringVar(&newFilePath0666, "new_file_0666", "", "Path to write to and read from with perm 0666") 
-test/images/mount-tester/mt.go: flag.StringVar(&newFilePath0777, "new_file_0777", "", "Path to write to and read from with perm 0777") -contrib/mesos/pkg/controllermanager/controllermanager.go: fs.BoolVar(&s.UseHostPortEndpoints, "host_port_endpoints", s.UseHostPortEndpoints, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.") -contrib/mesos/docs/ha.md:- `--km_path` or else (`--executor_path` and `--proxy_path`) should reference non-local-file URI's and must be identical across schedulers. -contrib/mesos/docs/ha.md:$ ./bin/km scheduler ... --mesos_master=zk://zk1:2181,zk2:2181/mesos --ha --km_path=hdfs:///km -contrib/mesos/docs/ha.md:- `--auth_path` -contrib/mesos/docs/ha.md:- `--km_path` examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname) examples/elasticsearch/README.md: "cluster_name" : "mytunes-db", examples/elasticsearch/README.md: "cluster_name" : "mytunes-db", examples/elasticsearch/README.md: "cluster_name" : "mytunes-db", examples/elasticsearch/README.md:"cluster_name" : "mytunes-db", -hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]} -hack/lib/logging.sh: echo " $i: ${source_file}:${source_lineno} ${funcname}(...)" >&2 -hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]} -hack/lib/logging.sh: echo "!!! Error in ${source_file}:${source_line}" >&2 -docs/devel/README.md:* **Admission Control Plugins** ([admission_control](../design/admission_control.md)) -docs/user-guide/accessing-the-cluster.md: "cluster_name" : "kubernetes_logging", -docs/design/admission_control.md:[here](http://releases.k8s.io/release-1.0/docs/design/admission_control.md). -docs/design/admission_control.md:[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/design/admission_control.md?pixel)]() -docs/admin/salt.md: etcd_servers: $MASTER_IP -docs/admin/salt.md: cloud_provider: vagrant -docs/admin/salt.md:`api_servers` | (Optional) The IP address / host name where a kubelet can get read-only access to kube-apiserver -docs/admin/salt.md:`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE. -docs/admin/salt.md:`hostname_override` | (Optional) Mapped to the kubelet hostname-override -docs/admin/admission-controllers.md:The Kubernetes API server supports a flag, `admission_control` that takes a comma-delimited, -docs/getting-started-guides/mesos.md:Identify your Mesos master: depending on your Mesos installation this is either a `host:port` like `mesos_master:5050` or a ZooKeeper URL like `zk://zookeeper:2181/mesos`. 
-docs/getting-started-guides/logging-elasticsearch.md: "cluster_name" : "kubernetes-logging", -docs/getting-started-guides/aws/cloudformation-template.json: " etcd_servers: http://localhost:2379\n", -docs/getting-started-guides/aws/cloudformation-template.json: " etcd_servers: http://localhost:2379\n", -docs/getting-started-guides/aws/cloud-configs/master.yaml: etcd_servers: http://localhost:2379 -docs/getting-started-guides/aws/cloud-configs/node.yaml: etcd_servers: http://localhost:2379 -docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: var cloud_config = cloud_config_creator(x, conf); -docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: if (cloud_config instanceof Array) { -docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config[n]; -docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config; -docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js:var cloud_config = require('../cloud_config.js'); -docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) { -docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), { -docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons'); -docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) { -cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: for k in ('etcd_servers',): -cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: template_data['etcd_servers'] = ",".join([ -cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: template_data['bind_address'] = "127.0.0.1" -cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: for k in ('etcd_servers',): -cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: template_data['etcd_servers'] = ",".join([ -cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: template_data['bind_address'] = "127.0.0.1" -cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: for k in ('etcd_servers',): -cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: template_data['etcd_servers'] = ",".join([ -cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: template_data['bind_address'] = "127.0.0.1" -cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',): -cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['etcd_servers'] = ",".join([ -cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['bind_address'] = "127.0.0.1" -cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: for k in ('etcd_servers',): -cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: template_data['etcd_servers'] = ",".join([ -cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: template_data['bind_address'] = "127.0.0.1" -cluster/juju/charms/trusty/kubernetes-master/files/apiserver.upstart.tmpl: --etcd-servers=%(etcd_servers)s \ -cluster/juju/charms/trusty/kubernetes-master/files/scheduler.upstart.tmpl: 
--address=%(bind_address)s \ -cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \ -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: for k in ('etcd_servers', 'kubeapi_server'): -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: if api_servers: -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_info = api_servers.pop() -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: template_data['etcd_servers'] = ','.join([ -cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: for k in ('etcd_servers', 'kubeapi_server'): -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: if api_servers: -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_info = api_servers.pop() -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: template_data['etcd_servers'] = ','.join([ -cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'): -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: if api_servers: -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_info = api_servers.pop() -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: template_data['etcd_servers'] = ','.join([ -cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: for k in ('etcd_servers', 'kubeapi_server'): -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: if api_servers: -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_info = api_servers.pop() -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) 
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: template_data['etcd_servers'] = ','.join([ -cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) -cluster/gce/configure-vm.sh: cloud_config: /etc/gce.conf -cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}' -cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}' -cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed +test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field. +test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"] +test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse) +cluster/vagrant/provision-master.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' +cluster/vagrant/provision-master.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")' +cluster/vagrant/provision-master.sh: service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' +cluster/vagrant/provision-master.sh: admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' +cluster/vagrant/provision-minion.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' +cluster/vagrant/provision-minion.sh: hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' +cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster). +cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name +cluster/aws/templates/salt-minion.sh: hostname_override: "${HOSTNAME_OVERRIDE}" +cluster/mesos/docker/km/build.sh:if [ -z "$km_path" ]; then +cluster/mesos/docker/km/build.sh:kube_bin_path=$(dirname ${km_path}) cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set cluster_name = "" -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set cluster_cidr = "" -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set allocate_node_cidrs = "" -%} @@ -136,6 +42,7 @@ cluster/saltbase/salt/kube-proxy/default: {% set api_servers = "--master=http cluster/saltbase/salt/kube-proxy/default: {% set api_servers = "--master=https://" + ips[0][0] -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%} +cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. 
If the limits cluster/saltbase/salt/kubelet/default:{% if grains.api_servers is defined -%} cluster/saltbase/salt/kubelet/default: {% set api_servers = "--api-servers=https://" + grains.api_servers -%} cluster/saltbase/salt/kubelet/default: {% set api_servers = "--api-servers=https://" + grains.apiservers -%} @@ -143,7 +50,6 @@ cluster/saltbase/salt/kubelet/default: {% set api_servers = "--api-servers=http cluster/saltbase/salt/kubelet/default: {% set api_servers = "--api-servers=https://" + ips[0][0] -%} cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%} cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%} -cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = "--api_servers=https://" + grains.kubelet_api_servers -%} cluster/saltbase/salt/kubelet/default:{% set cloud_provider = "" -%} cluster/saltbase/salt/kubelet/default: {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} cluster/saltbase/salt/kubelet/default:{% set manifest_url = "" -%} @@ -166,10 +72,8 @@ cluster/saltbase/salt/kubelet/default: {% set cgroup_root = "--cgroup-root=dock cluster/saltbase/salt/kubelet/default:{% set pod_cidr = "" %} cluster/saltbase/salt/kubelet/default: {% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %} cluster/saltbase/salt/kubelet/default:DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{test_args}}" -cluster/saltbase/salt/generate-cert/make-cert.sh:mkdir -p "$cert_dir" -cluster/saltbase/salt/generate-cert/make-cert.sh: -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert" -cluster/saltbase/salt/generate-cert/make-cert.sh:chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" -cluster/saltbase/salt/generate-cert/make-cert.sh:chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" +cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %} +cluster/saltbase/salt/monit/monit_watcher.sh:# Apply oom_score_adj: -901 to processes cluster/saltbase/salt/generate-cert/make-ca-cert.sh:mkdir -p "$cert_dir" cluster/saltbase/salt/generate-cert/make-ca-cert.sh: cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1 cluster/saltbase/salt/generate-cert/make-ca-cert.sh: cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1 @@ -180,10 +84,11 @@ cluster/saltbase/salt/generate-cert/make-ca-cert.sh:cp -p pki/issued/kubecfg.crt cluster/saltbase/salt/generate-cert/make-ca-cert.sh:cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" cluster/saltbase/salt/generate-cert/make-ca-cert.sh:chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" cluster/saltbase/salt/generate-cert/make-ca-cert.sh:chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" -cluster/saltbase/salt/monit/monit_watcher.sh:# Apply oom_score_adj: -901 to processes +cluster/saltbase/salt/generate-cert/make-cert.sh:mkdir -p "$cert_dir" +cluster/saltbase/salt/generate-cert/make-cert.sh: -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert" +cluster/saltbase/salt/generate-cert/make-cert.sh:chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" 
+cluster/saltbase/salt/generate-cert/make-cert.sh:chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes -cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits -cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set cloud_provider = "" -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set cloud_config = "" -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: {% set cloud_provider = "--cloud-provider=" + grains.cloud -%} @@ -223,21 +128,101 @@ cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = par cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: "/usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1" cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: "containerPort": {{secure_port}}, cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: "hostPort": {{secure_port}}},{ -cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name -cluster/aws/templates/salt-minion.sh: hostname_override: "${HOSTNAME_OVERRIDE}" -cluster/vagrant/provision-minion.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' -cluster/vagrant/provision-minion.sh: hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' -cluster/vagrant/provision-master.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' -cluster/vagrant/provision-master.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")' -cluster/vagrant/provision-master.sh: service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' -cluster/vagrant/provision-master.sh: admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' +cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',): +cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['etcd_servers'] = ",".join([ +cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['bind_address'] = "127.0.0.1" +cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: for k in ('etcd_servers',): +cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: template_data['etcd_servers'] = ",".join([ +cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: template_data['bind_address'] = "127.0.0.1" +cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: for k in ('etcd_servers',): +cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: template_data['etcd_servers'] = ",".join([ +cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: template_data['bind_address'] = "127.0.0.1" +cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: for k in ('etcd_servers',): +cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: template_data['etcd_servers'] = ",".join([ +cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: template_data['bind_address'] = "127.0.0.1" +cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: for k in ('etcd_servers',): 
+cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: template_data['etcd_servers'] = ",".join([ +cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: template_data['bind_address'] = "127.0.0.1" +cluster/juju/charms/trusty/kubernetes-master/files/apiserver.upstart.tmpl: --etcd-servers=%(etcd_servers)s \ +cluster/juju/charms/trusty/kubernetes-master/files/scheduler.upstart.tmpl: --address=%(bind_address)s \ +cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \ +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'): +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: if api_servers: +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_info = api_servers.pop() +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: template_data['etcd_servers'] = ','.join([ +cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: for k in ('etcd_servers', 'kubeapi_server'): +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: if api_servers: +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_info = api_servers.pop() +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: template_data['etcd_servers'] = ','.join([ +cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: for k in ('etcd_servers', 'kubeapi_server'): +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: if api_servers: +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_info = api_servers.pop() +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: template_data['etcd_servers'] = ','.join([ +cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: for k in ('etcd_servers', 'kubeapi_server'): +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port')) 
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port')) +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: if api_servers: +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_info = api_servers.pop() +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1]) +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: template_data['etcd_servers'] = ','.join([ +cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)]) +cluster/vsphere/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') +cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP +cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]} cluster/libvirt-coreos/user_data.yml: advertise-client-urls: http://${public_ip}:2379 cluster/libvirt-coreos/user_data.yml: initial-advertise-peer-urls: http://${public_ip}:2380 cluster/libvirt-coreos/user_data.yml: listen-peer-urls: http://${public_ip}:2380 cluster/libvirt-coreos/user_data.yml: Address=${public_ip}/24 -cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP -cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]} -cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster). -cluster/vsphere/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') -cluster/mesos/docker/km/build.sh:if [ -z "$km_path" ]; then -cluster/mesos/docker/km/build.sh:kube_bin_path=$(dirname ${km_path}) +cluster/gce/configure-vm.sh: cloud_config: /etc/gce.conf +cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}' +cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}' +cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed +hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]} +hack/lib/logging.sh: echo " $i: ${source_file}:${source_lineno} ${funcname}(...)" >&2 +hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]} +hack/lib/logging.sh: echo "!!! Error in ${source_file}:${source_line}" >&2 +pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj. PID = 0 means self +pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid) +pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj") +pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to read oom_score_adj: %v", readErr) +pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to set oom_score_adj to %d: %v", oomScoreAdj, writeErr) +pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj for all processes in cgroup cgroupName. +pkg/kubelet/qos/memory_policy_test.go: lowOomScoreAdj int // The max oom_score_adj score the container should be assigned. +pkg/kubelet/qos/memory_policy_test.go: highOomScoreAdj int // The min oom_score_adj score the container should be assigned. 
+pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj) +pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`) +docs/admin/salt.md: etcd_servers: $MASTER_IP +docs/admin/salt.md: cloud_provider: vagrant +docs/admin/salt.md:`api_servers` | (Optional) The IP address / host name where a kubelet can get read-only access to kube-apiserver +docs/admin/salt.md:`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE. +docs/admin/salt.md:`hostname_override` | (Optional) Mapped to the kubelet hostname-override +docs/design/admission_control.md:[here](http://releases.k8s.io/release-1.0/docs/design/admission_control.md). 
+docs/design/admission_control.md:[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/design/admission_control.md?pixel)]() +docs/user-guide/accessing-the-cluster.md: "cluster_name" : "kubernetes_logging", +docs/getting-started-guides/logging-elasticsearch.md: "cluster_name" : "kubernetes-logging", +docs/getting-started-guides/aws/cloudformation-template.json: " etcd_servers: http://localhost:2379\n", +docs/getting-started-guides/aws/cloudformation-template.json: " etcd_servers: http://localhost:2379\n", +docs/getting-started-guides/aws/cloud-configs/master.yaml: etcd_servers: http://localhost:2379 +docs/getting-started-guides/aws/cloud-configs/node.yaml: etcd_servers: http://localhost:2379 +docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: var cloud_config = cloud_config_creator(x, conf); +docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: if (cloud_config instanceof Array) { +docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config[n]; +docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config; +docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js:var cloud_config = require('../cloud_config.js'); +docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) { +docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), { +docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons'); +docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) { +docs/devel/README.md:* **Admission Control Plugins** ([admission_control](../design/admission_control.md)) From 055d648ed7a2e588e11219cc0bda2df7af0eafe5 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 17 Aug 2015 11:14:37 -0700 Subject: [PATCH 03/10] Change log_flush_frequency to use - Since we have the rename, we don't need our declaration to look like glog's. --- hack/verify-flags/excluded-flags.txt | 1 - hack/verify-flags/known-flags.txt | 1 + pkg/util/logs.go | 3 +-- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/hack/verify-flags/excluded-flags.txt b/hack/verify-flags/excluded-flags.txt index 6fca694c9d..10f6857783 100644 --- a/hack/verify-flags/excluded-flags.txt +++ b/hack/verify-flags/excluded-flags.txt @@ -9,7 +9,6 @@ gke_context host_port_endpoints kubecfg_file kube_master_url -log_flush_frequency max_in_flight max_par new_file_0644 diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index e843d901ec..c21da97869 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -125,6 +125,7 @@ kube-master label-columns last-release-pr legacy-userspace-proxy +log-flush-frequency long-running-request-regexp low-diskspace-threshold-mb manifest-url diff --git a/pkg/util/logs.go b/pkg/util/logs.go index eb2ef7eefe..03a7314a10 100644 --- a/pkg/util/logs.go +++ b/pkg/util/logs.go @@ -25,8 +25,7 @@ import ( "github.com/spf13/pflag" ) -// Uses _ instead of - to better align with glog flags. 
-var logFlushFreq = pflag.Duration("log_flush_frequency", 5*time.Second, "Maximum number of seconds between log flushes")
+var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes")
 
 // TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd.
 func init() {

From 7f96f9312b668da9d903da93821ae00fa700b369 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 14:46:12 -0400
Subject: [PATCH 04/10] verify-flags-underscore.py: ignore 'flags' that look
 like bash variables

We ignore bad 'flags' if they are preceded by $ or {, as these are
likely bash variables and not in fact bad flag usage.
---
 hack/verify-flags-underscore.py  |  2 +-
 hack/verify-flags/exceptions.txt | 24 ------------------------
 2 files changed, 1 insertion(+), 25 deletions(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index fdb13c6ff5..4dee7d9f25 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -164,7 +164,7 @@ def flags_to_re(flags):
         # turn all flag names into regexs which will find both types
         newre = dashRE.sub('[-_]', flag)
         # only match if there is not a leading or trailing alphanumeric character
-        flagREs.append("[^\w]" + newre + "[^\w]")
+        flagREs.append("[^\w${]" + newre + "[^\w]")
     # turn that list of regex strings into a single large RE
     flagRE = "|".join(flagREs)
     flagRE = re.compile(flagRE)
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index ee3fef334b..a8da97b414 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -15,8 +15,6 @@ cluster/vagrant/provision-minion.sh: hostname_override: '$(echo "$MINION_IP" |
 cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
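A quick way to see what the tightened pattern in PATCH 04 does is the sketch below: a hypothetical, standalone reduction of `flags_to_re` from hack/verify-flags-underscore.py to a single flag (the flag name and the driver are illustrative, not the real script's code path). The two sample bash lines are exactly the cluster/mesos/docker/km/build.sh entries that this patch drops from exceptions.txt:

```python
import re

# Reduced from flags_to_re in hack/verify-flags-underscore.py: build one regex
# for one flag, accepting either the - or the _ spelling of the name.
dash_re = re.compile(r'[-_]')
newre = dash_re.sub('[-_]', 'km-path')        # -> "km[-_]path"
# The leading class "[^\w${]" rejects a preceding '$' or '{', the telltale
# signs of a bash variable reference rather than a command-line flag.
flag_re = re.compile(r"[^\w${]" + newre + r"[^\w]")

print(bool(flag_re.search(' --km_path=hdfs:///km ')))               # True: real underscore misuse
print(bool(flag_re.search('kube_bin_path=$(dirname ${km_path})')))  # False: ${...} bash variable
print(bool(flag_re.search('if [ -z "$km_path" ]; then')))           # False: $... bash variable
```

Because the extra characters sit inside the leading character class, the change costs nothing for genuine flag usages like `--km_path=...`, where the preceding character is a dash.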
cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name cluster/aws/templates/salt-minion.sh: hostname_override: "${HOSTNAME_OVERRIDE}" -cluster/mesos/docker/km/build.sh:if [ -z "$km_path" ]; then -cluster/mesos/docker/km/build.sh:kube_bin_path=$(dirname ${km_path}) cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set cluster_name = "" -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set cluster_cidr = "" -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set allocate_node_cidrs = "" -%} @@ -74,20 +72,6 @@ cluster/saltbase/salt/kubelet/default: {% set pod_cidr = "--pod-cidr=" + grains cluster/saltbase/salt/kubelet/default:DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{test_args}}" cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %} cluster/saltbase/salt/monit/monit_watcher.sh:# Apply oom_score_adj: -901 to processes -cluster/saltbase/salt/generate-cert/make-ca-cert.sh:mkdir -p "$cert_dir" -cluster/saltbase/salt/generate-cert/make-ca-cert.sh: cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1 -cluster/saltbase/salt/generate-cert/make-ca-cert.sh: cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1 -cluster/saltbase/salt/generate-cert/make-ca-cert.sh: cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1 -cluster/saltbase/salt/generate-cert/make-ca-cert.sh: cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1 -cluster/saltbase/salt/generate-cert/make-ca-cert.sh:cp -p pki/ca.crt "${cert_dir}/ca.crt" -cluster/saltbase/salt/generate-cert/make-ca-cert.sh:cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" -cluster/saltbase/salt/generate-cert/make-ca-cert.sh:cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" -cluster/saltbase/salt/generate-cert/make-ca-cert.sh:chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" -cluster/saltbase/salt/generate-cert/make-ca-cert.sh:chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" -cluster/saltbase/salt/generate-cert/make-cert.sh:mkdir -p "$cert_dir" -cluster/saltbase/salt/generate-cert/make-cert.sh: -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert" -cluster/saltbase/salt/generate-cert/make-cert.sh:chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" -cluster/saltbase/salt/generate-cert/make-cert.sh:chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set cloud_provider = "" -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set cloud_config = "" -%} @@ -126,8 +110,6 @@ cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: {% set runtime_con cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + 
service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: "/usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1" -cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: "containerPort": {{secure_port}}, -cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: "hostPort": {{secure_port}}},{ cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',): cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['etcd_servers'] = ",".join([ cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['bind_address'] = "127.0.0.1" @@ -181,18 +163,12 @@ cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http:/ cluster/vsphere/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]} -cluster/libvirt-coreos/user_data.yml: advertise-client-urls: http://${public_ip}:2379 -cluster/libvirt-coreos/user_data.yml: initial-advertise-peer-urls: http://${public_ip}:2380 -cluster/libvirt-coreos/user_data.yml: listen-peer-urls: http://${public_ip}:2380 -cluster/libvirt-coreos/user_data.yml: Address=${public_ip}/24 cluster/gce/configure-vm.sh: cloud_config: /etc/gce.conf cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}' cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}' cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]} -hack/lib/logging.sh: echo " $i: ${source_file}:${source_lineno} ${funcname}(...)" >&2 hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]} -hack/lib/logging.sh: echo "!!! Error in ${source_file}:${source_line}" >&2 pkg/util/oom/oom_linux.go:// Writes 'value' to /proc//oom_score_adj. 
PID = 0 means self
pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")

From d7f9477a9476b900acc5d53db38293b5905675d4 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 14:54:12 -0400
Subject: [PATCH 05/10] verify-flags-underscore.py: exclude salt variables that
 look like flags
---
 hack/verify-flags-underscore.py  | 11 ++++-
 hack/verify-flags/exceptions.txt | 73 --------------------------------
 2 files changed, 9 insertions(+), 75 deletions(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index 4dee7d9f25..8ed34a03c9 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -95,8 +95,15 @@ def normalize_files(rootdir, files):
 def line_has_bad_flag(line, flagre):
     results = flagre.findall(line)
     for result in results:
-        if "_" in result:
-            return True
+        if not "_" in result:
+            return False
+        # this should exclude many cases where jinja2 templates use kube flags
+        # as variables, except it uses _ for the variable name
+        if "{% set" + result + "= \"" in line:
+            return False
+        if "pillar[" + result + "]" in line:
+            return False
+        return True
     return False
 
 # The list of files might not be the whole repo. If someone only changed a
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index a8da97b414..b19d08633e 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -15,101 +15,29 @@ cluster/vagrant/provision-minion.sh: hostname_override: '$(echo "$MINION_IP" |
 cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
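PATCH 05's heuristic relies on a detail of the regex built in PATCH 04: a match includes its one-character boundaries, so the matched text arrives with the surrounding punctuation or spaces attached, and the plain string concatenations line up exactly with jinja2 `{% set ... %}` assignments and salt `pillar[...]` lookups. Below is a small, hypothetical driver (the flag name and sample lines are illustrative) showing the intended behavior; the function body is condensed from the patched `line_has_bad_flag` above:

```python
import re

# Condensed from the patched line_has_bad_flag in hack/verify-flags-underscore.py.
def line_has_bad_flag(line, flagre):
    for result in flagre.findall(line):
        if "_" not in result:
            return False
        # result keeps its boundary characters, e.g. " cluster_name " or
        # "'cluster_name'", so the concatenations below reconstruct the exact
        # jinja2 "{% set ... %}" or salt pillar[...] source text.
        if "{% set" + result + "= \"" in line:
            return False
        if "pillar[" + result + "]" in line:
            return False
        return True
    return False

flagre = re.compile(r"[^\w${]cluster[-_]name[^\w]")
print(line_has_bad_flag('{% set cluster_name = "" -%}', flagre))   # False: jinja2 variable
print(line_has_bad_flag("pillar['cluster_name']", flagre))         # False: salt pillar lookup
print(line_has_bad_flag('pass --cluster_name to the binary', flagre))  # True: real misuse
```

This is why the patch can delete the dozens of `{% set ... %}` and `pillar[...]` entries from exceptions.txt in the hunk that follows: the checker now recognizes them on its own.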
From d1ba0bb692bf068a2d642e7716f898d75431e542 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 15:26:15 -0400
Subject: [PATCH 06/10] verify-flags-underscore.py: ignore salt grains

---
 hack/verify-flags-underscore.py  |  2 ++
 hack/verify-flags/exceptions.txt | 11 -----------
 2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index 8ed34a03c9..9ade05d9f5 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -103,6 +103,8 @@ def line_has_bad_flag(line, flagre):
             return False
         if "pillar[" + result + "]" in line:
             return False
+        if "grains" + result in line:
+            return False
         return True
     return False
 
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index b19d08633e..2b8c252dc0 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -15,27 +15,16 @@ cluster/vagrant/provision-minion.sh: hostname_override: '$(echo "$MINION_IP" |
 cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
 cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
 cluster/aws/templates/salt-minion.sh: hostname_override: "${HOSTNAME_OVERRIDE}"
-cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest: {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
-cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest: {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
-cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest: {% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
 cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%}
-cluster/saltbase/salt/kube-proxy/default: {% if grains.api_servers is defined -%}
 cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%}
 cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%}
 cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
-cluster/saltbase/salt/kubelet/default:{% if grains.api_servers is defined -%}
 cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
 cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
-cluster/saltbase/salt/kubelet/default:{% if grains.hostname_override is defined -%}
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
 cluster/saltbase/salt/monit/monit_watcher.sh:# Apply oom_score_adj: -901 to processes
 cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: {% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest: {% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% if grains.advertise_address is defined -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% if grains.runtime_config is defined -%}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
 cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',):

From f3fd2e1028891c172bf01dc03065dcdc4cb48b0a Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 15:28:03 -0400
Subject: [PATCH 07/10] verify-flags-underscore.py: ignore if trailed by :

These are often yaml definitions, and thus not usages of kubernetes flags.
Not necessarily always, but usually.
---
 hack/verify-flags-underscore.py  |  3 +++
 hack/verify-flags/exceptions.txt | 24 ------------------------
 2 files changed, 3 insertions(+), 24 deletions(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index 9ade05d9f5..b224f7a4be 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -105,6 +105,9 @@ def line_has_bad_flag(line, flagre):
             return False
         if "grains" + result in line:
             return False
+        # These are usually yaml definitions
+        if result.endswith(":"):
+            return False
         return True
     return False
 
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index 2b8c252dc0..b055c90f67 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -6,15 +6,8 @@ examples/elasticsearch/README.md:"cluster_name" : "mytunes-db",
 test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
 test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
 test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
-cluster/vagrant/provision-master.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-cluster/vagrant/provision-master.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
-cluster/vagrant/provision-master.sh: service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
-cluster/vagrant/provision-master.sh: admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
-cluster/vagrant/provision-minion.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-cluster/vagrant/provision-minion.sh: hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
 cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
 cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
-cluster/aws/templates/salt-minion.sh: hostname_override: "${HOSTNAME_OVERRIDE}"
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
 cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%}
 cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%}
@@ -23,8 +16,6 @@ cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control obje
 cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
 cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
-cluster/saltbase/salt/monit/monit_watcher.sh:# Apply oom_score_adj: -901 to processes
-cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
 cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',):
@@ -47,7 +38,6 @@ cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tm
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: if api_servers:
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: template_data['etcd_servers'] = ','.join([
@@ -55,7 +45,6 @@ cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: 'ht
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: if api_servers:
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: template_data['etcd_servers'] = ','.join([
@@ -63,7 +52,6 @@ cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: 'http:
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: if api_servers:
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: template_data['etcd_servers'] = ','.join([
@@ -71,32 +59,24 @@ cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: 'http://%s:%s' % (s
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: if api_servers:
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: template_data['etcd_servers'] = ','.join([
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
-cluster/vsphere/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
 cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP
 cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]}
-cluster/gce/configure-vm.sh: cloud_config: /etc/gce.conf
-cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
-cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
 cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed
 hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]}
 hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
 pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
 pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
 pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
-pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to read oom_score_adj: %v", readErr)
 pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to set oom_score_adj to %d: %v", oomScoreAdj, writeErr)
 pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
 pkg/kubelet/qos/memory_policy_test.go: lowOomScoreAdj int // The max oom_score_adj score the container should be assigned.
 pkg/kubelet/qos/memory_policy_test.go: highOomScoreAdj int // The min oom_score_adj score the container should be assigned.
 pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
 pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`)
-docs/admin/salt.md: etcd_servers: $MASTER_IP
-docs/admin/salt.md: cloud_provider: vagrant
 docs/admin/salt.md:`api_servers` | (Optional) The IP address / host name where a kubelet can get read-only access to kube-apiserver
 docs/admin/salt.md:`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE.
 docs/admin/salt.md:`hostname_override` | (Optional) Mapped to the kubelet hostname-override
@@ -104,10 +84,6 @@ docs/design/admission_control.md:[here](http://releases.k8s.io/release-1.0/docs/
 docs/design/admission_control.md:[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/design/admission_control.md?pixel)]()
 docs/user-guide/accessing-the-cluster.md: "cluster_name" : "kubernetes_logging",
 docs/getting-started-guides/logging-elasticsearch.md: "cluster_name" : "kubernetes-logging",
-docs/getting-started-guides/aws/cloudformation-template.json: " etcd_servers: http://localhost:2379\n",
-docs/getting-started-guides/aws/cloudformation-template.json: " etcd_servers: http://localhost:2379\n",
-docs/getting-started-guides/aws/cloud-configs/master.yaml: etcd_servers: http://localhost:2379
-docs/getting-started-guides/aws/cloud-configs/node.yaml: etcd_servers: http://localhost:2379
 docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: var cloud_config = cloud_config_creator(x, conf);
 docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: if (cloud_config instanceof Array) {
 docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config[n];
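PATCHes 06 and 07 extend the same predicate, so continuing the sketch above with the same demo assumptions: grain reads like `grains.cloud_config` are salt variable accesses, and a match with a trailing `:` is usually a yaml key rather than a flag.

```python
# Sketch of the PATCH 06/07 exclusions; KNOWN_FLAGS and the patterns
# remain demo assumptions, as in the sketch above.
import re

KNOWN_FLAGS = ["cloud_config", "hostname_override", "resource_container"]
flagre = re.compile("(?:%s):?" % "|".join(KNOWN_FLAGS))

def line_has_bad_flag(line):
    for result in flagre.findall(line):
        name = result.rstrip(":")
        # PATCH 06: grains.cloud_config and friends are salt grain reads
        if ("grains." + name) in line:
            continue
        # PATCH 07: a trailing ":" usually marks a yaml key definition
        if result.endswith(":"):
            continue
        return True
    return False

print(line_has_bad_flag("{% if grains.cloud_config is defined -%}"))   # False
print(line_has_bad_flag('hostname_override: "${HOSTNAME_OVERRIDE}"'))  # False
print(line_has_bad_flag("--resource_container=/kube-proxy"))           # True
```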
From b432f036dbe4d74173dc1d1eee3c45004de3838e Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 15:30:46 -0400
Subject: [PATCH 08/10] verify-flags-underscore.py: ignore juju variables

---
 hack/verify-flags-underscore.py  |  3 +++
 hack/verify-flags/exceptions.txt | 14 --------------
 2 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index b224f7a4be..39cad75039 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -108,6 +108,9 @@ def line_has_bad_flag(line, flagre):
         # These are usually yaml definitions
         if result.endswith(":"):
             return False
+        # something common in juju variables...
+        if "template_data[" + result + "]" in line:
+            return False
         return True
     return False
 
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index b055c90f67..532b2932f7 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -19,20 +19,10 @@ cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pi
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
 cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['etcd_servers'] = ",".join([
-cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: template_data['bind_address'] = "127.0.0.1"
 cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: template_data['etcd_servers'] = ",".join([
-cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: template_data['bind_address'] = "127.0.0.1"
 cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: template_data['etcd_servers'] = ",".join([
-cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: template_data['bind_address'] = "127.0.0.1"
 cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: template_data['etcd_servers'] = ",".join([
-cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: template_data['bind_address'] = "127.0.0.1"
 cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: template_data['etcd_servers'] = ",".join([
-cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: template_data['bind_address'] = "127.0.0.1"
 cluster/juju/charms/trusty/kubernetes-master/files/scheduler.upstart.tmpl: --address=%(bind_address)s \
 cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
@@ -40,28 +30,24 @@ cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_se
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: template_data['etcd_servers'] = ','.join([
 cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: template_data['etcd_servers'] = ','.join([
 cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: template_data['etcd_servers'] = ','.join([
 cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_info = api_servers.pop()
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: template_data['etcd_servers'] = ','.join([
 cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
 cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP
 cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]}

From 99dc464c9bb4dca935012c6ba59fdf20b88ef756 Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Fri, 14 Aug 2015 15:41:09 -0400
Subject: [PATCH 09/10] verify-flags-underscore.py: update how to add to
 exclusion list

---
 hack/verify-flags-underscore.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index 39cad75039..047b7f5daa 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -229,7 +229,7 @@ def main():
 
     if len(bad_lines) != 0:
         if not args.skip_exceptions:
-            print("Found illegal 'flag' usage. If this is a false positive add the following line(s) to hack/verify-flags/exceptions.txt:")
+            print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
         for (relname, line) in bad_lines:
             print("%s:%s" % (relname, line))
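PATCH 08 rounds out the predicate: juju charm hooks assign flag-named keys into a `template_data` dict. A sketch under the same demo assumptions follows; PATCH 09 then changes the failure message so that, rather than hand-editing, the expected workflow is to regenerate the whole list with `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt`.

```python
# Sketch of the PATCH 08 exclusion, same demo assumptions as above.
import re

KNOWN_FLAGS = ["etcd_servers", "api_servers"]
flagre = re.compile("|".join(KNOWN_FLAGS))

def line_has_bad_flag(line):
    for result in flagre.findall(line):
        # juju charm hooks keep flag-named keys in a template_data dict
        if ("template_data['" + result + "']") in line:
            continue
        return True
    return False

print(line_has_bad_flag("template_data['etcd_servers'] = \",\".join([")) # False
print(line_has_bad_flag("--etcd_servers=http://localhost:2379"))         # True
```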
From ca310ffdac8d33671f32d7cb91c8fb1278cd059f Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Mon, 17 Aug 2015 11:32:18 -0700
Subject: [PATCH 10/10] Sort output from hack/verify-flags-underscore.py

So we get some consistency and readability in exceptions.txt.
---
 hack/verify-flags-underscore.py  |   3 +
 hack/verify-flags/exceptions.txt | 144 +++++++++++++++----------------
 2 files changed, 75 insertions(+), 72 deletions(-)

diff --git a/hack/verify-flags-underscore.py b/hack/verify-flags-underscore.py
index 047b7f5daa..af86f97d9b 100755
--- a/hack/verify-flags-underscore.py
+++ b/hack/verify-flags-underscore.py
@@ -163,10 +163,12 @@ def get_flags(rootdir, files):
     if len(new_excluded_flags) != 0:
         print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
         print("Are you certain this flag should not have been declared with an - instead?")
+        new_excluded_flags.sort()
         print("%s" % "\n".join(new_excluded_flags))
         sys.exit(1)
     if len(new_flags) != 0:
         print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
+        new_flags.sort()
         print("%s" % "\n".join(new_flags))
         sys.exit(1)
     return list(flags)
@@ -230,6 +232,7 @@ def main():
     if len(bad_lines) != 0:
         if not args.skip_exceptions:
             print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
+            bad_lines.sort()
         for (relname, line) in bad_lines:
             print("%s:%s" % (relname, line))
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index 532b2932f7..7b6b0666b3 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -1,82 +1,82 @@
+cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
+cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
+cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed
+cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \
+cluster/juju/charms/trusty/kubernetes-master/files/scheduler.upstart.tmpl: --address=%(bind_address)s \
+cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: for k in ('etcd_servers',):
+cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: for k in ('etcd_servers',):
+cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: for k in ('etcd_servers',):
+cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: for k in ('etcd_servers',):
+cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',):
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_info = api_servers.pop()
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
+cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
+cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_info = api_servers.pop()
+cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
+cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
+cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
+cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_info = api_servers.pop()
+cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
+cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: for k in ('etcd_servers', 'kubeapi_server'):
+cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
+cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_info = api_servers.pop()
+cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
+cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
+cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
+cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP
+cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]}
+cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
+cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
+cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
+cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
+cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
+cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%}
+cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%}
+cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%}
+cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
+cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
+docs/admin/salt.md:`api_servers` | (Optional) The IP address / host name where a kubelet can get read-only access to kube-apiserver
+docs/admin/salt.md:`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE.
+docs/admin/salt.md:`hostname_override` | (Optional) Mapped to the kubelet hostname-override
+docs/design/admission_control.md:[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/design/admission_control.md?pixel)]()
+docs/design/admission_control.md:[here](http://releases.k8s.io/release-1.0/docs/design/admission_control.md).
+docs/devel/README.md:* **Admission Control Plugins** ([admission_control](../design/admission_control.md))
+docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config;
+docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config[n];
+docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: if (cloud_config instanceof Array) {
+docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: var cloud_config = cloud_config_creator(x, conf);
+docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), {
+docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) {
+docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) {
+docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons');
+docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js:var cloud_config = require('../cloud_config.js');
+docs/getting-started-guides/logging-elasticsearch.md: "cluster_name" : "kubernetes-logging",
+docs/user-guide/accessing-the-cluster.md: "cluster_name" : "kubernetes_logging",
 examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
 examples/elasticsearch/README.md: "cluster_name" : "mytunes-db",
 examples/elasticsearch/README.md: "cluster_name" : "mytunes-db",
 examples/elasticsearch/README.md: "cluster_name" : "mytunes-db",
 examples/elasticsearch/README.md:"cluster_name" : "mytunes-db",
-test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
-test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
-test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
-cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `-cadvisor_port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
-cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
-cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
-cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%}
-cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%}
-cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%}
-cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
-cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
-cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
-cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
-cluster/juju/charms/trusty/kubernetes-master/hooks/network-relation-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/etcd-relation-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/minions-api-relation-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/hooks/config-changed: for k in ('etcd_servers',):
-cluster/juju/charms/trusty/kubernetes-master/files/scheduler.upstart.tmpl: --address=%(bind_address)s \
-cluster/juju/charms/trusty/kubernetes-master/files/controller-manager.upstart.tmpl: --address=%(bind_address)s \
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_info = api_servers.pop()
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/network-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_info = api_servers.pop()
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/etcd-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: for k in ('etcd_servers', 'kubeapi_server'):
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_info = api_servers.pop()
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/hooks.py: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: for k in ('etcd_servers', 'kubeapi_server'):
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_info = api_servers.pop()
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
-cluster/juju/charms/trusty/kubernetes/hooks/api-relation-changed: 'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
-cluster/libvirt-coreos/util.sh: public_ip=$MASTER_IP
-cluster/libvirt-coreos/util.sh: public_ip=${MINION_IPS[$i]}
-cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed
 hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]}
 hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
-pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
+pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`)
+pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
+pkg/kubelet/qos/memory_policy_test.go: highOomScoreAdj int // The min oom_score_adj score the container should be assigned.
+pkg/kubelet/qos/memory_policy_test.go: lowOomScoreAdj int // The max oom_score_adj score the container should be assigned.
+pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to set oom_score_adj to %d: %v", oomScoreAdj, writeErr)
 pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
 pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
-pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to set oom_score_adj to %d: %v", oomScoreAdj, writeErr)
 pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
-pkg/kubelet/qos/memory_policy_test.go: lowOomScoreAdj int // The max oom_score_adj score the container should be assigned.
-pkg/kubelet/qos/memory_policy_test.go: highOomScoreAdj int // The min oom_score_adj score the container should be assigned.
-pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
-pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`)
-docs/admin/salt.md:`api_servers` | (Optional) The IP address / host name where a kubelet can get read-only access to kube-apiserver
-docs/admin/salt.md:`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE.
-docs/admin/salt.md:`hostname_override` | (Optional) Mapped to the kubelet hostname-override
-docs/design/admission_control.md:[here](http://releases.k8s.io/release-1.0/docs/design/admission_control.md).
-docs/design/admission_control.md:[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/design/admission_control.md?pixel)]()
-docs/user-guide/accessing-the-cluster.md: "cluster_name" : "kubernetes_logging",
-docs/getting-started-guides/logging-elasticsearch.md: "cluster_name" : "kubernetes-logging",
-docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: var cloud_config = cloud_config_creator(x, conf);
-docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: if (cloud_config instanceof Array) {
-docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config[n];
-docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js: host.cloud_config_file = cloud_config;
-docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js:var cloud_config = require('../cloud_config.js');
-docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) {
-docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), {
-docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons');
-docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: return cloud_config.process_template(input_file, output_file, function(data) {
-docs/devel/README.md:* **Admission Control Plugins** ([admission_control](../design/admission_control.md))
+pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
+test/e2e/es_cluster_logging.go: Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
+test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
+test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]