Update code to use - in flag names instead of _

pull/6/head
Eric Paris 2015-08-11 16:29:50 -04:00
parent 9fdd793555
commit 5aa495cdad
13 changed files with 15 additions and 15 deletions

View File

@@ -99,7 +99,7 @@ Key | Value
`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE.
`hostnamef` | (Optional) The full host name of the machine, i.e. uname -n
`node_ip` | (Optional) The IP address to use to address this node
-`hostname_override` | (Optional) Mapped to the kubelet hostname_override
+`hostname_override` | (Optional) Mapped to the kubelet hostname-override
`network_mode` | (Optional) Networking model to use among nodes: *openvswitch*
`networkInterfaceName` | (Optional) Networking interface to use to bind addresses, default value *eth0*
`publicAddressOverride` | (Optional) The IP address the kube-apiserver should use to bind against for external read-only access
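
For reference, the `hostname_override` value above is handed through to the kubelet's `--hostname-override` flag; a minimal sketch with a hypothetical node name:

```console
$ kubelet --hostname-override=node-1.example.com
```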

View File

@@ -63,8 +63,8 @@ The kube-apiserver takes the following OPTIONAL arguments to enable admission co
| Option | Behavior |
| ------ | -------- |
-| admission_control | Comma-delimited, ordered list of admission control choices to invoke prior to modifying or deleting an object. |
-| admission_control_config_file | File with admission control configuration parameters to boot-strap plug-in. |
+| admission-control | Comma-delimited, ordered list of admission control choices to invoke prior to modifying or deleting an object. |
+| admission-control-config-file | File with admission control configuration parameters to boot-strap plug-in. |
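
Combined, the renamed options might look like this on the command line (the plug-in list and config path here are illustrative):

```console
$ kube-apiserver --admission-control=NamespaceLifecycle,LimitRanger,ResourceQuota --admission-control-config-file=/etc/kubernetes/admission-control.conf
```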
An **AdmissionControl** plug-in is an implementation of the following interface:

View File

@@ -137,7 +137,7 @@ If a constraint is not specified for an enumerated resource, it is not enforced
To enable the plug-in and support for LimitRange, the kube-apiserver must be configured as follows:
```console
-$ kube-apiserver -admission_control=LimitRanger
+$ kube-apiserver --admission-control=LimitRanger
```
### Enforcement of constraints

View File

@@ -178,7 +178,7 @@ The **ResourceQuota** plug-in introspects all incoming admission requests.
To enable the plug-in and support for ResourceQuota, the kube-apiserver must be configured as follows:
```
-$ kube-apiserver -admission_control=ResourceQuota
+$ kube-apiserver --admission-control=ResourceQuota
```
It makes decisions by evaluating the incoming object against all defined **ResourceQuota.Status.Hard** resource limits in the request

View File

@@ -107,7 +107,7 @@ systemctl stop iptables-services firewalld
**Configure the Kubernetes services on the master.**
-* Edit /etc/kubernetes/apiserver to appear as such. The service_cluster_ip_range IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything.
+* Edit /etc/kubernetes/apiserver to appear as such. The service-cluster-ip-range IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything.
```sh
# The address on the local server to listen to.
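# (Sketch, not part of the excerpt above: the renamed flag would typically
# be set further down in this file; the variable name and CIDR below are
# assumptions.)
# KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"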

View File

@@ -266,7 +266,7 @@ request. To do this, set the `spec.clusterIP` field. For example, if you
already have an existing DNS entry that you wish to replace, or legacy systems
that are configured for a specific IP address and difficult to re-configure.
The IP address that a user chooses must be a valid IP address and within the
-service_cluster_ip_range CIDR range that is specified by flag to the API
+service-cluster-ip-range CIDR range that is specified by flag to the API
server. If the IP address value is invalid, the apiserver returns a 422 HTTP
status code to indicate that the value is invalid.
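
For example, with the apiserver started as below (the CIDR is illustrative), a user-chosen `spec.clusterIP` of 10.0.171.239 would be accepted, while an address outside 10.0.0.0/16 would draw the 422:

```console
$ kube-apiserver --service-cluster-ip-range=10.0.0.0/16
```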

View File

@@ -41,7 +41,7 @@ KUBECTL="${KUBE_OUTPUT_HOSTBIN}/kubectl"
UPDATE_ETCD_OBJECTS_SCRIPT="${KUBE_ROOT}/cluster/update-storage-objects.sh"
function startApiServer() {
-kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS} and runtime_config: ${RUNTIME_CONFIG}"
+kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS} and runtime-config: ${RUNTIME_CONFIG}"
KUBE_API_VERSIONS="${KUBE_API_VERSIONS}" \
"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \

View File

@@ -35,7 +35,7 @@ import (
flag "github.com/spf13/pflag"
)
-var fuzzIters = flag.Int("fuzz_iters", 20, "How many fuzzing iterations to do.")
+var fuzzIters = flag.Int("fuzz-iters", 20, "How many fuzzing iterations to do.")
func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, seed int64) runtime.Object {
apitesting.FuzzerFor(t, forVersion, rand.NewSource(seed)).Fuzz(item)

View File

@@ -65,7 +65,7 @@ type NodeController struct {
// sync node status in this case, but will monitor node status updated from kubelet. If
// it doesn't receive update for this amount of time, it will start posting "NodeReady==
// ConditionUnknown". The amount of time before which NodeController start evicting pods
-// is controlled via flag 'pod_eviction_timeout'.
+// is controlled via flag 'pod-eviction-timeout'.
// Note: be cautious when changing the constant, it must work with nodeStatusUpdateFrequency
// in kubelet. There are several constraints:
// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
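
The flag named in this comment is set on the controller manager; an illustrative invocation (the duration value is illustrative):

```console
$ kube-controller-manager --pod-eviction-timeout=5m0s
```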

View File

@@ -30,7 +30,7 @@ import (
flag "github.com/spf13/pflag"
)
-var fuzzIters = flag.Int("fuzz_iters", 50, "How many fuzzing iterations to do.")
+var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.")
// Test a weird version/kind embedding format.
type MyWeirdCustomEmbeddedVersionKindField struct {

View File

@@ -229,7 +229,7 @@ func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manag
}
}
-// Also apply oom_score_adj to processes
+// Also apply oom-score-adj to processes
oomAdjuster := oom.NewOomAdjuster()
if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
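
For context, `ApplyOomScoreAdj` writes the score into the kernel's per-process control file; the manual equivalent is roughly the following, with a hypothetical PID and value:

```console
$ echo -900 > /proc/1234/oom_score_adj
```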

View File

@@ -1740,7 +1740,7 @@ func (kl *Kubelet) admitPods(allPods []*api.Pod, podSyncTypes map[types.UID]Sync
// three channels (file, apiserver, and http) and creates a union of them. For
// any new change seen, will run a sync against desired state and running state. If
// no changes are seen to the configuration, will synchronize the last known desired
-// state every sync_frequency seconds. Never returns.
+// state every sync-frequency seconds. Never returns.
func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
glog.Info("Starting kubelet main sync loop.")
for {
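
The interval mentioned in this comment comes from the kubelet's `--sync-frequency` flag; an illustrative setting:

```console
$ kubelet --sync-frequency=10s
```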

View File

@@ -582,7 +582,7 @@ var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST"
// Ensure that the iptables infrastructure we use is set up. This can safely be called periodically.
func iptablesInit(ipt iptables.Interface) error {
// TODO: There is almost certainly room for optimization here. E.g. If
-// we knew the service_cluster_ip_range CIDR we could fast-track outbound packets not
+// we knew the service-cluster-ip-range CIDR we could fast-track outbound packets not
// destined for a service. There's probably more, help wanted.
// Danger - order of these rules matters here:
@@ -602,7 +602,7 @@ func iptablesInit(ipt iptables.Interface) error {
// the NodePort would take priority (incorrectly).
// This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems
// doubly-unlikely), but we need to be careful to keep the rules in the right order.
-args := []string{ /* service_cluster_ip_range matching could go here */ }
+args := []string{ /* service-cluster-ip-range matching could go here */ }
args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules")
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
return err
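
Since the comments above are all about preserving rule order in the NAT table, a quick way to verify the resulting ordering on a node is a plain iptables listing (illustrative, standard iptables usage):

```console
$ iptables -t nat -L PREROUTING --line-numbers
```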