mirror of https://github.com/k3s-io/k3s
Fix various typos in kubectl
parent
e3fa83177c
commit
91cace347d
|
@ -45,10 +45,10 @@ var (
|
||||||
An autoscaler can automatically increase or decrease number of pods deployed within the system as needed.`)
|
An autoscaler can automatically increase or decrease number of pods deployed within the system as needed.`)
|
||||||
|
|
||||||
autoscaleExample = dedent.Dedent(`
|
autoscaleExample = dedent.Dedent(`
|
||||||
# Auto scale a deployment "foo", with the number of pods between 2 to 10, target CPU utilization specified so a default autoscaling policy will be used:
|
# Auto scale a deployment "foo", with the number of pods between 2 and 10, target CPU utilization specified so a default autoscaling policy will be used:
|
||||||
kubectl autoscale deployment foo --min=2 --max=10
|
kubectl autoscale deployment foo --min=2 --max=10
|
||||||
|
|
||||||
# Auto scale a replication controller "foo", with the number of pods between 1 to 5, target CPU utilization at 80%:
|
# Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80%:
|
||||||
kubectl autoscale rc foo --max=5 --cpu-percent=80`)
|
kubectl autoscale rc foo --max=5 --cpu-percent=80`)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ func NewCmdConfig(pathOptions *clientcmd.PathOptions, out io.Writer) *cobra.Comm
|
||||||
|
|
||||||
The loading order follows these rules:
|
The loading order follows these rules:
|
||||||
1. If the --` + pathOptions.ExplicitFileFlag + ` flag is set, then only that file is loaded. The flag may only be set once and no merging takes place.
|
1. If the --` + pathOptions.ExplicitFileFlag + ` flag is set, then only that file is loaded. The flag may only be set once and no merging takes place.
|
||||||
2. If $` + pathOptions.EnvVar + ` environment variable is set, then it is used a list of paths (normal path delimitting rules for your system). These paths are merged together. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list.
|
2. If $` + pathOptions.EnvVar + ` environment variable is set, then it is used as a list of paths (normal path delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list.
|
||||||
3. Otherwise, ` + path.Join("${HOME}", pathOptions.GlobalFileSubpath) + ` is used and no merging takes place.
|
3. Otherwise, ` + path.Join("${HOME}", pathOptions.GlobalFileSubpath) + ` is used and no merging takes place.
|
||||||
`,
|
`,
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
|
|
@ -61,7 +61,7 @@ func NewCmdConfigSetCluster(out io.Writer, configAccess clientcmd.ConfigAccess)
|
||||||
options := &createClusterOptions{configAccess: configAccess}
|
options := &createClusterOptions{configAccess: configAccess}
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certficate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure),
|
Use: fmt.Sprintf("set-cluster NAME [--%v=server] [--%v=path/to/certificate/authority] [--%v=true]", clientcmd.FlagAPIServer, clientcmd.FlagCAFile, clientcmd.FlagInsecure),
|
||||||
Short: "Sets a cluster entry in kubeconfig",
|
Short: "Sets a cluster entry in kubeconfig",
|
||||||
Long: create_cluster_long,
|
Long: create_cluster_long,
|
||||||
Example: create_cluster_example,
|
Example: create_cluster_example,
|
||||||
|
|
|
@ -45,7 +45,7 @@ type setOptions struct {
|
||||||
|
|
||||||
var set_long = dedent.Dedent(`
|
var set_long = dedent.Dedent(`
|
||||||
Sets an individual value in a kubeconfig file
|
Sets an individual value in a kubeconfig file
|
||||||
PROPERTY_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots.
|
PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.
|
||||||
PROPERTY_VALUE is the new value you wish to set. Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.`)
|
PROPERTY_VALUE is the new value you wish to set. Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.`)
|
||||||
|
|
||||||
func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
|
func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
|
||||||
|
|
|
@ -35,7 +35,7 @@ type unsetOptions struct {
|
||||||
|
|
||||||
var unset_long = dedent.Dedent(`
|
var unset_long = dedent.Dedent(`
|
||||||
Unsets an individual value in a kubeconfig file
|
Unsets an individual value in a kubeconfig file
|
||||||
PROPERTY_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots.`)
|
PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.`)
|
||||||
|
|
||||||
func NewCmdConfigUnset(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
|
func NewCmdConfigUnset(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
|
||||||
options := &unsetOptions{configAccess: configAccess}
|
options := &unsetOptions{configAccess: configAccess}
|
||||||
|
|
|
@ -89,10 +89,10 @@ func NewCmdConfigView(out io.Writer, ConfigAccess clientcmd.ConfigAccess) *cobra
|
||||||
cmd.Flags().Set("output", defaultOutputFormat)
|
cmd.Flags().Set("output", defaultOutputFormat)
|
||||||
|
|
||||||
options.Merge.Default(true)
|
options.Merge.Default(true)
|
||||||
f := cmd.Flags().VarPF(&options.Merge, "merge", "", "merge together the full hierarchy of kubeconfig files")
|
f := cmd.Flags().VarPF(&options.Merge, "merge", "", "merge the full hierarchy of kubeconfig files")
|
||||||
f.NoOptDefVal = "true"
|
f.NoOptDefVal = "true"
|
||||||
cmd.Flags().BoolVar(&options.RawByteData, "raw", false, "display raw byte data")
|
cmd.Flags().BoolVar(&options.RawByteData, "raw", false, "display raw byte data")
|
||||||
cmd.Flags().BoolVar(&options.Flatten, "flatten", false, "flatten the resulting kubeconfig file into self contained output (useful for creating portable kubeconfig files)")
|
cmd.Flags().BoolVar(&options.Flatten, "flatten", false, "flatten the resulting kubeconfig file into self-contained output (useful for creating portable kubeconfig files)")
|
||||||
cmd.Flags().BoolVar(&options.Minify, "minify", false, "remove all information not used by current-context from the output")
|
cmd.Flags().BoolVar(&options.Minify, "minify", false, "remove all information not used by current-context from the output")
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
|
@ -110,7 +110,7 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command {
|
||||||
// TODO: remove create-external-load-balancer in code on or after Aug 25, 2016.
|
// TODO: remove create-external-load-balancer in code on or after Aug 25, 2016.
|
||||||
cmd.Flags().Bool("create-external-load-balancer", false, "If true, create an external load balancer for this service (trumped by --type). Implementation is cloud provider dependent. Default is 'false'.")
|
cmd.Flags().Bool("create-external-load-balancer", false, "If true, create an external load balancer for this service (trumped by --type). Implementation is cloud provider dependent. Default is 'false'.")
|
||||||
cmd.Flags().MarkDeprecated("create-external-load-balancer", "use --type=\"LoadBalancer\" instead")
|
cmd.Flags().MarkDeprecated("create-external-load-balancer", "use --type=\"LoadBalancer\" instead")
|
||||||
cmd.Flags().String("load-balancer-ip", "", "IP to assign to to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).")
|
cmd.Flags().String("load-balancer-ip", "", "IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).")
|
||||||
cmd.Flags().String("selector", "", "A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.")
|
cmd.Flags().String("selector", "", "A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.")
|
||||||
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.")
|
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.")
|
||||||
cmd.Flags().String("container-port", "", "Synonym for --target-port")
|
cmd.Flags().String("container-port", "", "Synonym for --target-port")
|
||||||
|
|
|
@ -88,7 +88,7 @@ func (c *ClientCache) ClientConfigForVersion(version *unversioned.GroupVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
// `version` does not necessarily equal `config.Version`. However, we know that we call this method again with
|
// `version` does not necessarily equal `config.Version`. However, we know that we call this method again with
|
||||||
// `config.Version`, we should get the the config we've just built.
|
// `config.Version`, we should get the config we've just built.
|
||||||
configCopy := config
|
configCopy := config
|
||||||
c.configs[*config.GroupVersion] = &configCopy
|
c.configs[*config.GroupVersion] = &configCopy
|
||||||
|
|
||||||
|
|
|
@ -316,8 +316,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
|
||||||
multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
|
multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
|
||||||
}
|
}
|
||||||
priorityMapper.Delegate = multiMapper
|
priorityMapper.Delegate = multiMapper
|
||||||
// Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we
|
// Reassign to the RESTMapper here because priorityMapper is actually a copy, so if we
|
||||||
// don't re-assign, the above assignement won't actually update mapper.RESTMapper
|
// don't reassign, the above assignment won't actually update mapper.RESTMapper
|
||||||
mapper.RESTMapper = priorityMapper
|
mapper.RESTMapper = priorityMapper
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1006,7 +1006,7 @@ func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cac
|
||||||
}
|
}
|
||||||
err = schema.ValidateBytes(data)
|
err = schema.ValidateBytes(data)
|
||||||
if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen {
|
if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen {
|
||||||
// As a temporay hack, kubectl would re-get the schema if validation
|
// As a temporary hack, kubectl would re-get the schema if validation
|
||||||
// fails for type not found reason.
|
// fails for type not found reason.
|
||||||
// TODO: runtime-config settings needs to make into the file's name
|
// TODO: runtime-config settings need to make it into the file's name
|
||||||
schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion)
|
schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion)
|
||||||
|
@ -1106,7 +1106,7 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
|
||||||
|
|
||||||
// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy:
|
// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy:
|
||||||
// 1. Use the kubeconfig builder. The number of merges and overrides here gets a little crazy. Stay with me.
|
// 1. Use the kubeconfig builder. The number of merges and overrides here gets a little crazy. Stay with me.
|
||||||
// 1. Merge together the kubeconfig itself. This is done with the following hierarchy rules:
|
// 1. Merge the kubeconfig itself. This is done with the following hierarchy rules:
|
||||||
// 1. CommandLineLocation - this parsed from the command line, so it must be late bound. If you specify this,
|
// 1. CommandLineLocation - this is parsed from the command line, so it must be late bound. If you specify this,
|
||||||
// then no other kubeconfig files are merged. This file must exist.
|
// then no other kubeconfig files are merged. This file must exist.
|
||||||
// 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged.
|
// 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged.
|
||||||
|
|
|
@ -105,7 +105,7 @@ func splitOnWhitespace(line string) []string {
|
||||||
|
|
||||||
// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected
|
// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected
|
||||||
// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec
|
// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec
|
||||||
// For example the template below:
|
// For example, the template below:
|
||||||
// NAME API_VERSION
|
// NAME API_VERSION
|
||||||
// {metadata.name} {apiVersion}
|
// {metadata.name} {apiVersion}
|
||||||
func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) {
|
func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) {
|
||||||
|
|
|
@ -63,7 +63,7 @@ type FilterServer struct {
|
||||||
delegate http.Handler
|
delegate http.Handler
|
||||||
}
|
}
|
||||||
|
|
||||||
// Splits a comma separated list of regexps into a array of Regexp objects.
|
// Splits a comma separated list of regexps into an array of Regexp objects.
|
||||||
func MakeRegexpArray(str string) ([]*regexp.Regexp, error) {
|
func MakeRegexpArray(str string) ([]*regexp.Regexp, error) {
|
||||||
parts := strings.Split(str, ",")
|
parts := strings.Split(str, ",")
|
||||||
result := make([]*regexp.Regexp, len(parts))
|
result := make([]*regexp.Regexp, len(parts))
|
||||||
|
|
|
@ -274,13 +274,13 @@ func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder
|
||||||
|
|
||||||
// tryConvert attempts to convert the given object to the provided versions in order. This function assumes
|
// tryConvert attempts to convert the given object to the provided versions in order. This function assumes
|
||||||
// the object is in internal version.
|
// the object is in internal version.
|
||||||
func tryConvert(convertor runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) {
|
func tryConvert(converter runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) {
|
||||||
var last error
|
var last error
|
||||||
for _, version := range versions {
|
for _, version := range versions {
|
||||||
if version.IsEmpty() {
|
if version.IsEmpty() {
|
||||||
return object, nil
|
return object, nil
|
||||||
}
|
}
|
||||||
obj, err := convertor.ConvertToVersion(object, version)
|
obj, err := converter.ConvertToVersion(object, version)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
last = err
|
last = err
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -427,7 +427,7 @@ func FileVisitorForSTDIN(mapper *Mapper, schema validation.Schema) Visitor {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path.
|
// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path.
|
||||||
// After FileVisitors open the files, they will pass a io.Reader to a StreamVisitor to do the reading. (stdin
|
// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading. (stdin
|
||||||
// is also taken care of). Paths argument also accepts a single file, and will return a single visitor
|
// is also taken care of). Paths argument also accepts a single file, and will return a single visitor
|
||||||
func ExpandPathsToFileVisitors(mapper *Mapper, paths string, recursive bool, extensions []string, schema validation.Schema) ([]Visitor, error) {
|
func ExpandPathsToFileVisitors(mapper *Mapper, paths string, recursive bool, extensions []string, schema validation.Schema) ([]Visitor, error) {
|
||||||
var visitors []Visitor
|
var visitors []Visitor
|
||||||
|
|
|
@ -165,15 +165,15 @@ func (fn ResourcePrinterFunc) HandledResources() []string {
|
||||||
// prior to being passed to a nested printer.
|
// prior to being passed to a nested printer.
|
||||||
type VersionedPrinter struct {
|
type VersionedPrinter struct {
|
||||||
printer ResourcePrinter
|
printer ResourcePrinter
|
||||||
convertor runtime.ObjectConvertor
|
converter runtime.ObjectConvertor
|
||||||
versions []unversioned.GroupVersion
|
versions []unversioned.GroupVersion
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewVersionedPrinter wraps a printer to convert objects to a known API version prior to printing.
|
// NewVersionedPrinter wraps a printer to convert objects to a known API version prior to printing.
|
||||||
func NewVersionedPrinter(printer ResourcePrinter, convertor runtime.ObjectConvertor, versions ...unversioned.GroupVersion) ResourcePrinter {
|
func NewVersionedPrinter(printer ResourcePrinter, converter runtime.ObjectConvertor, versions ...unversioned.GroupVersion) ResourcePrinter {
|
||||||
return &VersionedPrinter{
|
return &VersionedPrinter{
|
||||||
printer: printer,
|
printer: printer,
|
||||||
convertor: convertor,
|
converter: converter,
|
||||||
versions: versions,
|
versions: versions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -187,7 +187,7 @@ func (p *VersionedPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
|
||||||
if version.IsEmpty() {
|
if version.IsEmpty() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
converted, err := p.convertor.ConvertToVersion(obj, version)
|
converted, err := p.converter.ConvertToVersion(obj, version)
|
||||||
if runtime.IsNotRegisteredError(err) {
|
if runtime.IsNotRegisteredError(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -287,7 +287,7 @@ func (p *JSONPrinter) HandledResources() []string {
|
||||||
// to the given version first.
|
// to the given version first.
|
||||||
type YAMLPrinter struct {
|
type YAMLPrinter struct {
|
||||||
version string
|
version string
|
||||||
convertor runtime.ObjectConvertor
|
converter runtime.ObjectConvertor
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrintObj prints the data as YAML.
|
// PrintObj prints the data as YAML.
|
||||||
|
|
|
@ -214,7 +214,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
|
||||||
if desired > 0 && maxUnavailable == 0 && maxSurge == 0 {
|
if desired > 0 && maxUnavailable == 0 && maxSurge == 0 {
|
||||||
return fmt.Errorf("one of maxSurge or maxUnavailable must be specified")
|
return fmt.Errorf("one of maxSurge or maxUnavailable must be specified")
|
||||||
}
|
}
|
||||||
// The minumum pods which must remain available througout the update
|
// The minimum pods which must remain available throughout the update
|
||||||
// calculated for internal convenience.
|
// calculated for internal convenience.
|
||||||
minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable)))
|
minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable)))
|
||||||
// If the desired new scale is 0, then the max unavailable is necessarily
|
// If the desired new scale is 0, then the max unavailable is necessarily
|
||||||
|
|
|
@ -680,7 +680,7 @@ Scaling foo-v2 up to 1
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "1->2 25/25 complex asymetric deployment",
|
name: "1->2 25/25 complex asymmetric deployment",
|
||||||
oldRc: oldRc(1, 1),
|
oldRc: oldRc(1, 1),
|
||||||
newRc: newRc(0, 2),
|
newRc: newRc(0, 2),
|
||||||
newRcExists: false,
|
newRcExists: false,
|
||||||
|
|
|
@ -36,7 +36,7 @@ type Scaler interface {
|
||||||
// retries in the event of resource version mismatch (if retry is not nil),
|
// retries in the event of resource version mismatch (if retry is not nil),
|
||||||
// and optionally waits until the status of the resource matches newSize (if wait is not nil)
|
// and optionally waits until the status of the resource matches newSize (if wait is not nil)
|
||||||
Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error
|
Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error
|
||||||
// ScaleSimple does a simple one-shot attempt at scaling - not useful on it's own, but
|
// ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but
|
||||||
// a necessary building block for Scale
|
// a necessary building block for Scale
|
||||||
ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error
|
ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error
|
||||||
}
|
}
|
||||||
|
|
|
@ -408,7 +408,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete deployment at the end.
|
// Delete deployment at the end.
|
||||||
// Note: We delete deployment at the end so that if removing RSs fails, we atleast have the deployment to retry.
|
// Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
|
||||||
return deployments.Delete(name, nil)
|
return deployments.Delete(name, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue