Fixed several typos

pull/6/head
joe2far 2016-07-13 15:06:24 +01:00
parent 7e6a856078
commit 5ead89b5bb
66 changed files with 80 additions and 80 deletions

View File

@ -96,7 +96,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -72,7 +72,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -72,7 +72,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -72,7 +72,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -72,7 +72,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -72,7 +72,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -72,7 +72,7 @@
},
"unversioned.GroupVersionForDiscovery": {
"id": "unversioned.GroupVersionForDiscovery",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"required": [
"groupVersion",
"version"

View File

@ -104,7 +104,7 @@ contribute!
**NON_MASQUERADE_CIDR**
The 'internal' IP range which Kuberenetes will use, which will therefore not
The 'internal' IP range which Kubernetes will use, which will therefore not
use IP masquerade. By default kubernetes runs an internal network for traffic
between pods (and between pods and services), and by default this uses the
`10.0.0.0/8` range. However, this sometimes overlaps with a range that you may

View File

@ -81,7 +81,7 @@ via juju ssh:
juju ssh kubernetes-master/0 -t "sudo kubectl get nodes"
You may also SSH to the kuberentes-master unit (`juju ssh kubernetes-master/0`)
You may also SSH to the kubernetes-master unit (`juju ssh kubernetes-master/0`)
and call kubectl from the command prompt.
See the

View File

@ -30,7 +30,7 @@ juju add-relation kubernetes etcd
# Configuration
For your convenience this charm supports some configuration options to set up
a Kuberentes cluster that works in your environment:
a Kubernetes cluster that works in your environment:
**version**: Set the version of the Kubernetes containers to deploy. The
version string must be in the following format "v#.#.#" where the numbers

View File

@ -140,11 +140,11 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
"The number of retries for initial node registration. Retry interval equals node-sync-period.")
fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration,
"Amount of time which we allow running Node to be unresponsive before marking it unhealty. "+
"Amount of time which we allow running Node to be unresponsive before marking it unhealthy. "+
"Must be N times more than kubelet's nodeStatusUpdateFrequency, "+
"where N means number of retries allowed for kubelet to post node status.")
fs.DurationVar(&s.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.NodeStartupGracePeriod.Duration,
"Amount of time which we allow starting Node to be unresponsive before marking it unhealty.")
"Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.")
fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration,
"The period for syncing NodeStatus in NodeController.")
fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")

View File

@ -28,7 +28,7 @@ This project combines concepts and technologies from two already-complex project
To get up and running with Kubernetes-Mesos, follow:
- the [Getting started guide](../../docs/getting-started-guides/mesos.md) to launch a Kuberneters-Mesos cluster,
- the [Getting started guide](../../docs/getting-started-guides/mesos.md) to launch a Kubernetes-Mesos cluster,
- the [Kubernetes-Mesos Scheduler Guide](./docs/scheduler.md) for topics concerning the custom scheduler used in this distribution.

View File

@ -37,7 +37,7 @@ Setting either of these flags to non-zero values may impact connection tracking
In order for pods (replicated, or otherwise) to be scheduled on the cluster, it is strongly recommended that:
* `pod.spec.containers[x].ports[y].hostPort` be left unspecified (or zero), or else;
* `pod.spec.containers[x].ports[y].hostPort` exists in the range of `ports` resources declared on Mesos slaves
- double-check the resource declaraions for your Mesos slaves, the default for `ports` is typically `[31000-32000]`
- double-check the resource declarations for your Mesos slaves, the default for `ports` is typically `[31000-32000]`
Mesos slave host `ports` are resources that are managed by the Mesos resource/offers ecosystem; slave host ports are consumed by launched tasks.
Kubernetes pod container specifications identify two types of ports, "container ports" and "host ports":

View File

@ -72,7 +72,7 @@ type Registry interface {
}
// callback that is invoked during a walk through a series of live offers,
// returning with stop=true (or err != nil) if the walk should stop permaturely.
// returning with stop=true (or err != nil) if the walk should stop prematurely.
type Walker func(offer Perishable) (stop bool, err error)
type RegistryConfig struct {

View File

@ -116,7 +116,7 @@ func (a AllOrNothingProcurement) Procure(t *T, n *api.Node, ps *ProcureState) er
}
// NewNodeProcurement returns a Procurement that checks whether the given pod task and offer
// have valid node informations available and wehther the pod spec node selector matches
// have valid node information available and whether the pod spec node selector matches
// the pod labels.
// If the check is successful the slave ID and assigned slave is set in the given Spec.
func NewNodeProcurement() Procurement {

View File

@ -179,7 +179,7 @@ well-bounded time period.
Multiple stateless, self-hosted, self-healing API servers behind a HA
load balancer, built out by the default "kube-up" automation on GCE,
AWS and basic bare metal (BBM). Note that the single-host approach of
hving etcd listen only on localhost to ensure that onyl API server can
having etcd listen only on localhost to ensure that only API server can
connect to it will no longer work, so alternative security will be
needed in the regard (either using firewall rules, SSL certs, or
something else). All necessary flags are currently supported to enable

View File

@ -174,7 +174,7 @@ upgradable, and more generally could not be managed through the API server
interface.
A third alternative is to generalize the Replication Controller. We would do
something like: if you set the `replicas` field of the ReplicationConrollerSpec
something like: if you set the `replicas` field of the ReplicationControllerSpec
to -1, then it means "run exactly one replica on every node matching the
nodeSelector in the pod template." The ReplicationController would pretend
`replicas` had been set to some large number -- larger than the largest number

View File

@ -505,7 +505,7 @@ depend on what scheduling policy is in force. In the above example, the
scheduler created an equal number of replicas (2) in each of the three
underlying clusters, to make up the total of 6 replicas required. To handle
entire cluster failures, various approaches are possible, including:
1. **simple overprovisioing**, such that sufficient replicas remain even if a
1. **simple overprovisioning**, such that sufficient replicas remain even if a
cluster fails. This wastes some resources, but is simple and reliable.
2. **pod autoscaling**, where the replication controller in each
cluster automatically and autonomously increases the number of

View File

@ -522,7 +522,7 @@ The index-only approach:
- Requires that the user keep the *per completion parameters* in a separate
storage, such as a configData or networked storage.
- Makes no changes to the JobSpec.
- Drawback: while in separate storage, they could be mutatated, which would have
- Drawback: while in separate storage, they could be mutated, which would have
unexpected effects.
- Drawback: Logic for using index to lookup parameters needs to be in the Pod.
- Drawback: CLIs and UIs are limited to using the "index" as the identity of a

View File

@ -62,7 +62,7 @@ scheduling requirements.
rather than replacing `map[string]string`, due to backward compatibility
requirements.)
The affiniy specifications described above allow a pod to request various
The affinity specifications described above allow a pod to request various
properties that are inherent to nodes, for example "run this pod on a node with
an Intel CPU" or, in a multi-zone cluster, "run this pod on a node in zone Z."
([This issue](https://github.com/kubernetes/kubernetes/issues/9044) describes

View File

@ -204,7 +204,7 @@ arbitrary containers on hosts, to gain access to any protected information
stored in either volumes or in pods (such as access tokens or shared secrets
provided as environment variables), to intercept and redirect traffic from
running services by inserting middlemen, or to simply delete the entire history
of the custer.
of the cluster.
As a general principle, access to the central data store should be restricted to
the components that need full control over the system and which can apply

View File

@ -201,7 +201,7 @@ to both `NodeSpec` and `NodeStatus`. The value in `NodeStatus` is the union
of the taints specified by various sources. For now, the only source is
the `NodeSpec` itself, but in the future one could imagine a node inheriting
taints from pods (if we were to allow taints to be attached to pods), from
the node's startup coniguration, etc. The scheduler should look at the `Taints`
the node's startup configuration, etc. The scheduler should look at the `Taints`
in `NodeStatus`, not in `NodeSpec`.
Taints and tolerations are not scoped to namespace.
@ -305,7 +305,7 @@ Users should not start using taints and tolerations until the full
implementation has been in Kubelet and the master for enough binary versions
that we feel comfortable that we will not need to roll back either Kubelet or
master to a version that does not support them. Longer-term we will use a
progamatic approach to enforcing this ([#4855](https://github.com/kubernetes/kubernetes/issues/4855)).
programmatic approach to enforcing this ([#4855](https://github.com/kubernetes/kubernetes/issues/4855)).
## Related issues

View File

@ -50,7 +50,7 @@ will generate a clientset named "my_release" which includes clients for api/v1 o
- Adding expansion methods: client-gen only generates the common methods, such as `Create()` and `Delete()`. You can manually add additional methods through the expansion interface. For example, this [file](../../pkg/client/clientset_generated/release_1_2/typed/core/v1/pod_expansion.go) adds additional methods to Pod's client. As a convention, we put the expansion interface and its methods in file ${TYPE}_expansion.go.
- Generating Fake clients for testing purposes: client-gen will generate a fake clientset if the command line argument `--fake-clientset` is set. The fake clientset provides the default implementation, you only need to fake out the methods you care about when writing test cases.
The output of client-gen inlcudes:
The output of client-gen includes:
- clientset: the clientset will be generated at `pkg/client/clientset_generated/` by default, and you can change the path via the `--clientset-path` command line argument.
- Individual typed clients and client for group: They will be generated at `pkg/client/clientset_generated/${clientset_name}/typed/generated/${GROUP}/${VERSION}/`

View File

@ -76,7 +76,7 @@ Common workflow for Kubemark is:
- monitoring test execution and debugging problems
- turning down Kubemark cluster
Included in descrptions there will be comments helpful for anyone wholl want to
Included in descriptions there will be comments helpful for anyone who'll want to
port Kubemark to different providers.
### Starting a Kubemark cluster

View File

@ -153,7 +153,7 @@ Then, run
This will do a dry run of the release. It will give you instructions at the
end for `pushd`ing into the dry-run directory and having a look around.
`pushd` into the directory and make sure everythig looks as you expect:
`pushd` into the directory and make sure everything looks as you expect:
```console
git log "${RELEASE_VERSION}" # do you see the commit you expect?

View File

@ -105,7 +105,7 @@ Documentation for other releases can be found at
Types in the unversioned package will not have the APIVersion field, but may retain the Kind field.
For backward compatibility, when hanlding the Status, the server will encode it to v1 if the client expects the Status to be encoded in v1, otherwise the server will send the unversioned#Status. If an error occurs before the version can be determined, the server will send the unversioned#Status.
For backward compatibility, when handling the Status, the server will encode it to v1 if the client expects the Status to be encoded in v1, otherwise the server will send the unversioned#Status. If an error occurs before the version can be determined, the server will send the unversioned#Status.
* non-top-level common API objects:

View File

@ -198,7 +198,7 @@ sources AND out-of-tree destinations, so it will be useful for consuming
out-of-tree APIs and for others to build custom clients into their own
repositories.
Typed clients will be constructabale given a ClientMux; the typed constructor will use
Typed clients will be constructable given a ClientMux; the typed constructor will use
the ClientMux to find or construct an appropriate RESTClient. Alternatively, a
typed client should be constructable individually given a config, from which it
will be able to construct the appropriate RESTClient.
@ -342,7 +342,7 @@ changes for multiple releases, to give users time to transition.
Once we release a clientset, we will not make interface changes to it. Users of
that client will not have to change their code until they are deliberately
upgrading their import. We probably will want to generate some sort of stub test
with a clienset, to ensure that we don't change the interface.
with a clientset, to ensure that we don't change the interface.
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@ -116,7 +116,7 @@ Cluster admins are also free to use any of the multiple open source API manageme
provide a lot more functionality like: rate-limiting, caching, logging,
transformations and authentication.
In future, we can also use ingress. That will give cluster admins the flexibility to
easily swap out the ingress controller by a Go reverse proxy, ngingx, haproxy
easily swap out the ingress controller by a Go reverse proxy, nginx, haproxy
or any other solution they might want.
### Storage

View File

@ -38,7 +38,7 @@ Documentation for other releases can be found at
## Introduction
Full Cluster Federation will offer sophisticated federation between multiple kuberentes
Full Cluster Federation will offer sophisticated federation between multiple kubernetes
clusters, offering true high-availability, multiple provider support &
cloud-bursting, multiple region support etc. However, many users have
expressed a desire for a "reasonably" high-available cluster, that runs in
@ -73,7 +73,7 @@ advanced/experimental functionality, so the interface is not initially going to
be particularly user-friendly. As we design the evolution of kube-up, we will
make multiple zones better supported.
For the initial implemenation, kube-up must be run multiple times, once for
For the initial implementation, kube-up must be run multiple times, once for
each zone. The first kube-up will take place as normal, but then for each
additional zone the user must run kube-up again, specifying
`KUBE_USE_EXISTING_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
@ -226,7 +226,7 @@ Initially therefore, the GCE changes will be to:
1. change kube-up to support creation of a cluster in multiple zones
1. pass a flag enabling multi-AZ clusters with kube-up
1. change the kuberentes cloud provider to iterate through relevant zones when resolving items
1. change the kubernetes cloud provider to iterate through relevant zones when resolving items
1. tag GCE PD volumes with the appropriate zone information

View File

@ -141,7 +141,7 @@ The ick-iest part of this implementation is going to be the `GET /network/lease
* On each change, figure out the lease for the node, construct a [lease watch result](https://github.com/coreos/flannel/blob/0bf263826eab1707be5262703a8092c7d15e0be4/subnet/subnet.go#L72), and send it down the watch with the RV from the node
* Implement a lease list that does a similar translation
I say this is gross without an api objet because for each node->lease translation one has to store and retrieve the node metadata sent by flannel (eg: VTEP) from node annotations. [Reference implementation](https://github.com/bprashanth/kubernetes/blob/network_vxlan/pkg/kubelet/flannel_server.go) and [watch proxy](https://github.com/bprashanth/kubernetes/blob/network_vxlan/pkg/kubelet/watch_proxy.go).
I say this is gross without an api object because for each node->lease translation one has to store and retrieve the node metadata sent by flannel (eg: VTEP) from node annotations. [Reference implementation](https://github.com/bprashanth/kubernetes/blob/network_vxlan/pkg/kubelet/flannel_server.go) and [watch proxy](https://github.com/bprashanth/kubernetes/blob/network_vxlan/pkg/kubelet/watch_proxy.go).
# Limitations

View File

@ -99,7 +99,7 @@ An eviction threshold is of the following form:
* valid `operator` tokens are `<`
* valid `quantity` tokens must match the quantity representation used by Kubernetes
If threhold criteria are met, the `kubelet` will take pro-active action to attempt
If threshold criteria are met, the `kubelet` will take pro-active action to attempt
to reclaim the starved compute resource associated with the eviction signal.
The `kubelet` will support soft and hard eviction thresholds.

View File

@ -87,7 +87,7 @@ type CertificateSigningRequest struct {
unversioned.TypeMeta `json:",inline"`
api.ObjectMeta `json:"metadata,omitempty"`
// The certificate request itself and any additonal information.
// The certificate request itself and any additional information.
Spec CertificateSigningRequestSpec `json:"spec,omitempty"`
// Derived information about the request.
@ -105,7 +105,7 @@ type CertificateSigningRequestSpec struct {
// This information is derived from the request by Kubernetes and cannot be
// modified by users. All information is optional since it might not be
// available in the underlying request. This is intented to aid approval
// available in the underlying request. This is intended to aid approval
// decisions.
type CertificateSigningRequestStatus struct {
// Information about the requesting user (if relevant)

View File

@ -50,7 +50,7 @@ however for the purpose of service discovery we can simplify this to the followi
If a user and/or password is required then this information can be passed using Kubernetes Secrets. Kubernetes contains the host and port of each service but it lacks the scheme and path.
`Service Path` - Every Service has one (or more) endpoint. As a rule the endpoint should be located at the root "/" of the localtion URL, i.e. `http://172.100.1.52/`. There are cases where this is not possible and the actual service endpoint could be located at `http://172.100.1.52/cxfcdi`. The Kubernetes metadata for a service does not capture the path part, making it hard to consume this service.
`Service Path` - Every Service has one (or more) endpoint. As a rule the endpoint should be located at the root "/" of the location URL, i.e. `http://172.100.1.52/`. There are cases where this is not possible and the actual service endpoint could be located at `http://172.100.1.52/cxfcdi`. The Kubernetes metadata for a service does not capture the path part, making it hard to consume this service.
`Service Scheme` - Services can be deployed using different schemes. Some popular schemes include `http`,`https`,`file`,`ftp` and `jdbc`.
@ -62,7 +62,7 @@ The API of a service is the point of interaction with a service consumer. The de
`Service Description Path` - To facilitate the consumption of the service by client, the location this document would be greatly helpful to the service consumer. In some cases the client side code can be generated from such a document. It is assumed that the service description document is published somewhere on the service endpoint itself.
`Service Description Language` - A number of Definition Languages (DL) have been developed to describe the service. Some of examples are `WSDL`, `WADL` and `Swagger`. In order to consume a decription document it is good to know the type of DL used.
`Service Description Language` - A number of Definition Languages (DL) have been developed to describe the service. Some of examples are `WSDL`, `WADL` and `Swagger`. In order to consume a description document it is good to know the type of DL used.
## Standard Service Annotations
@ -92,7 +92,7 @@ The fragment below is taken from the service section of the kubernetes.json were
## Conclusion
Five service annotations are proposed as a standard way to desribe a service endpoint. These five annotation are promoted as a Kubernetes standard, so that services can be discovered and a service catalog can be build to facilitate service consumers.
Five service annotations are proposed as a standard way to describe a service endpoint. These five annotations are promoted as a Kubernetes standard, so that services can be discovered and a service catalog can be built to facilitate service consumers.

View File

@ -274,7 +274,7 @@ which process templates are free to override this value based on user input.
**Example Template**
Illustration of a template which defines a service and replication controller with parameters to specialized
the name of the top level objects, the number of replicas, and serveral environment variables defined on the
the name of the top level objects, the number of replicas, and several environment variables defined on the
pod template.
```
@ -412,7 +412,7 @@ Instead they can invoke the k8s api directly.
* **/templates** - the REST storage resource for storing and retrieving template objects, scoped within a namespace.
Storing templates within k8s has the benefit of enabling template sharing and securing via the same roles/resources
that are used to provide access control to other cluster resoures. It also enables sophisticated service catalog
that are used to provide access control to other cluster resources. It also enables sophisticated service catalog
flows in which selecting a service from a catalog results in a new instantiation of that service. (This is not the
only way to implement such a flow, but it does provide a useful level of integration).

View File

@ -102,7 +102,7 @@ Later, we will make a docker image that includes these packages.
Next, we will check that we can discover the rabbitmq service:
```
# Note the rabitmq-service has a DNS name, provided by Kubernetes:
# Note the rabbitmq-service has a DNS name, provided by Kubernetes:
root@temp-loe07:/# nslookup rabbitmq-service
Server: 10.0.0.10
@ -355,7 +355,7 @@ not all items will be processed.
If the number of completions is set to more than the number of items in the queue,
then the Job will not appear to be completed, even though all items in the queue
have been processed. It will start additional pods which will block waiting
for a mesage.
for a message.
There is an unlikely race with this pattern. If the container is killed in between the time
that the message is acknowledged by the amqp-consume command and the time that the container

View File

@ -76,7 +76,7 @@ The web app is written in Go, and borrowed from the original Guestbook example b
K8petstore is built to be expanded, and aims to attract developers interested in building and maintaining a polyglot, non-trivial kubernetes app as a community.
It can be a simple way to get started with kuberentes or golang application development.
It can be a simple way to get started with kubernetes or golang application development.
Thus we've tried to make it easy to hack on, even without kubernetes. Just run the containers and glue them together using docker IP addresses !
@ -156,7 +156,7 @@ We are open to other ways of expanding the coverage and realism of the k8petstor
Reach out with ideas, pull requests, and so on!
The end goal is to support polyglot, real world, data-intensive application on kuberenetes which can be used both to learn how to maintain kubernetes applications
The end goal is to support polyglot, real world, data-intensive application on kubernetes which can be used both to learn how to maintain kubernetes applications
as well as for scale and functionality testing.

View File

@ -31,8 +31,8 @@ Documentation for other releases can be found at
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
Meteor on Kuberenetes
=====================
Meteor on Kubernetes
====================
This example shows you how to package and run a
[Meteor](https://www.meteor.com/) app on Kubernetes.

View File

@ -202,7 +202,7 @@ You now have 10 Firefox and 10 Chrome nodes, happy Seleniuming!
### Debugging
Sometimes it is neccessary to check on a hung test. Each pod is running VNC. To check on one of the browser nodes via VNC, it's reccomended that you proxy, since we don't want to expose a service for every pod, and the containers have a weak VNC password. Replace POD_NAME with the name of the pod you want to connect to.
Sometimes it is necessary to check on a hung test. Each pod is running VNC. To check on one of the browser nodes via VNC, it's recommended that you proxy, since we don't want to expose a service for every pod, and the containers have a weak VNC password. Replace POD_NAME with the name of the pod you want to connect to.
```console
kubectl port-forward --pod=POD_NAME 5900:5900

View File

@ -35,7 +35,7 @@ const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Unversiond is group version for unversioned API objects
// Unversioned is group version for unversioned API objects
// TODO: this should be v1 probably
var Unversioned = unversioned.GroupVersion{Group: "", Version: "v1"}

View File

@ -34,7 +34,7 @@ const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Unversiond is group version for unversioned API objects
// Unversioned is group version for unversioned API objects
// TODO: this should be v1 probably
var Unversioned = unversioned.GroupVersion{Group: "", Version: "v1"}

View File

@ -144,7 +144,7 @@ message GroupVersion {
}
// GroupVersion contains the "group/version" and "version" string of a version.
// It is made a struct to keep extensiblity.
// It is made a struct to keep extensibility.
message GroupVersionForDiscovery {
// groupVersion specifies the API group and version in the form "group/version"
optional string groupVersion = 1;

View File

@ -357,7 +357,7 @@ type ServerAddressByClientCIDR struct {
}
// GroupVersion contains the "group/version" and "version" string of a version.
// It is made a struct to keep extensiblity.
// It is made a struct to keep extensibility.
type GroupVersionForDiscovery struct {
// groupVersion specifies the API group and version in the form "group/version"
GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`

View File

@ -90,7 +90,7 @@ func (ExportOptions) SwaggerDoc() map[string]string {
}
var map_GroupVersionForDiscovery = map[string]string{
"": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.",
"": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"",
"version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
}

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr"
)
// The comments for the structs and fields can be used from go-resful to
// The comments for the structs and fields can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//

View File

@ -520,7 +520,7 @@ type KubeControllerManagerConfiguration struct {
// case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int32 `json:"deletingPodsBurst"`
// nodeMonitorGracePeriod is the amount of time which we allow a running node to be
// unresponsive before marking it unhealty. Must be N times more than kubelet's
// unresponsive before marking it unhealthy. Must be N times more than kubelet's
// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
// to post node status.
NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"`
@ -528,7 +528,7 @@ type KubeControllerManagerConfiguration struct {
// Retry interval equals node-sync-period.
RegisterRetryCount int32 `json:"registerRetryCount"`
// nodeStartupGracePeriod is the amount of time which we allow starting a node to
// be unresponsive before marking it unhealty.
// be unresponsive before marking it unhealthy.
NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"`
// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"`

View File

@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// package capbabilities manages system level capabilities
// package capabilities manages system level capabilities
package capabilities

View File

@ -73,7 +73,7 @@ type timestampedEntry struct {
timestamp time.Time
}
// getTimestampedEntry returnes the timestampedEntry stored under the given key.
// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
item, _ := c.cacheStorage.Get(key)
if tsEntry, ok := item.(*timestampedEntry); ok {

View File

@ -164,7 +164,7 @@ func hasPackage(file string, ignoredPackages []string) bool {
return false
}
// trimPackagePrefix reduces dulpicate values off the front of a package name.
// trimPackagePrefix reduces duplicate values off the front of a package name.
func trimPackagePrefix(file string) string {
if l := strings.LastIndex(file, "k8s.io/kubernetes/pkg/"); l >= 0 {
return file[l+len("k8s.io/kubernetes/"):]

View File

@ -240,7 +240,7 @@ func DefaultKubernetesUserAgent() string {
// InClusterConfig returns a config object which uses the service account
// kubernetes gives to pods. It's intended for clients that expect to be
// running inside a pod running on kuberenetes. It will return an error if
// running inside a pod running on kubernetes. It will return an error if
// called from a process not running in a kubernetes environment.
func InClusterConfig() (*Config, error) {
host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")

View File

@ -54,7 +54,7 @@ func NewDiscoveryRESTMapper(resources []*unversioned.APIResourceList, versionFun
return rm, nil
}
// ObjectTyper provides an ObjectTyper implmentation for
// ObjectTyper provides an ObjectTyper implementation for
// runtime.Unstructured object based on discovery information.
type ObjectTyper struct {
registered map[unversioned.GroupVersionKind]bool

View File

@ -28,7 +28,7 @@ import (
)
// streamProtocolV1 implements the first version of the streaming exec & attach
// protocol. This version has some bugs, such as not being able to detecte when
// protocol. This version has some bugs, such as not being able to detect when
// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
// http://issues.k8s.io/13395 for more details.
type streamProtocolV1 struct {

View File

@ -46,7 +46,7 @@ import (
const ProviderName = "openstack"
// metadataUrl is URL to OpenStack metadata server. It's hadrcoded IPv4
// metadataUrl is URL to OpenStack metadata server. It's hardcoded IPv4
// link-local address as documented in "OpenStack Cloud Administrator Guide",
// chapter Compute - Networking with nova-network.
// http://docs.openstack.org/admin-guide-cloud/compute-networking-nova.html#metadata-service

View File

@ -203,7 +203,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de
}
}
// CreationObserved atomically decrements the `add` expecation count of the given controller.
// CreationObserved atomically decrements the `add` expectation count of the given controller.
func (r *ControllerExpectations) CreationObserved(controllerKey string) {
r.LowerExpectations(controllerKey, 1, 0)
}
@ -666,7 +666,7 @@ func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions
// PodKey returns a key unique to the given pod within a cluster.
// It's used so we consistently use the same key scheme in this module.
// It does exactly what cache.MetaNamespaceKeyFunc would have done
// expcept there's not possibility for error since we know the exact type.
// except there's no possibility for error since we know the exact type.
func PodKey(pod *api.Pod) string {
return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
}

View File

@ -107,7 +107,7 @@ func (u *unhealthyPetTracker) Get(ps *apps.PetSet, knownPets []*api.Pod) (*pcb,
glog.V(4).Infof("Ignoring healthy pet %v for PetSet %v", p.Name, ps.Name)
continue
}
glog.Infof("No recorded blocking pet, but found unhealty pet %v for PetSet %v", p.Name, ps.Name)
glog.Infof("No recorded blocking pet, but found unhealthy pet %v for PetSet %v", p.Name, ps.Name)
return &pcb{pod: p, parent: ps}, nil
}
return nil, nil

View File

@ -36,7 +36,7 @@ type OSInterface interface {
ReadDir(dirname string) ([]os.FileInfo, error)
}
// RealOS is used to dispatch the real system level operaitons.
// RealOS is used to dispatch the real system level operations.
type RealOS struct{}
// MkDir will call os.Mkdir to create a directory.

View File

@ -1214,7 +1214,7 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gra
return result.Error()
}
// TODO(random-liu): This is just a temporary function, will be removed when we acturally add PodSyncResult
// TODO(random-liu): This is just a temporary function, will be removed when we actually add PodSyncResult
// NOTE(random-liu): The pod passed in could be *nil* when kubelet restarted.
func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
// Send the kills in parallel since they may take a long time.

View File

@ -1639,7 +1639,7 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
return dns, dnsSearch, nil
}
// One of the following aruguements must be non-nil: runningPod, status.
// One of the following arguments must be non-nil: runningPod, status.
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
var p kubecontainer.Pod

View File

@ -1992,7 +1992,7 @@ func (r *Runtime) removePod(uuid string) error {
return errors.NewAggregate(errlist)
}
// rktExitError implemets /pkg/util/exec.ExitError interface.
// rktExitError implements /pkg/util/exec.ExitError interface.
type rktExitError struct{ *exec.ExitError }
var _ utilexec.ExitError = &rktExitError{}

View File

@ -84,7 +84,7 @@ type missingVersionErr struct {
}
// IsMissingVersion returns true if the error indicates that the provided object
// is missing a 'Versioj' field.
// is missing a 'Version' field.
func NewMissingVersionErr(data string) error {
return &missingVersionErr{data}
}

View File

@ -109,7 +109,7 @@ func NewUIDPreconditions(uid string) *Preconditions {
return &Preconditions{UID: &u}
}
// Interface offers a common interface for object marshaling/unmarshling operations and
// Interface offers a common interface for object marshaling/unmarshaling operations and
// hides all the storage-related operations behind it.
type Interface interface {
// Returns list of server addresses of the underlying database.

View File

@ -18,7 +18,7 @@ package testing
import "k8s.io/kubernetes/pkg/util/iptables"
// no-op implemenatation of iptables Interface
// no-op implementation of iptables Interface
type fake struct{}
func NewFake() *fake {

View File

@ -42,7 +42,7 @@ var (
gitMajor string = "" // major version, always numeric
gitMinor string = "" // minor version, numeric possibly followed by "+"
// semantic version, dervied by build scripts (see
// semantic version, derived by build scripts (see
// https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md
// for a detailed discussion of this field)
//

View File

@ -15,7 +15,7 @@ limitations under the License.
*/
//
// diskManager interface and diskSetup/TearDown functions abtract commonly used procedures to setup a block volume
// diskManager interface and diskSetup/TearDown functions abstract commonly used procedures to setup a block volume
// rbd volume implements diskManager, calls diskSetup when creating a volume, and calls diskTearDown inside volume unmounter.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//

View File

@ -384,7 +384,7 @@ func sum(inputs []api.ResourceList) api.ResourceList {
return result
}
// DefaultLimitRangerActions is the default implementatation of LimitRangerActions.
// DefaultLimitRangerActions is the default implementation of LimitRangerActions.
type DefaultLimitRangerActions struct{}
// ensure DefaultLimitRangerActions implements the LimitRangerActions interface.

View File

@ -180,7 +180,7 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis
e.checkQuotas(quotas, admissionAttributes, 3)
}
// checkQuotas checks the admission atttributes against the passed quotas. If a quota applies, it will attempt to update it
// checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it
// AFTER it has checked all the admissionAttributes. The method breaks down into phases like this:
// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
// originals

View File

@ -311,7 +311,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
}
}
// ListAlgorithmProviders is called when listing all available algortihm providers in `kube-scheduler --help`
// ListAlgorithmProviders is called when listing all available algorithm providers in `kube-scheduler --help`
func ListAlgorithmProviders() string {
var availableAlgorithmProviders []string
for name := range algorithmProviderMap {