mirror of https://github.com/k3s-io/k3s
Spelling fixes inspired by github.com/client9/misspell
parent 6435d03400
commit 7ef585be22
@@ -8246,7 +8246,7 @@
"type": "string",
"paramType": "query",
"name": "sinceTime",
-"description": "An RFC3339 timestamp from which to show logs. If this value preceeds the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+"description": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"required": false,
"allowMultiple": false
},
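For context on the `sinceTime` semantics described in the hunk above, here is a minimal Go sketch (illustrative only, not part of this commit; the helper name and signature are made up) that parses an RFC3339 timestamp and clamps it to the pod start time:

```go
package main

import (
	"fmt"
	"time"
)

// logWindowStart mirrors the documented behavior: a sinceTime before the pod
// start is clamped to the pod start, and a future sinceTime yields no logs.
func logWindowStart(sinceTime string, podStarted, now time.Time) (time.Time, bool) {
	t, err := time.Parse(time.RFC3339, sinceTime)
	if err != nil {
		return time.Time{}, false
	}
	if t.After(now) {
		return time.Time{}, false // timestamp in the future: no logs
	}
	if t.Before(podStarted) {
		return podStarted, true // clamp to the pod start time
	}
	return t, true
}

func main() {
	started := time.Now().Add(-30 * time.Minute)
	from, ok := logWindowStart("2016-02-17T10:00:00Z", started, time.Now())
	fmt.Println(from, ok)
}
```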
@@ -17236,7 +17236,7 @@
},
"v1.ServicePort": {
"id": "v1.ServicePort",
-"description": "ServicePort conatins information on service's port.",
+"description": "ServicePort contains information on service's port.",
"required": [
"port"
],
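As background for the v1.ServicePort description above, a minimal Go sketch of constructing one; it assumes the present-day k8s.io/api and k8s.io/apimachinery module paths, which postdate the swagger shown in this diff:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// A ServicePort carries the information on one of a Service's ports:
	// name, protocol, the port exposed by the Service, and the target port
	// on the backing pods.
	p := corev1.ServicePort{
		Name:       "http",
		Protocol:   corev1.ProtocolTCP,
		Port:       80,
		TargetPort: intstr.FromInt(8080),
	}
	fmt.Printf("%s %d -> %s (%s)\n", p.Name, p.Port, p.TargetPort.String(), p.Protocol)
}
```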
@@ -1562,7 +1562,7 @@ function kube::release::docker::release() {
kube::log::status "Pushing ${binary} to ${docker_target}"
"${docker_push_cmd[@]}" push "${docker_target}"

-# If we have a amd64 docker image. Tag it without -amd64 also and push it for compability with earlier versions
+# If we have a amd64 docker image. Tag it without -amd64 also and push it for compatibility with earlier versions
if [[ ${arch} == "amd64" ]]; then
local legacy_docker_target="${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_DOCKER_IMAGE_TAG}"

@@ -25,7 +25,7 @@ spec:
terminationGracePeriodSeconds: 600
containers:
- name: default-http-backend
-# Any image is permissable as long as:
+# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
@@ -60,7 +60,7 @@ created independently - this is not something Kubernetes manages for you (yet).
### I don't want or don't have persistent storage

If you are running in a place that doesn't have networked storage, or if you
-just want to kick the tires on this without commiting to it, you can easily
+just want to kick the tires on this without committing to it, you can easily
adapt the `ReplicationController` specification below to use a simple
`emptyDir` volume instead of a `persistentVolumeClaim`.

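To illustrate the emptyDir-for-persistentVolumeClaim swap described in the hunk above, a hedged Go sketch using the present-day k8s.io/api types (an assumption; the volume and claim names are made up):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// volumeFor returns either a PVC-backed volume or a throwaway emptyDir
// volume, mirroring the adaptation the doc describes.
func volumeFor(name, claimName string, usePersistentStorage bool) corev1.Volume {
	if usePersistentStorage {
		return corev1.Volume{
			Name: name,
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: claimName,
				},
			},
		}
	}
	return corev1.Volume{
		Name: name,
		VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{},
		},
	}
}

func main() {
	v := volumeFor("data", "my-claim", false)
	fmt.Println(v.Name, v.EmptyDir != nil)
}
```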
@@ -53,7 +53,7 @@ if [[ "${KUBE_OS_DISTRIBUTION}" == "ubuntu" ]]; then
KUBE_OS_DISTRIBUTION=vivid
fi

-# For GCE script compatability
+# For GCE script compatibility
OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION}

case "${KUBE_OS_DISTRIBUTION}" in
@@ -25,7 +25,7 @@ spec:
terminationGracePeriodSeconds: 600
containers:
- name: default-http-backend
-# Any image is permissable as long as:
+# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
@@ -18,7 +18,7 @@

# The code and configuration is for running node instances on Ubuntu images.
# The master is still on Debian. In addition, the configuration is based on
-# upstart, which is in Ubuntu upto 14.04 LTS (Trusty). Ubuntu 15.04 and above
+# upstart, which is in Ubuntu up to 14.04 LTS (Trusty). Ubuntu 15.04 and above
# replaced upstart with systemd as the init system. Consequently, the
# configuration cannot work on these images.

@@ -46,14 +46,14 @@ build:
docker build -t ${REGISTRY}/etcd-${ARCH}:${TAG} ${TEMP_DIR}

ifeq ($(ARCH),amd64)
-# Backward compatability. TODO: deprecate this image tag
+# Backward compatibility. TODO: deprecate this image tag
docker tag -f ${REGISTRY}/etcd-${ARCH}:${TAG} ${REGISTRY}/etcd:${TAG}
endif

push: build
gcloud docker push ${REGISTRY}/etcd-${ARCH}:${TAG}

-# Backward compatability. TODO: deprecate this image tag
+# Backward compatibility. TODO: deprecate this image tag
ifeq ($(ARCH),amd64)
gcloud docker push ${REGISTRY}/etcd:${TAG}
endif
@@ -41,7 +41,7 @@ endif
cd ${TEMP_DIR} && sed -i "s/ARCH/${ARCH}/g" master-multi.json master.json kube-proxy.json
cd ${TEMP_DIR} && sed -i "s/BASEIMAGE/${BASEIMAGE}/g" Dockerfile
docker build -t ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ${TEMP_DIR}
-# Backward compatability. TODO: deprecate this image tag
+# Backward compatibility. TODO: deprecate this image tag
ifeq ($(ARCH),amd64)
docker tag -f ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ${REGISTRY}/hyperkube:${VERSION}
endif
@@ -128,7 +128,7 @@ function kube-down() {
}

function prepare-e2e() {
-echo "prepare-e2e() The Juju provider does not need any preperations for e2e." 1>&2
+echo "prepare-e2e() The Juju provider does not need any preparations for e2e." 1>&2
}

function sleep-status() {
@@ -72,6 +72,6 @@ elif [[ $1 == "ai" ]]; then
elif [[ $1 == "a" ]]; then
config_etcd
else
-echo "Another arguement is required."
+echo "Another argument is required."
exit 1
fi
@@ -182,7 +182,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&s.StreamingConnectionIdleTimeout.Duration, "streaming-connection-idle-timeout", s.StreamingConnectionIdleTimeout.Duration, "Maximum time a streaming connection can be idle before the connection is automatically closed. 0 indicates no timeout. Example: '5m'")
fs.DurationVar(&s.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", s.NodeStatusUpdateFrequency.Duration, "Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. Default: 10s")
bindableNodeLabels := util.ConfigurationMap(s.NodeLabels)
-fs.Var(&bindableNodeLabels, "node-labels", "<Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must are key=value pairs seperated by ','.")
+fs.Var(&bindableNodeLabels, "node-labels", "<Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must are key=value pairs separated by ','.")
fs.DurationVar(&s.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", s.ImageMinimumGCAge.Duration, "Minimum age for a unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'. Default: '2m'")
fs.IntVar(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%")
fs.IntVar(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%")
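The `--node-labels` help text above expects comma-separated key=value pairs. A standalone Go sketch of that parsing (the real kubelet uses util.ConfigurationMap; this helper is purely illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// parseLabels turns "a=b,c=d" into a map, following the format described in
// the --node-labels help text. Malformed entries are reported as errors.
func parseLabels(s string) (map[string]string, error) {
	labels := map[string]string{}
	if s == "" {
		return labels, nil
	}
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("invalid key=value pair: %q", pair)
		}
		labels[kv[0]] = kv[1]
	}
	return labels, nil
}

func main() {
	m, err := parseLabels("zone=us-central1-a,disk=ssd")
	fmt.Println(m, err)
}
```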
@@ -20,7 +20,7 @@ import (
"k8s.io/kubernetes/cmd/libs/go2idl/types"
)

-// DefaultPackage contains a default implentation of Package.
+// DefaultPackage contains a default implementation of Package.
type DefaultPackage struct {
// Short name of package, used in the "package xxxx" line.
PackageName string
@@ -52,7 +52,7 @@ func newProtobufPackage(packagePath, packageName string, generateAll bool, omitF
return pkg
}

-// protobufPackage contains the protobuf implentation of Package.
+// protobufPackage contains the protobuf implementation of Package.
type protobufPackage struct {
// Short name of package, used in the "package xxxx" line.
PackageName string
@@ -29,7 +29,7 @@ const (
TaskKilled = "task-killed"
TaskLost = "task-lost"
UnmarshalTaskDataFailure = "unmarshal-task-data-failure"
-TaskLostAck = "task-lost-ack" // executor acknowledgement of forwarded TASK_LOST framework message
+TaskLostAck = "task-lost-ack" // executor acknowledgment of forwarded TASK_LOST framework message
Kamikaze = "kamikaze"
WrongSlaveFailure = "pod-for-wrong-slave-failure"
AnnotationUpdateFailure = "annotation-update-failure"
@@ -271,7 +271,7 @@ func (s *offerStorage) Delete(offerId string, reason metrics.OfferDeclinedReason
s.declineOffer(offerId, offer.Host(), reason)
} else {
// some pod has acquired this and may attempt to launch a task with it
-// failed schedule/launch attempts are requried to Release() any claims on the offer
+// failed schedule/launch attempts are required to Release() any claims on the offer

// TODO(jdef): not sure what a good value is here. the goal is to provide a
// launchTasks (driver) operation enough time to complete so that we don't end
@@ -315,7 +315,7 @@ func (k *framework) Registered(drv bindings.SchedulerDriver, fid *mesos.Framewor
}

// Reregistered is called when the scheduler re-registered with the master successfully.
-// This happends when the master fails over.
+// This happens when the master fails over.
func (k *framework) Reregistered(drv bindings.SchedulerDriver, mi *mesos.MasterInfo) {
log.Infof("Scheduler reregistered with the master: %v\n", mi)

@@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-// Package podreconciler implements pod reconcilation of pods which failed
+// Package podreconciler implements pod reconciliation of pods which failed
// to launch, i.e. before binding by the executor took place.
package podreconciler
@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-// Package taskreconciler implement Mesos task reconcilation.
+// Package taskreconciler implement Mesos task reconciliation.
package taskreconciler
@@ -182,7 +182,7 @@ We strongly recommend `NamespaceLifecycle` over `NamespaceAutoProvision`.
### NamespaceLifecycle

This plug-in enforces that a `Namespace` that is undergoing termination cannot have new objects created in it,
-and ensures that requests in a non-existant `Namespace` are rejected.
+and ensures that requests in a non-existent `Namespace` are rejected.

A `Namespace` deletion kicks off a sequence of operations that remove all objects (pods, services, etc.) in that
namespace. In order to enforce integrity of that process, we strongly recommend running this plug-in.
@@ -120,7 +120,7 @@ kubelet
--network-plugin="": <Warning: Alpha feature> The name of the network plugin to be invoked for various events in kubelet/pod lifecycle
--network-plugin-dir="/usr/libexec/kubernetes/kubelet-plugins/net/exec/": <Warning: Alpha feature> The full path of the directory in which to search for network plugins
--node-ip="": IP address of the node. If set, kubelet will use this IP address for the node
---node-labels=: <Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must are key=value pairs seperated by ','.
+--node-labels=: <Warning: Alpha feature> Labels to add when registering the node in the cluster. Labels must are key=value pairs separated by ','.
--node-status-update-frequency=10s: Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. Default: 10s
--non-masquerade-cidr="10.0.0.0/8": Traffic to IPs outside this range will use IP masquerade.
--oom-score-adj=-999: The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]
|
@ -152,7 +152,7 @@ kubelet
|
|||
--volume-stats-agg-period=1m0s: Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0. Default: '1m'
|
||||
```
|
||||
|
||||
###### Auto generated by spf13/cobra on 15-Feb-2016
|
||||
###### Auto generated by spf13/cobra on 17-Feb-2016
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
|
@@ -102,7 +102,7 @@ HTTP connections and are therefore neither authenticated nor encrypted. They
can be run over a secure HTTPS connection by prefixing `https:` to the node,
pod, or service name in the API URL, but they will not validate the certificate
provided by the HTTPS endpoint nor provide client credentials so while the
-connection will by encrypted, it will not provide any guarentees of integrity.
+connection will by encrypted, it will not provide any guarantees of integrity.
These connections **are not currently safe** to run over untrusted and/or
public networks.

@@ -5470,7 +5470,7 @@ The resulting set of endpoints can be viewed as:<br>
<div class="sect2">
<h3 id="_v1_serviceport">v1.ServicePort</h3>
<div class="paragraph">
-<p>ServicePort conatins information on service’s port.</p>
+<p>ServicePort contains information on service’s port.</p>
</div>
<table class="tableblock frame-all grid-all" style="width:100%; ">
<colgroup>
@@ -8196,7 +8196,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">QueryParameter</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">sinceTime</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">An RFC3339 timestamp from which to show logs. If this value preceeds the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
<td class="tableblock halign-left valign-top"></td>
@@ -15,7 +15,7 @@ seqdiag {

user ->> kubelet [label="start\n- bootstrap-cluster-uri"];
kubelet => bootstrap [label="get-master", return="returns\n- master-location\n- master-ca"];
-kubelet ->> master [label="signCert\n- unsigned-kubelet-cert", return="retuns\n- kubelet-cert"];
+kubelet ->> master [label="signCert\n- unsigned-kubelet-cert", return="returns\n- kubelet-cert"];
user => master [label="getSignRequests"];
user => master [label="approveSignRequests"];
kubelet <<-- master [label="returns\n- kubelet-cert"];
@@ -54,7 +54,7 @@ An API request has the following attributes that can be considered for authoriza
- resourceVersion - the API version of the resource being accessed
- resource - which resource is being accessed
- applies only to the API endpoints, such as
-`/api/v1beta1/pods`. For miscelaneous endpoints, like `/version`, the kind is the empty string.
+`/api/v1beta1/pods`. For miscellaneous endpoints, like `/version`, the kind is the empty string.
- resourceName - the name of the resource during a get, update, or delete action.
- subresource - which subresource is being accessed

@@ -33,18 +33,18 @@ Documentation for other releases can be found at
## Summary

This design extends kubernetes with user-friendly support for
-running embarassingly parallel jobs.
+running embarrassingly parallel jobs.

Here, *parallel* means on multiple nodes, which means multiple pods.
-By *embarassingly parallel*, it is meant that the pods
+By *embarrassingly parallel*, it is meant that the pods
have no dependencies between each other. In particular, neither
ordering between pods nor gang scheduling are supported.

-Users already have two other options for running embarassingly parallel
+Users already have two other options for running embarrassingly parallel
Jobs (described in the next section), but both have ease-of-use issues.

Therefore, this document proposes extending the Job resource type to support
-a third way to run embarassingly parallel programs, with a focus on
+a third way to run embarrassingly parallel programs, with a focus on
ease of use.

This new style of Job is called an *indexed job*, because each Pod of the Job
@@ -53,7 +53,7 @@ is specialized to work on a particular *index* from a fixed length array of work
## Background

The Kubernetes [Job](../../docs/user-guide/jobs.md) already supports
-the embarassingly parallel use case through *workqueue jobs*.
+the embarrassingly parallel use case through *workqueue jobs*.
While [workqueue jobs](../../docs/user-guide/jobs.md#job-patterns)
are very flexible, they can be difficult to use.
They: (1) typically require running a message queue
@@ -242,7 +242,7 @@ In the above example:
- `--restart=OnFailure` implies creating a job instead of replicationController.
- Each pods command line is `/usr/local/bin/process_file $F`.
- `--per-completion-env=` implies the jobs `.spec.completions` is set to the length of the argument array (3 in the example).
-- `--per-completion-env=F=<values>` causes env var with `F` to be available in the enviroment when the command line is evaluated.
+- `--per-completion-env=F=<values>` causes env var with `F` to be available in the environment when the command line is evaluated.

How exactly this happens is discussed later in the doc: this is a sketch of the user experience.

@@ -269,7 +269,7 @@ Another case we do not try to handle is where the input file does not exist yet

#### Multiple parameters

-The user may also have multiple paramters, like in [work list 2](#work-list-2).
+The user may also have multiple parameters, like in [work list 2](#work-list-2).
One way is to just list all the command lines already expanded, one per line, in a file, like this:

```
@@ -491,7 +491,7 @@ The index-only approach:
- requires that the user keep the *per completion parameters* in a separate storage, such as a configData or networked storage.
- makes no changes to the JobSpec.
- Drawback: while in separate storage, they could be mutatated, which would have unexpected effects
-- Drawback: Logic for using index to lookup paramters needs to be in the Pod.
+- Drawback: Logic for using index to lookup parameters needs to be in the Pod.
- Drawback: CLIs and UIs are limited to using the "index" as the identity of a pod
from a job. They cannot easily say, for example `repeated failures on the pod processing banana.txt`.

@@ -51,7 +51,7 @@ on a node but do not prevent it, taints that prevent a pod from starting on Kube
if the pod's `NodeName` was written directly (i.e. pod did not go through the scheduler),
and taints that evict already-running pods.
[This comment](https://github.com/kubernetes/kubernetes/issues/3885#issuecomment-146002375)
-has more background on these diffrent scenarios. We will focus on the first
+has more background on these different scenarios. We will focus on the first
kind of taint in this doc, since it is the kind required for the "dedicated nodes" use case.

Implementing dedicated nodes using taints and tolerations is straightforward: in essence, a node that
@@ -264,7 +264,7 @@ their taint. Thus we need to ensure that a new node does not become "Ready" unti
configured with its taints. One way to do this is to have an admission controller that adds the taint whenever
a Node object is created.

-A quota policy may want to treat nodes diffrently based on what taints, if any,
+A quota policy may want to treat nodes differently based on what taints, if any,
they have. For example, if a particular namespace is only allowed to access dedicated nodes,
then it may be convenient to give the namespace unlimited quota. (To use finite quota,
you'd have to size the namespace's quota to the sum of the sizes of the machines in the
@@ -79,7 +79,7 @@ Guide](../admin/README.md).
Document style advice for contributors.

* **Running a cluster locally** ([running-locally.md](running-locally.md)):
-A fast and lightweight local cluster deployment for developement.
+A fast and lightweight local cluster deployment for development.

## Developing against the Kubernetes API

@@ -306,7 +306,7 @@ removed value as deprecated but allowed). This is actually a special case of
a new representation, discussed above.

For [Unions](api-conventions.md), sets of fields where at most one should be set,
-it is acceptible to add a new option to the union if the [appropriate conventions]
+it is acceptable to add a new option to the union if the [appropriate conventions]
were followed in the original object. Removing an option requires following
the deprecation process.

@@ -36,7 +36,7 @@ Documentation for other releases can be found at

## Overview

-End-to-end (e2e) tests for Kubernetes provide a mechanism to test end-to-end behavior of the system, and is the last signal to ensure end user operations match developer specifications. Although unit and integration tests should ideally provide a good signal, the reality is in a distributed system like Kubernetes it is not uncommon that a minor change may pass all unit and integration tests, but cause unforseen changes at the system level. e2e testing is very costly, both in time to run tests and difficulty debugging, though: it takes a long time to build, deploy, and exercise a cluster. Thus, the primary objectives of the e2e tests are to ensure a consistent and reliable behavior of the kubernetes code base, and to catch hard-to-test bugs before users do, when unit and integration tests are insufficient.
+End-to-end (e2e) tests for Kubernetes provide a mechanism to test end-to-end behavior of the system, and is the last signal to ensure end user operations match developer specifications. Although unit and integration tests should ideally provide a good signal, the reality is in a distributed system like Kubernetes it is not uncommon that a minor change may pass all unit and integration tests, but cause unforeseen changes at the system level. e2e testing is very costly, both in time to run tests and difficulty debugging, though: it takes a long time to build, deploy, and exercise a cluster. Thus, the primary objectives of the e2e tests are to ensure a consistent and reliable behavior of the kubernetes code base, and to catch hard-to-test bugs before users do, when unit and integration tests are insufficient.

The e2e tests in kubernetes are built atop of [Ginkgo](http://onsi.github.io/ginkgo/) and [Gomega](http://onsi.github.io/gomega/). There are a host of features that this BDD testing framework provides, and it is recommended that the developer read the documentation prior to diving into the tests.

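For readers new to the Ginkgo/Gomega style mentioned in the hunk above, a minimal, self-contained Go spec sketch (suite and spec names are made up, not taken from the e2e suite):

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// TestExample wires Ginkgo into the standard Go test runner.
func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

var _ = Describe("a BDD-style spec", func() {
	It("uses Gomega matchers for assertions", func() {
		Expect(2 + 2).To(Equal(4))
	})
})
```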
@@ -89,7 +89,7 @@ to update docker image address if you’re not using GCR and default image name*
- Waits until all HollowNodes are in the Running phase (*will work exactly the same everywhere*)

<sub>\* Port 443 is a secured port on the master machine which is used for all external communication with the API server. In the last sentence *external* means all traffic
-comming from other machines, including all the Nodes, not only from outside of the cluster. Currently local components, i.e. ControllerManager and Scheduler talk with API server using insecure port 8080.</sub>
+coming from other machines, including all the Nodes, not only from outside of the cluster. Currently local components, i.e. ControllerManager and Scheduler talk with API server using insecure port 8080.</sub>

### Running e2e tests on Kubemark cluster

@@ -104,7 +104,7 @@ The high level steps to update docs in an existing collection are:
## Updating docs on HEAD

[Development guide](development.md) provides general instructions on how to contribute to kubernetes github repo.
-[Docs how to guide](how-to-doc.md) provides conventions to follow while writting docs.
+[Docs how to guide](how-to-doc.md) provides conventions to follow while writing docs.

## Updating docs in release branch

@@ -47,7 +47,7 @@ federated servers.
developers to expose their APIs as a separate server and enabling the cluster
admin to use it without any change to the core kubernetes reporsitory, we
unblock these APIs.
-* Place for staging experimental APIs: New APIs can remain in seperate
+* Place for staging experimental APIs: New APIs can remain in separate
federated servers until they become stable, at which point, they can be moved
to the core kubernetes master, if appropriate.
* Ensure that new APIs follow kubernetes conventions: Without the mechanism
@@ -37,7 +37,7 @@ Full Ubernetes will offer sophisticated federation between multiple kuberentes
clusters, offering true high-availability, multiple provider support &
cloud-bursting, multiple region support etc. However, many users have
expressed a desire for a "reasonably" high-available cluster, that runs in
-multiple zones on GCE or availablity zones in AWS, and can tolerate the failure
+multiple zones on GCE or availability zones in AWS, and can tolerate the failure
of a single zone without the complexity of running multiple clusters.

Ubernetes-Lite aims to deliver exactly that functionality: to run a single
@@ -88,7 +88,7 @@ The implementation of this will be described in the implementation section.

Note that zone spreading is 'best effort'; zones are just be one of the factors
in making scheduling decisions, and thus it is not guaranteed that pods will
-spread evenly across zones. However, this is likely desireable: if a zone is
+spread evenly across zones. However, this is likely desirable: if a zone is
overloaded or failing, we still want to schedule the requested number of pods.

### Volume affinity
@@ -111,7 +111,7 @@ and cheap network within each cluster.
There is also assumed to be some degree of failure correlation across
a cluster, i.e. whole clusters are expected to fail, at least
occasionally (due to cluster-wide power and network failures, natural
-disasters etc). Clusters are often relatively homogenous in that all
+disasters etc). Clusters are often relatively homogeneous in that all
compute nodes are typically provided by a single cloud provider or
hardware vendor, and connected by a common, unified network fabric.
But these are not hard requirements of Kubernetes.
@@ -129,7 +129,7 @@ The first is accomplished in this PR, while a timeline for 2. and 3. are TDB. To
- Get: Handle a watch on leases
* `/network/leases/subnet`:
- Put: This is a request for a lease. If the nodecontroller is allocating CIDRs we can probably just no-op.
-* `/network/reservations`: TDB, we can probably use this to accomodate node controller allocating CIDR instead of flannel requesting it
+* `/network/reservations`: TDB, we can probably use this to accommodate node controller allocating CIDR instead of flannel requesting it

The ick-iest part of this implementation is going to the the `GET /network/leases`, i.e the watch proxy. We can side-step by waiting for a more generic Kubernetes resource. However, we can also implement it as follows:
* Watch all nodes, ignore heartbeats
@@ -152,7 +152,7 @@ This proposal is really just a call for community help in writing a Kubernetes x
* Flannel daemon in privileged pod
* Flannel server talks to apiserver, described in proposal above
* HTTPs between flannel daemon/server
-* Investigate flannel server runing on every node (as done in the reference implementation mentioned above)
+* Investigate flannel server running on every node (as done in the reference implementation mentioned above)
* Use flannel reservation mode to support node controller podcidr alloction


@@ -129,8 +129,8 @@ behavior is equivalent to the 1.1 behavior with scheduling based on Capacity.

In the initial implementation, `SystemReserved` will be functionally equivalent to
[`KubeReserved`](#system-reserved), but with a different semantic meaning. While KubeReserved
-designates resources set asside for kubernetes components, SystemReserved designates resources set
-asside for non-kubernetes components (currently this is reported as all the processes lumped
+designates resources set aside for kubernetes components, SystemReserved designates resources set
+aside for non-kubernetes components (currently this is reported as all the processes lumped
together in the `/system` raw container).

## Issues
@@ -159,7 +159,7 @@ according to `KubeReserved`.
**API server expects `Allocatable` but does not receive it:** If the kubelet is older and does not
provide `Allocatable` in the `NodeStatus`, then `Allocatable` will be
[defaulted](../../pkg/api/v1/defaults.go) to
-`Capacity` (which will yield todays behavior of scheduling based on capacity).
+`Capacity` (which will yield today's behavior of scheduling based on capacity).

### 3rd party schedulers

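A small Go sketch of the fallback the hunk above describes (Allocatable defaulting to Capacity); the helper is illustrative and assumes the present-day k8s.io/api types rather than the in-tree defaults.go linked above:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// schedulableResources mirrors the defaulting described above: if an older
// kubelet did not report Allocatable, fall back to Capacity.
func schedulableResources(node corev1.Node) corev1.ResourceList {
	if len(node.Status.Allocatable) == 0 {
		return node.Status.Capacity
	}
	return node.Status.Allocatable
}

func main() {
	node := corev1.Node{}
	node.Status.Capacity = corev1.ResourceList{
		corev1.ResourceCPU: resource.MustParse("4"),
	}
	cpu := schedulableResources(node)[corev1.ResourceCPU]
	fmt.Println(cpu.String()) // prints "4": Allocatable was empty, so Capacity is used
}
```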
@@ -40,7 +40,7 @@ Main reason behind doing this is to understand what kind of monitoring needs to

Issue https://github.com/kubernetes/kubernetes/issues/14216 was opened because @spiffxp observed a regression in scheduler performance in 1.1 branch in comparison to `old` 1.0
cut. In the end it turned out the be caused by `--v=4` (instead of default `--v=2`) flag in the scheduler together with the flag `--logtostderr` which disables batching of
-log lines and a number of loging without explicit V level. This caused weird behavior of the whole component.
+log lines and a number of logging without explicit V level. This caused weird behavior of the whole component.

Because we now know that logging may have big performance impact we should consider instrumenting logging mechanism and compute statistics such as number of logged messages,
total and average size of them. Each binary should be responsible for exposing its metrics. An unaccounted but way too big number of days, if not weeks, of engineering time was
@@ -137,7 +137,7 @@ Basic ideas:
We should monitor other aspects of the system, which may indicate saturation of some component.

Basic ideas:
-- queue lenght for queues in the system,
+- queue length for queues in the system,
- wait time for WaitGroups.

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
@@ -194,7 +194,7 @@ From the above, we know that label management must be applied:
3. To some volume types *sometimes*

Volumes should be relabeled with the correct SELinux context. Docker has this capability today; it
-is desireable for other container runtime implementations to provide similar functionality.
+is desirable for other container runtime implementations to provide similar functionality.

Relabeling should be an optional aspect of a volume plugin to accommodate:

@@ -178,7 +178,7 @@ can be instantiated multiple times within the same namespace, as long as a diffe
instantiation. The resulting objects will be independent from a replica/load-balancing perspective.

Generation of parameter values for fields such as Secrets will be delegated to an [admission controller/initializer/finalizer](https://github.com/kubernetes/kubernetes/issues/3585) rather than being solved by the template processor. Some discussion about a generation
-service is occuring [here](https://github.com/kubernetes/kubernetes/issues/12732)
+service is occurring [here](https://github.com/kubernetes/kubernetes/issues/12732)

Labels to be assigned to all objects could also be generated in addition to, or instead of, allowing labels to be supplied in the
Template definition.
@@ -258,7 +258,7 @@ The events associated to `Workflow`s will be:

## Kubectl

-Kubectl will be modified to display workflows. More particulary the `describe` command
+Kubectl will be modified to display workflows. More particularly the `describe` command
will display all the steps with their status. Steps will be topologically sorted and
each dependency will be decorated with its status (wether or not step is waitin for
dependency).
@@ -55,7 +55,7 @@ Documentation for other releases can be found at

__Terminology__

-Throughout this doc you will see a few terms that are sometimes used interchangably elsewhere, that might cause confusion. This section attempts to clarify them.
+Throughout this doc you will see a few terms that are sometimes used interchangeably elsewhere, that might cause confusion. This section attempts to clarify them.

* Node: A single virtual or physical machine in a Kubernetes cluster.
* Cluster: A group of nodes firewalled from the internet, that are the primary compute resources managed by Kubernetes.
@@ -239,7 +239,7 @@ parallelism, for a variety or reasons:

- For Fixed Completion Count jobs, the actual number of pods running in parallel will not exceed the number of
remaining completions. Higher values of `.spec.parallelism` are effectively ignored.
-- For work queue jobs, no new pods are started after any pod has succeded -- remaining pods are allowed to complete, however.
+- For work queue jobs, no new pods are started after any pod has succeeded -- remaining pods are allowed to complete, however.
- If the controller has not had time to react.
- If the controller failed to create pods for any reason (lack of ResourceQuota, lack of permission, etc),
then there may be fewer pods than requested.
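As a companion to the work-queue behavior in the hunk above, a minimal Go sketch of a work-queue style JobSpec (parallelism set, completions left nil), assuming the present-day k8s.io/api/batch/v1 types:

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	parallelism := int32(3)
	// Work-queue pattern: .spec.completions stays nil, so the Job is done once
	// any pod succeeds and the remaining pods finish; no new pods start after
	// that point.
	spec := batchv1.JobSpec{
		Parallelism: &parallelism,
		// Completions intentionally left nil for the work-queue pattern.
	}
	fmt.Printf("parallelism=%d completions=%v\n", *spec.Parallelism, spec.Completions)
}
```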
@@ -305,7 +305,7 @@ Use the following set of examples to help you familiarize yourself with running
// Return a snapshot of the logs from pod <pod-name>.
$ kubectl logs <pod-name>

-// Start streaming the logs from pod <pod-name>. This is similiar to the 'tail -f' Linux command.
+// Start streaming the logs from pod <pod-name>. This is similar to the 'tail -f' Linux command.
$ kubectl logs -f <pod-name>


@@ -68,7 +68,7 @@ Documentation for other releases can be found at
## What is a _replication controller_?

A _replication controller_ ensures that a specified number of pod "replicas" are running at any one
-time. In other words, a replication controller makes sure that a pod or homogenous set of pods are
+time. In other words, a replication controller makes sure that a pod or homogeneous set of pods are
always up and available.
If there are too many pods, it will kill some. If there are too few, the
replication controller will start more. Unlike manually created pods, the pods maintained by a
@@ -108,7 +108,7 @@ They are a core concept used by two additional Kubernetes building blocks: Repli

## Replication Controllers

-OK, now you know how to make awesome, multi-container, labeled pods and you want to use them to build an application, you might be tempted to just start building a whole bunch of individual pods, but if you do that, a whole host of operational concerns pop up. For example: how will you scale the number of pods up or down and how will you ensure that all pods are homogenous?
+OK, now you know how to make awesome, multi-container, labeled pods and you want to use them to build an application, you might be tempted to just start building a whole bunch of individual pods, but if you do that, a whole host of operational concerns pop up. For example: how will you scale the number of pods up or down and how will you ensure that all pods are homogeneous?

Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single Kubernetes object. The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods.

@@ -117,7 +117,7 @@ public class KubernetesSeedProvider implements SeedProvider {
logger.info("Getting endpoints from " + url);
HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();

-// TODO: Remove this once the CA cert is propogated everywhere, and replace
+// TODO: Remove this once the CA cert is propagated everywhere, and replace
// with loading the CA cert.
conn.setSSLSocketFactory(ctx.getSocketFactory());
conn.setHostnameVerifier(trustAllHosts);
@@ -43,7 +43,7 @@ For example to add a 'cifs' driver, by vendor 'foo' install the driver at: /usr/

## Plugin details

-Driver will be invoked with 'Init' to initalize the driver. It will be invoked with 'attach' to attach the volume and with 'detach' to detach the volume from the kubelet node. It also supports custom mounts using 'mount' and 'unmount' callouts to the driver.
+Driver will be invoked with 'Init' to initialize the driver. It will be invoked with 'attach' to attach the volume and with 'detach' to detach the volume from the kubelet node. It also supports custom mounts using 'mount' and 'unmount' callouts to the driver.

### Driver invocation model:

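A hedged Go sketch of the call-out dispatch described in the hunk above (init/attach/detach/mount/unmount). The JSON shape is simplified and is not the authoritative flexvolume contract; it only illustrates how a driver binary might dispatch on its first argument:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type driverStatus struct {
	Status  string `json:"status"`
	Message string `json:"message,omitempty"`
}

func reply(s driverStatus) {
	out, _ := json.Marshal(s)
	fmt.Println(string(out))
}

func main() {
	if len(os.Args) < 2 {
		reply(driverStatus{Status: "Failure", Message: "no call-out given"})
		os.Exit(1)
	}
	// The kubelet invokes the driver binary with one of these call-outs,
	// as described in the plugin details above.
	switch os.Args[1] {
	case "init", "attach", "detach", "mount", "unmount":
		reply(driverStatus{Status: "Success", Message: os.Args[1] + " handled (sketch only)"})
	default:
		reply(driverStatus{Status: "Not supported"})
	}
}
```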
@@ -41,7 +41,7 @@ This sidecar mode brings a new workflow for Java users:

![](workflow.png?raw=true "Workflow")

-As you can see, user can create a `sample:v2` container as sidecar to "provide" war file to Tomcat by copying it to the shared `emptyDir` volume. And Pod will make sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management will be totally seperated from web server management.
+As you can see, user can create a `sample:v2` container as sidecar to "provide" war file to Tomcat by copying it to the shared `emptyDir` volume. And Pod will make sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management will be totally separated from web server management.

For example, if you are going to change the configurations of your Tomcat:

@@ -160,7 +160,7 @@ host="redis"

q = rediswq.RedisWQ(name="job2", host="redis")
print("Worker with sessionID: " + q.sessionID())
-print("Inital queue state: empty=" + str(q.empty()))
+print("Initial queue state: empty=" + str(q.empty()))
while not q.empty():
item = q.lease(lease_secs=10, block=True, timeout=2)
if item is not None:
@@ -283,7 +283,7 @@ Events:

$ kubectl logs pods/job-wq-2-7r7b2
Worker with sessionID: bbd72d0a-9e5c-4dd6-abf6-416cc267991f
-Inital queue state: empty=False
+Initial queue state: empty=False
Working on banana
Working on date
Working on lemon
@@ -24,7 +24,7 @@ host="redis"

q = rediswq.RedisWQ(name="job2", host="redis")
print("Worker with sessionID: " + q.sessionID())
-print("Inital queue state: empty=" + str(q.empty()))
+print("Initial queue state: empty=" + str(q.empty()))
while not q.empty():
item = q.lease(lease_secs=10, block=True, timeout=2)
if item is not None:
@@ -64,7 +64,7 @@ function build_containers() {
function runk8petstore() {

### Finally, run the application.
-### This app is gauranteed to be a clean run using all the source.
+### This app is guaranteed to be a clean run using all the source.
### You can use it to iteratively test/deploy k8petstore and make new changes.

### TODO, add slaves.
@@ -27,7 +27,7 @@ _SECONDS=1000 # number of seconds to measure throughput.
FE="1" # amount of Web server
LG="1" # amount of load generators
SLAVE="1" # amount of redis slaves
-TEST="1" # 0 = Dont run tests, 1 = Do run tests.
+TEST="1" # 0 = Don't run tests, 1 = Do run tests.
NS="default" # namespace

kubectl="${1:-$kubectl}"
@@ -36,7 +36,7 @@ _SECONDS="${3:-$_SECONDS}" # number of seconds to measure throughput.
FE="${4:-$FE}" # amount of Web server
LG="${5:-$LG}" # amount of load generators
SLAVE="${6:-$SLAVE}" # amount of redis slaves
-TEST="${7:-$TEST}" # 0 = Dont run tests, 1 = Do run tests.
+TEST="${7:-$TEST}" # 0 = Don't run tests, 1 = Do run tests.
NS="${8:-$NS}" # namespace

echo "Running w/ args: kubectl $kubectl version $VERSION sec $_SECONDS fe $FE lg $LG slave $SLAVE test $TEST NAMESPACE $NS"
@@ -28,7 +28,7 @@ _SECONDS=1000 # number of seconds to measure throughput.
FE="1" # amount of Web server
LG="1" # amount of load generators
SLAVE="1" # amount of redis slaves
-TEST="1" # 0 = Dont run tests, 1 = Do run tests.
+TEST="1" # 0 = Don't run tests, 1 = Do run tests.
NS="default" # namespace
NODE_PORT=30291 #nodePort, see fe-s.json

@@ -38,7 +38,7 @@ _SECONDS="${3:-$_SECONDS}" # number of seconds to measure throughput.
FE="${4:-$FE}" # amount of Web server
LG="${5:-$LG}" # amount of load generators
SLAVE="${6:-$SLAVE}" # amount of redis slaves
-TEST="${7:-$TEST}" # 0 = Dont run tests, 1 = Do run tests.
+TEST="${7:-$TEST}" # 0 = Don't run tests, 1 = Do run tests.
NS="${8:-$NS}" # namespace
NODE_PORT="${9:-$NODE_PORT}" #nodePort, see fe-s.json
echo "Running w/ args: kubectl $kubectl version $VERSION sec $_SECONDS fe $FE lg $LG slave $SLAVE test = $TEST, NAMESPACE = $NS, NODE_PORT = $NODE_PORT"
@@ -28,7 +28,7 @@ _SECONDS=1000 # number of seconds to measure throughput.
FE="1" # amount of Web server
LG="1" # amount of load generators
SLAVE="1" # amount of redis slaves
-TEST="1" # 0 = Dont run tests, 1 = Do run tests.
+TEST="1" # 0 = Don't run tests, 1 = Do run tests.
NS="default" # namespace

kubectl="${1:-$kubectl}"
@@ -38,7 +38,7 @@ _SECONDS="${4:-$_SECONDS}" # number of seconds to measure throughput.
FE="${5:-$FE}" # amount of Web server
LG="${6:-$LG}" # amount of load generators
SLAVE="${7:-$SLAVE}" # amount of redis slaves
-TEST="${8:-$TEST}" # 0 = Dont run tests, 1 = Do run tests.
+TEST="${8:-$TEST}" # 0 = Don't run tests, 1 = Do run tests.
NS="${9:-$NS}" # namespace

echo "Running w/ args: kubectl $kubectl version $VERSION ip $PUBLIC_IP sec $_SECONDS fe $FE lg $LG slave $SLAVE test $TEST NAMESPACE $NS"
@@ -47,7 +47,7 @@ By defaults, there are only three pods (hence replication controllers) for this

When the replication controller is created, it results in the corresponding container to start, run an entrypoint script that installs the mysql system tables, set up users, and build up a list of servers that is used with the galera parameter ```wsrep_cluster_address```. This is a list of running nodes that galera uses for election of a node to obtain SST (Single State Transfer) from.

-Note: Kubernetes best-practices is to pre-create the services for each controller, and the configuration files which contain the service and replication controller for each node, when created, will result in both a service and replication contrller running for the given node. An important thing to know is that it's important that initally pxc-node1.yaml be processed first and no other pxc-nodeN services that don't have corresponding replication controllers should exist. The reason for this is that if there is a node in ```wsrep_clsuter_address``` without a backing galera node there will be nothing to obtain SST from which will cause the node to shut itself down and the container in question to exit (and another soon relaunched, repeatedly).
+Note: Kubernetes best-practices is to pre-create the services for each controller, and the configuration files which contain the service and replication controller for each node, when created, will result in both a service and replication contrller running for the given node. An important thing to know is that it's important that initially pxc-node1.yaml be processed first and no other pxc-nodeN services that don't have corresponding replication controllers should exist. The reason for this is that if there is a node in ```wsrep_clsuter_address``` without a backing galera node there will be nothing to obtain SST from which will cause the node to shut itself down and the container in question to exit (and another soon relaunched, repeatedly).

First, create the overall cluster service that will be used to connect to the cluster:

@@ -37,7 +37,7 @@ if [ "$1" = 'mysqld' ]; then
DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"

# only check if system tables not created from mysql_install_db and permissions
-# set with initial SQL script before proceding to build SQL script
+# set with initial SQL script before proceeding to build SQL script
if [ ! -d "$DATADIR/mysql" ]; then
# fail if user didn't supply a root password
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
@@ -95,7 +95,7 @@ EOSQL
chown -R mysql:mysql "$DATADIR"
fi

-# if cluster is turned on, then procede to build cluster setting strings
+# if cluster is turned on, then proceed to build cluster setting strings
# that will be interpolated into the config files
if [ -n "$GALERA_CLUSTER" ]; then
# this is the Single State Transfer user (SST, initial dump or xtrabackup user)
@@ -77,7 +77,7 @@ export PODNAME=`kubectl get pods --selector="app=selenium-hub" --output=template
kubectl port-forward --pod=$PODNAME 4444:4444
```

-In a seperate terminal, you can now check the status.
+In a separate terminal, you can now check the status.

```console
curl http://localhost:4444
@@ -3364,7 +3364,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">template</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">object that describes the pod that will be created if insufficient replicas are detected; takes precendence over templateRef; see <a href="http://releases.k8s.io/HEAD/docs/replication-controller.md#pod-template">http://releases.k8s.io/HEAD/docs/replication-controller.md#pod-template</a></p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">object that describes the pod that will be created if insufficient replicas are detected; takes precedence over templateRef; see <a href="http://releases.k8s.io/HEAD/docs/replication-controller.md#pod-template">http://releases.k8s.io/HEAD/docs/replication-controller.md#pod-template</a></p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_podtemplatespec">v1.PodTemplateSpec</a></p></td>
<td class="tableblock halign-left valign-top"></td>
@@ -289,7 +289,7 @@ case ${JOB_NAME} in
export NUM_NODES="100"
# Reduce logs verbosity
export TEST_CLUSTER_LOG_LEVEL="--v=2"
-# TODO: Remove when we figure out the reason for ocassional failures #19048
+# TODO: Remove when we figure out the reason for occasional failures #19048
export KUBELET_TEST_LOG_LEVEL="--v=4"
# Increase resync period to simulate production
export TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h"
@@ -366,7 +366,7 @@ case ${JOB_NAME} in
export E2E_TEST="false"
export USE_KUBEMARK="true"
export KUBEMARK_TESTS="\[Feature:Performance\]"
-# Override defaults to be indpendent from GCE defaults and set kubemark parameters
+# Override defaults to be independent from GCE defaults and set kubemark parameters
export NUM_NODES="10"
export MASTER_SIZE="n1-standard-2"
export NODE_SIZE="n1-standard-1"
@@ -385,7 +385,7 @@ case ${JOB_NAME} in
export USE_KUBEMARK="true"
export KUBEMARK_TESTS="\[Feature:Performance\]"
export FAIL_ON_GCP_RESOURCE_LEAK="false"
-# Override defaults to be indpendent from GCE defaults and set kubemark parameters
+# Override defaults to be independent from GCE defaults and set kubemark parameters
export NUM_NODES="6"
export MASTER_SIZE="n1-standard-4"
export NODE_SIZE="n1-standard-8"
@@ -403,7 +403,7 @@ case ${JOB_NAME} in
export USE_KUBEMARK="true"
export KUBEMARK_TESTS="\[Feature:Performance\]"
export FAIL_ON_GCP_RESOURCE_LEAK="false"
-# Override defaults to be indpendent from GCE defaults and set kubemark parameters
+# Override defaults to be independent from GCE defaults and set kubemark parameters
# We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
export NUM_NODES="11"
export MASTER_SIZE="n1-standard-4"
@@ -437,7 +437,7 @@ runTests() {
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
-# Post-conditon: valid-pod is labelled
+# Post-condition: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'

### Delete POD by label
@@ -112,11 +112,11 @@ for current in "${index[@]}"; do
pushd "$(dirname ${file})" > /dev/null
base_file=$(basename "${file}")
base_generated_file=$(basename "${generated_file}")
-# We use '-d 1234' flag to have a deterministic output everytime.
+# We use '-d 1234' flag to have a deterministic output every time.
# The constant was just randomly chosen.
echo Running ${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}"
${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}"
-# Add boilerplate at the begining of the generated file.
+# Add boilerplate at the beginning of the generated file.
sed 's/YEAR/2015/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp"
cat "${base_generated_file}" >> "${base_generated_file}.tmp"
mv "${base_generated_file}.tmp" "${base_generated_file}"
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-# Verifies that api reference docs are upto date.
+# Verifies that api reference docs are up to date.

set -o errexit
set -o nounset
@@ -97,10 +97,10 @@ for current in ${index[@]}; do
base_generated_file=$(basename "${generated_file}")
# temporarily move the generated file to a non-go file so it doesn't influence the verify codecgen
mv "${base_generated_file}" "${base_generated_file}.bak"
-# We use '-d 1234' flag to have a deterministic output everytime.
+# We use '-d 1234' flag to have a deterministic output every time.
# The constant was just randomly chosen.
${CODECGEN} -d 1234 -o "${base_generated_file}.1tmp" "${base_file}"
-# Add boilerplate at the begining of the generated file.
+# Add boilerplate at the beginning of the generated file.
sed 's/YEAR/2015/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp"
cat "${base_generated_file}.1tmp" >> "${base_generated_file}.tmp"
rm "${base_generated_file}.1tmp"
@@ -93,7 +93,7 @@ fi
# is an intentionally broken symlink. Linux can use --no-dereference. OS X cannot.
# So we --exclude='symlink' so diff -r doesn't die following a bad symlink.
if ! _out="$(diff -Naupr --exclude='symlink' ${KUBE_ROOT}/Godeps/_workspace/src ${_kubetmp}/Godeps/_workspace/src)"; then
-echo "Your godeps changes are not reproducable"
+echo "Your godeps changes are not reproducible"
echo "${_out}"
exit 1
fi
@@ -16,10 +16,10 @@ else
fi
echo "${reset}"

-# Check if changes to Godeps are reproducable...
+# Check if changes to Godeps are reproducible...
files=($(git diff --cached --name-only --diff-filter ACM | grep "Godeps"))
if [[ "${#files[@]}" -ne 0 ]]; then
-echo -ne "Check if changes to Godeps are reproducable (this is pretty slow)..."
+echo -ne "Check if changes to Godeps are reproducible (this is pretty slow)..."
if ! OUT=$("hack/verify-godeps.sh" 2>&1); then
echo
echo "${red}${OUT}"
@@ -104,7 +104,7 @@ type GetterWithOptions interface {
// value of the request path below the object will be included as the named
// string in the serialization of the runtime object. E.g., returning "path"
// will convert the trailing request scheme value to "path" in the map[string][]string
-// passed to the convertor.
+// passed to the converter.
NewGetOptions() (runtime.Object, bool, string)
}

@@ -429,7 +429,7 @@ const (
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
)

-// Protocol defines network protocols supported for things like conatiner ports.
+// Protocol defines network protocols supported for things like container ports.
type Protocol string

const (
@@ -1017,7 +1017,7 @@ type ContainerStatus struct {
Name string `json:"name"`
State ContainerState `json:"state,omitempty"`
LastTerminationState ContainerState `json:"lastState,omitempty"`
-// Ready specifies whether the conatiner has passed its readiness check.
+// Ready specifies whether the container has passed its readiness check.
Ready bool `json:"ready"`
// Note that this is calculated from dead containers. But those containers are subject to
// garbage collection. This value will get capped at 5 by GC.
@ -1934,7 +1934,7 @@ type PodLogOptions struct {
|
|||
// Only one of sinceSeconds or sinceTime may be specified.
|
||||
SinceSeconds *int64
|
||||
// An RFC3339 timestamp from which to show logs. If this value
|
||||
// preceeds the time a pod was started, only logs since the pod start will be returned.
|
||||
// precedes the time a pod was started, only logs since the pod start will be returned.
|
||||
// If this value is in the future, no logs will be returned.
|
||||
// Only one of sinceSeconds or sinceTime may be specified.
|
||||
SinceTime *unversioned.Time
|
||||
|
|
|
@ -322,7 +322,7 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi
|
|||
return err
|
||||
}
|
||||
|
||||
// the host namespace fields have to be handled here for backward compatibilty
|
||||
// the host namespace fields have to be handled here for backward compatibility
|
||||
// with v1.0.0
|
||||
out.HostPID = in.SecurityContext.HostPID
|
||||
out.HostNetwork = in.SecurityContext.HostNetwork
|
||||
|
|
|
@ -630,7 +630,7 @@ const (
|
|||
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
|
||||
)
|
||||
|
||||
// Protocol defines network protocols supported for things like conatiner ports.
|
||||
// Protocol defines network protocols supported for things like container ports.
|
||||
type Protocol string
|
||||
|
||||
const (
|
||||
|
@ -1831,7 +1831,7 @@ type ServiceSpec struct {
|
|||
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
|
||||
}
|
||||
|
||||
// ServicePort conatins information on service's port.
|
||||
// ServicePort contains information on service's port.
|
||||
type ServicePort struct {
|
||||
// The name of this port within the service. This must be a DNS_LABEL.
|
||||
// All ports within a ServiceSpec must have unique names. This maps to
|
||||
|
@ -2346,7 +2346,7 @@ type PodLogOptions struct {
|
|||
// Only one of sinceSeconds or sinceTime may be specified.
|
||||
SinceSeconds *int64 `json:"sinceSeconds,omitempty"`
|
||||
// An RFC3339 timestamp from which to show logs. If this value
|
||||
// preceeds the time a pod was started, only logs since the pod start will be returned.
|
||||
// precedes the time a pod was started, only logs since the pod start will be returned.
|
||||
// If this value is in the future, no logs will be returned.
|
||||
// Only one of sinceSeconds or sinceTime may be specified.
|
||||
SinceTime *unversioned.Time `json:"sinceTime,omitempty"`
|
||||
|
|
|
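The PodLogOptions hunks above describe the sinceSeconds/sinceTime contract: at most one of the two may be set, and a value that precedes the pod's start time simply means "from the pod start". A minimal, self-contained sketch of that rule follows; the logOptions type and logStart helper are illustrative stand-ins, not the Kubernetes client API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// logOptions mirrors only the two fields discussed above; names are illustrative.
type logOptions struct {
	SinceSeconds *int64
	SinceTime    *time.Time
}

// logStart returns the timestamp from which logs should be shown.
func logStart(opts logOptions, podStart, now time.Time) (time.Time, error) {
	if opts.SinceSeconds != nil && opts.SinceTime != nil {
		return time.Time{}, errors.New("only one of sinceSeconds or sinceTime may be specified")
	}
	switch {
	case opts.SinceSeconds != nil:
		since := now.Add(-time.Duration(*opts.SinceSeconds) * time.Second)
		if since.Before(podStart) {
			return podStart, nil // value precedes pod start: show logs since the start
		}
		return since, nil
	case opts.SinceTime != nil:
		if opts.SinceTime.Before(podStart) {
			return podStart, nil
		}
		return *opts.SinceTime, nil // may be in the future, in which case no logs match
	default:
		return podStart, nil
	}
}

func main() {
	start := time.Now().Add(-10 * time.Minute)
	secs := int64(300)
	from, _ := logStart(logOptions{SinceSeconds: &secs}, start, time.Now())
	fmt.Println("show logs since:", from.Format(time.RFC3339))
}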
@ -1126,7 +1126,7 @@ var map_PodLogOptions = map[string]string{
|
|||
"follow": "Follow the log stream of the pod. Defaults to false.",
|
||||
"previous": "Return previous terminated container logs. Defaults to false.",
|
||||
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value preceeds the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
|
||||
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
|
||||
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
|
||||
|
@ -1496,7 +1496,7 @@ func (ServiceList) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ServicePort = map[string]string{
|
||||
"": "ServicePort conatins information on service's port.",
|
||||
"": "ServicePort contains information on service's port.",
|
||||
"name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.",
|
||||
"protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.",
|
||||
"port": "The port that will be exposed by this service.",
|
||||
|
|
|
@ -100,7 +100,7 @@ func (a *APIInstaller) NewWebService() *restful.WebService {
|
|||
ws.Path(a.prefix)
|
||||
// a.prefix contains "prefix/group/version"
|
||||
ws.Doc("API at " + a.prefix)
|
||||
// Backwards compatibilty, we accepted objects with empty content-type at V1.
|
||||
// Backwards compatibility, we accepted objects with empty content-type at V1.
|
||||
// If we stop using go-restful, we can default empty content-type to application/json on an
|
||||
// endpoint by endpoint basis
|
||||
ws.Consumes("*/*")
|
||||
|
|
|
@ -66,7 +66,7 @@ func pathWithPrefix(prefix, resource, namespace, name string) string {
|
|||
// hanging for the long time,
|
||||
// - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the
|
||||
// constructor in flight at any given moment,
|
||||
// - subsequent "short" requests are rejected instantly with apropriate error,
|
||||
// - subsequent "short" requests are rejected instantly with appropriate error,
|
||||
// - subsequent "long" requests are handled normally,
|
||||
// - we correctly recover after some "short" requests finish, i.e. we can process new ones.
|
||||
func TestMaxInFlight(t *testing.T) {
|
||||
|
|
|
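The TestMaxInFlight comment above spells out the behaviour being verified: "long" requests are exempt, the channel size bounds how many "short" requests are in flight, and anything beyond that budget is rejected immediately. A compact sketch of that pattern, under assumed names, is below; the long-running check and the 429 status are illustrative choices, not necessarily what the real filter does.

package main

import (
	"net/http"
	"strings"
)

func withMaxInFlight(limit int, isLongRunning func(*http.Request) bool, next http.Handler) http.Handler {
	slots := make(chan struct{}, limit) // buffered channel is the in-flight budget
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if isLongRunning(r) {
			next.ServeHTTP(w, r) // e.g. watches: not counted against the budget
			return
		}
		select {
		case slots <- struct{}{}: // acquire a slot
			defer func() { <-slots }() // release it when the request finishes
			next.ServeHTTP(w, r)
		default:
			http.Error(w, "too many requests in flight", http.StatusTooManyRequests)
		}
	})
}

func main() {
	long := func(r *http.Request) bool { return strings.Contains(r.URL.RawQuery, "watch=true") }
	handler := withMaxInFlight(2, long, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	http.ListenAndServe("127.0.0.1:8080", handler)
}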
@ -83,7 +83,7 @@ var (
|
|||
// TCP connection.
|
||||
minWatchTimeout = 5 * time.Minute
|
||||
// If we are within 'forceResyncThreshold' from the next planned resync
|
||||
// and are just before issueing Watch(), resync will be forced now.
|
||||
// and are just before issuing Watch(), resync will be forced now.
|
||||
forceResyncThreshold = 3 * time.Second
|
||||
// We try to set timeouts for Watch() so that we will finish about
|
||||
// than 'timeoutThreshold' from next planned periodic resync.
|
||||
|
|
|
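The reflector hunk above says a resync is forced early when the next planned resync falls within forceResyncThreshold of the moment a Watch is about to be issued. An illustrative sketch of just that timing decision, with assumed names, is shown here.

package main

import (
	"fmt"
	"time"
)

const forceResyncThreshold = 3 * time.Second

// shouldForceResync reports whether a resync planned for nextResync should run
// now instead of letting a long-lived Watch delay it past its schedule.
func shouldForceResync(now, nextResync time.Time) bool {
	return nextResync.Sub(now) <= forceResyncThreshold
}

func main() {
	now := time.Now()
	fmt.Println(shouldForceResync(now, now.Add(2*time.Second))) // true: resync before watching
	fmt.Println(shouldForceResync(now, now.Add(time.Minute)))   // false: watch first
}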
@ -37,7 +37,7 @@ limitations under the License.
|
|||
// availability.
|
||||
//
|
||||
// Larger clusters often have a more lenient SLA for API latency. This should be
|
||||
// taken into account when configuring the client. The rate of leader transistions
|
||||
// taken into account when configuring the client. The rate of leader transitions
|
||||
// should be monitored and RetryPeriod and LeaseDuration should be increased
|
||||
// until the rate is stable and acceptably low. It's important to keep in mind
|
||||
// when configuring this client that the tolerance to API latency varies inversely
|
||||
|
@ -149,7 +149,7 @@ type LeaderElector struct {
|
|||
observedRecord LeaderElectionRecord
|
||||
observedTime time.Time
|
||||
// used to implement OnNewLeader(), may lag slightly from the
|
||||
// value observedRecord.HolderIdentity if the transistion has
|
||||
// value observedRecord.HolderIdentity if the transition has
|
||||
// not yet been reported.
|
||||
reportedLeader string
|
||||
}
|
||||
|
|
|
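The leader-election hunks above describe the tuning relationship between API latency, RetryPeriod, and LeaseDuration. The following is a deliberately simplified, self-contained sketch of that relationship, not the real LeaderElector: the holder renews every retryPeriod, and another candidate only takes over once leaseDuration has elapsed since the last observed renewal, which is why higher API latency calls for larger values.

package main

import (
	"fmt"
	"sync"
	"time"
)

type lease struct {
	mu        sync.Mutex
	holder    string
	renewedAt time.Time
}

// tryAcquire renews the lease if we already hold it, or takes it over once the
// previous holder's lease has expired.
func (l *lease) tryAcquire(id string, leaseDuration time.Duration, now time.Time) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.holder == id || l.holder == "" || now.Sub(l.renewedAt) > leaseDuration {
		l.holder = id
		l.renewedAt = now
		return true
	}
	return false
}

func main() {
	var l lease
	retryPeriod := 2 * time.Second
	leaseDuration := 15 * time.Second
	for i := 0; i < 3; i++ {
		fmt.Println("leader:", l.tryAcquire("node-a", leaseDuration, time.Now()))
		time.Sleep(retryPeriod)
	}
}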
@ -67,7 +67,7 @@ type SwaggerSchemaInterface interface {
|
|||
SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error)
|
||||
}
|
||||
|
||||
// DiscoveryClient implements the functions that dicovery server-supported API groups,
|
||||
// DiscoveryClient implements the functions that discover server-supported API groups,
|
||||
// versions and resources.
|
||||
type DiscoveryClient struct {
|
||||
*RESTClient
|
||||
|
|
|
@ -342,7 +342,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
|
|||
}
|
||||
|
||||
// delete terminating pods that have been scheduled on
|
||||
// nonexistant nodes
|
||||
// nonexistent nodes
|
||||
if !found {
|
||||
nc.forcefullyDeletePod(pod)
|
||||
return
|
||||
|
|
|
@ -227,7 +227,7 @@ func testRecycleFailures(t *testing.T, recycler *PersistentVolumeRecycler, mockC
|
|||
func newFailingMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
|
||||
return &failingMockRecycler{
|
||||
path: spec.PersistentVolume.Spec.HostPath.Path,
|
||||
errorCount: myMaximumRetry - 1, // fail two times and then successfuly recycle the volume
|
||||
errorCount: myMaximumRetry - 1, // fail two times and then successfully recycle the volume
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -350,7 +350,7 @@ func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) {
|
|||
return data, nil
|
||||
}
|
||||
|
||||
// ReadConfigData reads the bytes from the specified filesytem or network
|
||||
// ReadConfigData reads the bytes from the specified filesystem or network
|
||||
// location or from stdin if location == "-".
|
||||
// TODO: replace with resource.Builder
|
||||
func ReadConfigData(location string) ([]byte, error) {
|
||||
|
|
|
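The ReadConfigData comment above describes a three-way dispatch: "-" means stdin, a URL means a network fetch, anything else is a local file. A hedged sketch of that dispatch follows; the function name, error handling, and the pod.yaml path are illustrative, not the kubectl implementation.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
)

func readConfigData(location string) ([]byte, error) {
	switch {
	case location == "-":
		return ioutil.ReadAll(os.Stdin)
	case strings.HasPrefix(location, "http://"), strings.HasPrefix(location, "https://"):
		resp, err := http.Get(location)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		return ioutil.ReadAll(resp.Body)
	default:
		return ioutil.ReadFile(location)
	}
}

func main() {
	data, err := readConfigData("pod.yaml") // hypothetical local file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}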
@ -45,7 +45,7 @@ import (
|
|||
)
|
||||
|
||||
// Describer generates output for the named resource or an error
|
||||
// if the output could not be generated. Implementors typically
|
||||
// if the output could not be generated. Implementers typically
|
||||
// abstract the retrieval of the named object from a remote server.
|
||||
type Describer interface {
|
||||
Describe(namespace, name string) (output string, err error)
|
||||
|
@ -53,7 +53,7 @@ type Describer interface {
|
|||
|
||||
// ObjectDescriber is an interface for displaying arbitrary objects with extra
|
||||
// information. Use when an object is in hand (on disk, or already retrieved).
|
||||
// Implementors may ignore the additional information passed on extra, or use it
|
||||
// Implementers may ignore the additional information passed on extra, or use it
|
||||
// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer
|
||||
// is found.
|
||||
type ObjectDescriber interface {
|
||||
|
|
|
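The Describer hunks above note that implementers typically abstract away how the named object is retrieved. A minimal sketch of one possible implementation of that contract is below; the mapDescriber type and its in-memory lookup are invented purely for illustration.

package main

import "fmt"

type describer interface {
	Describe(namespace, name string) (output string, err error)
}

// mapDescriber "retrieves" objects from an in-memory map instead of a remote
// server, but satisfies the same interface shape.
type mapDescriber struct {
	objects map[string]string // key: namespace/name
}

func (d mapDescriber) Describe(namespace, name string) (string, error) {
	key := namespace + "/" + name
	obj, ok := d.objects[key]
	if !ok {
		return "", fmt.Errorf("object %q not found", key)
	}
	return fmt.Sprintf("Name:\t%s\nNamespace:\t%s\nDetail:\t%s\n", name, namespace, obj), nil
}

func main() {
	var d describer = mapDescriber{objects: map[string]string{"default/web": "nginx pod"}}
	out, err := d.Describe("default", "web")
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}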
@ -108,7 +108,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate, expected ...ku
|
|||
if len(expected[i].Pods) != len(update.Pods) {
|
||||
t.Fatalf("Expected %#v, Got %#v", expected[i], update)
|
||||
}
|
||||
// Compare pods one by one. This is necessary beacuse we don't want to
|
||||
// Compare pods one by one. This is necessary because we don't want to
|
||||
// compare local annotations.
|
||||
for j := range expected[i].Pods {
|
||||
if podsDifferSemantically(expected[i].Pods[j], update.Pods[j]) || !reflect.DeepEqual(expected[i].Pods[j].Status, update.Pods[j].Status) {
|
||||
|
|
|
@ -189,7 +189,7 @@ func (c *cache) subscribe(id types.UID, timestamp time.Time) chan *data {
|
|||
defer c.lock.Unlock()
|
||||
d := c.getIfNewerThan(id, timestamp)
|
||||
if d != nil {
|
||||
// If the cache entry is ready, send the data and return immediatly.
|
||||
// If the cache entry is ready, send the data and return immediately.
|
||||
ch <- d
|
||||
return ch
|
||||
}
|
||||
|
|
|
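The cache hunk above shows the fast path of subscribe: if an entry newer than the requested timestamp is already cached, it is sent on a buffered channel immediately; otherwise the channel is parked until newer data arrives. A simplified, self-contained sketch of that pattern follows; the types and field names are assumptions, not the kubelet's.

package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	value     string
	timestamp time.Time
}

type cache struct {
	mu          sync.Mutex
	data        map[string]entry
	subscribers map[string][]chan entry
}

func newCache() *cache {
	return &cache{data: map[string]entry{}, subscribers: map[string][]chan entry{}}
}

// subscribe returns a channel that receives the entry for id once it is newer
// than the given timestamp.
func (c *cache) subscribe(id string, newerThan time.Time) chan entry {
	ch := make(chan entry, 1) // buffered so the sender never blocks
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.data[id]; ok && e.timestamp.After(newerThan) {
		ch <- e // cache entry is ready: send the data and return immediately
		return ch
	}
	c.subscribers[id] = append(c.subscribers[id], ch)
	return ch
}

// set stores a new entry and notifies any parked subscribers.
func (c *cache) set(id, value string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e := entry{value: value, timestamp: time.Now()}
	c.data[id] = e
	for _, ch := range c.subscribers[id] {
		ch <- e
	}
	delete(c.subscribers, id)
}

func main() {
	c := newCache()
	ch := c.subscribe("pod-1", time.Now())
	go c.set("pod-1", "status: Running")
	fmt.Println((<-ch).value)
}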
@ -114,7 +114,7 @@ func (p *PodSyncResult) AddPodSyncResult(result PodSyncResult) {
|
|||
p.SyncError = result.SyncError
|
||||
}
|
||||
|
||||
// Fail fails the PodSyncResult with an error occured in SyncPod() and KillPod() itself
|
||||
// Fail fails the PodSyncResult with an error occurred in SyncPod() and KillPod() itself
|
||||
func (p *PodSyncResult) Fail(err error) {
|
||||
p.SyncError = err
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@ func GetCAdvisorCustomMetricsDefinitionPath(container *api.Container) (*string,
|
|||
if container.VolumeMounts != nil {
|
||||
for _, volumeMount := range container.VolumeMounts {
|
||||
if path.Clean(volumeMount.MountPath) == path.Clean(CustomMetricsDefinitionDir) {
|
||||
// TODO: add defintion file validation.
|
||||
// TODO: add definition file validation.
|
||||
definitionPath := path.Clean(path.Join(volumeMount.MountPath, CustomMetricsDefinitionContainerFile))
|
||||
return &definitionPath, nil
|
||||
}
|
||||
|
|
|
@ -196,7 +196,7 @@ func TestCache(t *testing.T) {
|
|||
assert.NoError(err)
|
||||
assert.True(ok)
|
||||
|
||||
// Ensure no more calls to the mockCadvisor occured
|
||||
// Ensure no more calls to the mockCadvisor occurred
|
||||
assert.Equal(cadvisorCallCount, len(mockCadvisor.Calls))
|
||||
}
|
||||
|
||||
|
|
|
@ -704,7 +704,7 @@ func TestFindContainersByPod(t *testing.T) {
|
|||
}
|
||||
fakeClient := &FakeDockerClient{}
|
||||
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
|
||||
// image back-off is set to nil, this test shouldnt pull images
|
||||
// image back-off is set to nil, this test should not pull images
|
||||
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, kubetypes.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
|
||||
for i, test := range tests {
|
||||
fakeClient.ContainerList = test.containerList
|
||||
|
|
|
@ -189,7 +189,7 @@ func (f *FakeDockerClient) ListContainers(options docker.ListContainersOptions)
|
|||
err := f.popError("list")
|
||||
containerList := append([]docker.APIContainers{}, f.ContainerList...)
|
||||
if options.All {
|
||||
// Althought the container is not sorted, but the container with the same name should be in order,
|
||||
// Although the container is not sorted, but the container with the same name should be in order,
|
||||
// that is enough for us now.
|
||||
// TODO(random-liu): Is a fully sorted array needed?
|
||||
containerList = append(containerList, f.ExitedContainerList...)
|
||||
|
|
|
@ -1570,7 +1570,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
|
|||
}
|
||||
|
||||
// No pod secrets for the infra container.
|
||||
// The message isnt needed for the Infra container
|
||||
// The message isn't needed for the Infra container
|
||||
if err, msg := dm.imagePuller.PullImage(pod, container, nil); err != nil {
|
||||
return "", err, msg
|
||||
}
|
||||
|
|
|
@ -913,7 +913,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
|
|||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||
|
||||
verifyCalls(t, fakeDocker, []string{
|
||||
// Check the pod infra contianer.
|
||||
// Check the pod infra container.
|
||||
"inspect_container",
|
||||
})
|
||||
}
|
||||
|
|
|
@ -113,7 +113,7 @@ const (
|
|||
|
||||
etcHostsPath = "/etc/hosts"
|
||||
|
||||
// Capacity of the channel for recieving pod lifecycle events. This number
|
||||
// Capacity of the channel for receiving pod lifecycle events. This number
|
||||
// is a bit arbitrary and may be adjusted in the future.
|
||||
plegChannelCapacity = 1000
|
||||
|
||||
|
@ -1579,7 +1579,7 @@ func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []s
|
|||
}
|
||||
|
||||
// One of the following aruguements must be non-nil: runningPod, status.
|
||||
// TODO: Modify containerRuntime.KillPod() to accept the right arguements.
|
||||
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
|
||||
func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus) error {
|
||||
var p kubecontainer.Pod
|
||||
if runningPod != nil {
|
||||
|
@ -2114,7 +2114,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
|
|||
glog.Errorf("Failed to cleanup terminated pods: %v", err)
|
||||
}
|
||||
|
||||
// Clear out any old bandwith rules
|
||||
// Clear out any old bandwidth rules
|
||||
if err = kl.cleanupBandwidthLimits(allPods); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -101,7 +101,7 @@ func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
|
|||
}
|
||||
}
|
||||
|
||||
// Returns a channel from which the subscriber can recieve PodLifecycleEvent
|
||||
// Returns a channel from which the subscriber can receive PodLifecycleEvent
|
||||
// events.
|
||||
// TODO: support multiple subscribers.
|
||||
func (g *GenericPLEG) Watch() chan *PodLifecycleEvent {
|
||||
|
|
|
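Watch() above hands a single subscriber a buffered channel of lifecycle events. A small usage sketch of that pattern is shown here; the event type and the "trigger a sync" comment are stand-ins, not the kubelet's real signatures.

package main

import "fmt"

type podLifecycleEvent struct {
	PodID string
	Type  string // e.g. "ContainerStarted", "ContainerDied"
}

func watch() chan *podLifecycleEvent {
	ch := make(chan *podLifecycleEvent, 1000) // mirrors a generous channel capacity
	go func() {
		ch <- &podLifecycleEvent{PodID: "nginx-1234", Type: "ContainerStarted"}
		close(ch)
	}()
	return ch
}

func main() {
	for event := range watch() {
		// In the kubelet this is roughly where a pod sync would be triggered.
		fmt.Printf("pod %s: %s\n", event.PodID, event.Type)
	}
}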
@ -614,7 +614,7 @@ func TestGetPodStatus(t *testing.T) {
|
|||
Name: "guestbook",
|
||||
Namespace: "default",
|
||||
IP: "10.10.10.42",
|
||||
// Result should contain all contianers.
|
||||
// Result should contain all containers.
|
||||
ContainerStatuses: []*kubecontainer.ContainerStatus{
|
||||
{
|
||||
ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"),
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/util"
|
||||
)
|
||||
|
||||
// WorkQueue allows queueing items with a timestamp. An item is
|
||||
// WorkQueue allows queuing items with a timestamp. An item is
|
||||
// considered ready to process if the timestamp has expired.
|
||||
type WorkQueue interface {
|
||||
// GetWork dequeues and returns all ready items.
|
||||
|
|
|
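The WorkQueue comment above defines the semantics precisely: items carry a timestamp, and GetWork dequeues exactly the ones whose timestamps have expired. The sketch below mirrors that interface with an assumed map-based implementation; it is not the real code.

package main

import (
	"fmt"
	"sync"
	"time"
)

type workQueue struct {
	mu    sync.Mutex
	queue map[string]time.Time // item -> time at which it becomes ready
}

func newWorkQueue() *workQueue {
	return &workQueue{queue: map[string]time.Time{}}
}

// Enqueue inserts the item so that it becomes ready after the given delay.
func (q *workQueue) Enqueue(item string, delay time.Duration) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.queue[item] = time.Now().Add(delay)
}

// GetWork dequeues and returns all items whose timestamps have expired.
func (q *workQueue) GetWork() []string {
	q.mu.Lock()
	defer q.mu.Unlock()
	now := time.Now()
	var ready []string
	for item, at := range q.queue {
		if !at.After(now) {
			ready = append(ready, item)
			delete(q.queue, item)
		}
	}
	return ready
}

func main() {
	q := newWorkQueue()
	q.Enqueue("pod-a", 0)
	q.Enqueue("pod-b", time.Hour)
	fmt.Println(q.GetWork()) // only pod-a is ready
}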
@ -216,7 +216,7 @@ func (m *Master) InstallAPIs(c *Config) {
|
|||
|
||||
extensionResources := m.getExtensionResources(c)
|
||||
extensionsGroupMeta := registered.GroupOrDie(extensions.GroupName)
|
||||
// Update the prefered version as per StorageVersions in the config.
|
||||
// Update the preferred version as per StorageVersions in the config.
|
||||
storageVersion, found := c.StorageVersions[extensionsGroupMeta.GroupVersion.Group]
|
||||
if !found {
|
||||
glog.Fatalf("Couldn't find storage version of group %v", extensionsGroupMeta.GroupVersion.Group)
|
||||
|
|
|
@ -142,7 +142,7 @@ func (s *Scheme) Converter() *conversion.Converter {
|
|||
// API group and version that would never be updated.
|
||||
//
|
||||
// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
|
||||
// every version with particular schemas. Resolve tihs method at that point.
|
||||
// every version with particular schemas. Resolve this method at that point.
|
||||
func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ...Object) {
|
||||
s.AddKnownTypes(version, types...)
|
||||
for _, obj := range types {
|
||||
|
|
|
@ -118,7 +118,7 @@ type Unstructured struct {
|
|||
// of an object during the decoding process.
|
||||
type VersionedObjects struct {
|
||||
// Objects is the set of objects retrieved during decoding, in order of conversion.
|
||||
// The 0 index is the object as serialized on the wire. If conversion has occured,
|
||||
// The 0 index is the object as serialized on the wire. If conversion has occurred,
|
||||
// other objects may be present. The right most object is the same as would be returned
|
||||
// by a normal Decode call.
|
||||
Objects []Object
|
||||
|
|
|
@ -127,7 +127,7 @@ func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
|
|||
return entry
|
||||
}
|
||||
|
||||
// After 2*maxDuration we restart the backoff factor to the begining
|
||||
// After 2*maxDuration we restart the backoff factor to the beginning
|
||||
func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
|
||||
return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
|
||||
}
|
||||
|
|
|
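A tiny worked example of the reset rule in the hunk above: once more than 2*maxDuration has passed since the last update, the entry is treated as stable and the backoff factor starts over at its initial value. Only the hasExpired function from the diff is reproduced; the surrounding Backoff type is not.

package main

import (
	"fmt"
	"time"
)

func hasExpired(eventTime, lastUpdate time.Time, maxDuration time.Duration) bool {
	return eventTime.Sub(lastUpdate) > maxDuration*2
}

func main() {
	maxDuration := 5 * time.Minute
	fmt.Println(hasExpired(time.Now(), time.Now().Add(-11*time.Minute), maxDuration)) // true: restart backoff
	fmt.Println(hasExpired(time.Now(), time.Now().Add(-time.Minute), maxDuration))    // false: keep backing off
}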
@ -37,8 +37,8 @@ import (
|
|||
// In general, using this requires that the caller posses the NET_CAP_ADMIN capability, though if you
|
||||
// do this within an container, it only requires the NS_CAPABLE capability for manipulations to that
|
||||
// container's network namespace.
|
||||
// Uses the hierarchical token bucket queueing discipline (htb), this requires Linux 2.4.20 or newer
|
||||
// or a custom kernel with that queueing discipline backported.
|
||||
// Uses the hierarchical token bucket queuing discipline (htb), this requires Linux 2.4.20 or newer
|
||||
// or a custom kernel with that queuing discipline backported.
|
||||
type tcShaper struct {
|
||||
e exec.Interface
|
||||
iface string
|
||||
|
|
|
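The tcShaper comment above says the shaper relies on the hierarchical token bucket (htb) queuing discipline. As a hedged illustration of what that means in practice, the sketch below shells out to tc to install an htb root qdisc and one rate-limited class; the interface name, rates, and class layout are illustrative only and do not reproduce what the kubelet shaper actually installs.

package main

import (
	"fmt"
	"os/exec"
)

func run(args ...string) error {
	out, err := exec.Command("tc", args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("tc %v failed: %v: %s", args, err, out)
	}
	return nil
}

func main() {
	iface := "eth0" // illustrative interface name
	// Root htb qdisc; unclassified traffic falls into class 1:30.
	if err := run("qdisc", "add", "dev", iface, "root", "handle", "1:", "htb", "default", "30"); err != nil {
		fmt.Println(err)
		return
	}
	// A class capping matched traffic at 1mbit.
	if err := run("class", "add", "dev", iface, "parent", "1:", "classid", "1:1", "htb", "rate", "1mbit"); err != nil {
		fmt.Println(err)
	}
}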
@ -108,7 +108,7 @@ func GetOldReplicaSetsFromLists(deployment extensions.Deployment, c clientset.In
|
|||
}
|
||||
|
||||
// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
|
||||
// Returns nil if the new replica set doesnt exist yet.
|
||||
// Returns nil if the new replica set doesn't exist yet.
|
||||
func GetNewReplicaSet(deployment extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) {
|
||||
return GetNewReplicaSetFromList(deployment, c,
|
||||
func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) {
|
||||
|
@ -118,7 +118,7 @@ func GetNewReplicaSet(deployment extensions.Deployment, c clientset.Interface) (
|
|||
}
|
||||
|
||||
// GetNewReplicaSetFromList returns a replica set that matches the intent of the given deployment; get ReplicaSetList with the input function.
|
||||
// Returns nil if the new replica set doesnt exist yet.
|
||||
// Returns nil if the new replica set doesn't exist yet.
|
||||
func GetNewReplicaSetFromList(deployment extensions.Deployment, c clientset.Interface, getRSList func(string, api.ListOptions) ([]extensions.ReplicaSet, error)) (*extensions.ReplicaSet, error) {
|
||||
namespace := deployment.ObjectMeta.Namespace
|
||||
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
|
|
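The deployment hunks above describe looking up the "new" replica set for a deployment and returning nil if it doesn't exist yet. The following is a simplified sketch of that lookup; matching by a plain template-hash string is an illustrative stand-in for the real pod-template comparison.

package main

import "fmt"

type replicaSet struct {
	Name         string
	TemplateHash string
}

type deployment struct {
	Name         string
	TemplateHash string
}

// getNewReplicaSet returns the replica set created for the deployment's
// current template, or nil if no such replica set exists yet.
func getNewReplicaSet(d deployment, candidates []replicaSet) *replicaSet {
	for i := range candidates {
		if candidates[i].TemplateHash == d.TemplateHash {
			return &candidates[i]
		}
	}
	return nil
}

func main() {
	d := deployment{Name: "web", TemplateHash: "abc123"}
	rsList := []replicaSet{{Name: "web-old", TemplateHash: "000aaa"}, {Name: "web-abc123", TemplateHash: "abc123"}}
	if rs := getNewReplicaSet(d, rsList); rs != nil {
		fmt.Println("new replica set:", rs.Name)
	} else {
		fmt.Println("new replica set doesn't exist yet")
	}
}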