Merge pull request #38763 from aaron12134/spell-corrective-doctrine

Automatic merge from submit-queue

make spellcheck for test/*

**What this PR does / why we need it**: Increases code readability by fixing spelling mistakes in comments and messages.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**: only a slight effort 

**Release note**:

```release-note
```
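
The PR doesn't say whether a tool was used to find these; as a rough sketch, one way to hunt for misspellings like the ones fixed here is a grep over a small word list (the list below is just the typos from this diff, not an official list):

```bash
#!/usr/bin/env bash
# Rough sketch only: scan the tree for known misspellings with grep.
set -o errexit -o nounset

typos=("ophaned" "exluded" "shoule" "unexpectly" "retruns" "resoure" "craated" "expresssions" "OwnerRefereces")

for typo in "${typos[@]}"; do
  # -r recurse, -n line numbers, -I skip binaries; '|| true' because grep
  # exits non-zero when a word is (happily) not found anywhere.
  grep -rnI "${typo}" test/ hack/ || true
done
```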
Kubernetes Submit Queue 2016-12-14 12:25:53 -08:00 committed by GitHub
commit 6c50e73d4d
10 changed files with 16 additions and 16 deletions

View File

@@ -43,7 +43,7 @@ kube::util::gen-docs "${KUBE_TEMP}"
# remove all of the old docs
kube::util::remove-gen-docs
-# Copy fresh docs into the repo.
+# copy fresh docs into the repo.
# the shopt is so that we get .generated_docs from the glob.
shopt -s dotglob
cp -af "${KUBE_TEMP}"/* "${KUBE_ROOT}"
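
As the surviving comment notes, `shopt -s dotglob` is what lets the `*` glob pick up dotfiles such as `.generated_docs`; by default, bash globs skip them. A quick demonstration:

```bash
#!/usr/bin/env bash
# Show why dotglob is needed to copy hidden files like .generated_docs.
tmp="$(mktemp -d)"
touch "${tmp}/visible.md" "${tmp}/.generated_docs"

cd "${tmp}"
echo *           # prints: visible.md        (.generated_docs is skipped)
shopt -s dotglob
echo *           # prints: .generated_docs visible.md
```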

View File

@@ -91,7 +91,7 @@ function start_discovery {
${kubectl} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-discovery.kubeconfig" --certificate-authority="${CERT_DIR}/discovery-ca.crt" --embed-certs --server="https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}"
# Wait for kubernetes-discovery to come up before launching the rest of the components.
-# this should work since we're creating a node port service
+# This should work since we're creating a node port service.
echo "Waiting for kubernetes-discovery to come up: https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version"
kube::util::wait_for_url "https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version" "kubernetes-discovery: " 1 60 || exit 1
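
`kube::util::wait_for_url` itself isn't part of this diff; judging by the call site, the trailing `1 60` read like a retry interval in seconds and an attempt count. A minimal curl-based stand-in under that assumption (hypothetical, not the real helper):

```bash
# Hypothetical stand-in for kube::util::wait_for_url: poll a URL until it
# answers, retrying every ${interval}s for up to ${times} attempts.
wait_for_url() {
  local url=$1 prefix=${2:-} interval=${3:-1} times=${4:-30}
  local i
  for ((i = 0; i < times; i++)); do
    # --insecure because local-up clusters typically use self-signed certs
    if curl --silent --fail --insecure "${url}" > /dev/null; then
      echo "${prefix}${url} is up"
      return 0
    fi
    sleep "${interval}"
  done
  echo "${prefix}timed out waiting for ${url}" >&2
  return 1
}

# Usage mirroring the call above:
# wait_for_url "https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version" "kubernetes-discovery: " 1 60 || exit 1
```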

View File

@@ -49,4 +49,4 @@ PATH="${KUBE_ROOT}/_output/bin:${PATH}" \
"${gotoprotobuf}" \
--proto-import="${KUBE_ROOT}/vendor" \
--proto-import="${KUBE_ROOT}/third_party/protobuf" \
-$@
+"$@"

View File

@@ -25,7 +25,7 @@ kube::golang::setup_env
make -C "${KUBE_ROOT}" WHAT=cmd/hyperkube
-# add other BADSYMBOLS here.
+# Add other BADSYMBOLS here.
BADSYMBOLS=(
"httptest"
"testify"

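The rest of this script is cut off here, but the shape suggests it builds hyperkube and then fails if any `BADSYMBOLS` entry shows up in the binary, i.e. if test-only packages leaked into a production build. A rough sketch under that assumption, using `go tool nm` to list symbols (the binary path and nm-based check are assumptions, not the real script):

```bash
#!/usr/bin/env bash
# Rough sketch: fail if test-only symbols leak into the hyperkube binary.
set -o errexit -o nounset

BINARY="_output/bin/hyperkube"   # hypothetical path to the built binary
BADSYMBOLS=("httptest" "testify")

for symbol in "${BADSYMBOLS[@]}"; do
  if go tool nm "${BINARY}" | grep -q "${symbol}"; then
    echo "FAIL: forbidden symbol '${symbol}' found in ${BINARY}" >&2
    exit 1
  fi
done
echo "OK: no forbidden symbols found"
```
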
View File

@@ -428,7 +428,7 @@ func TestOrphaning(t *testing.T) {
t.Fatalf("Failed to create replication controller: %v", err)
}
-// these pods should be ophaned.
+// these pods should be orphaned.
var podUIDs []types.UID
podsNum := 3
for i := 0; i < podsNum; i++ {
@@ -478,7 +478,7 @@ func TestOrphaning(t *testing.T) {
}
for _, pod := range pods.Items {
if len(pod.ObjectMeta.OwnerReferences) != 0 {
-t.Errorf("pod %s still has non-empty OwnerRefereces: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
+t.Errorf("pod %s still has non-empty OwnerReferences: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
}
}
}

View File

@@ -174,7 +174,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
// An unschedulable node should still be present in the store
// Nodes that are unschedulable or that are not ready or
-// have their disk full (Node.Spec.Conditions) are exluded
+// have their disk full (Node.Spec.Conditions) are excluded
// based on NodeConditionPredicate, a separate check
return node != nil && node.(*v1.Node).Spec.Unschedulable == true
})
@@ -319,7 +319,7 @@ func TestMultiScheduler(t *testing.T) {
8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
- note: these two pods belong to default scheduler which no longer exists
9. **check point-3**:
-- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled
+- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
*/
// 1. create and start default-scheduler
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
@@ -453,7 +453,7 @@ func TestMultiScheduler(t *testing.T) {
}
// 9. **check point-3**:
-// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled
+// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
if err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
@@ -587,7 +587,7 @@ func TestAllocatable(t *testing.T) {
// 7. Test: this test pod should not be scheduled since it request more than Allocatable
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod2.Namespace, testAllocPod2.Name))
if err == nil {
-t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectly, %v", testAllocPod2.Name, err)
+t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
} else {
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
}

View File

@@ -184,7 +184,7 @@ func defaultSchedulerBenchmarkConfig(numNodes, numPods int) *testConfig {
// This is used to learn the scheduling throughput on various
// sizes of cluster and changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
-// It retruns the minimum of throughput over whole run.
+// It returns the minimum of throughput over whole run.
func schedulePods(config *testConfig) int32 {
defer config.destroyFunc()
if err := config.nodePreparer.PrepareNodes(); err != nil {

View File

@@ -78,7 +78,7 @@ type FooList struct {
Items []Foo `json:"items"`
}
-// installThirdParty installs a third party resoure and returns a defer func
+// installThirdParty installs a third party resource and returns a defer func
func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
var err error
_, err = client.Extensions().ThirdPartyResources().Create(tpr)

View File

@@ -372,7 +372,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
-ns := framework.CreateTestingNamespace("pvc-match-expresssions", s, t)
+ns := framework.CreateTestingNamespace("pvc-match-expressions", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
@@ -778,7 +778,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
}
pvs[i] = newPV
// Drain watchPV with all events generated by the PV until it's bound
-// We don't want to catch "PV craated with Status.Phase == Pending"
+// We don't want to catch "PV created with Status.Phase == Pending"
// later in this test.
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
}
@@ -1135,7 +1135,7 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
}
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
if err != nil {
-t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
+t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
}
return testClient, ctrl, watchPV, watchPVC

View File

@@ -40,5 +40,5 @@ else
fi
go run ./hack/e2e.go -v --check_version_skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS}"
-# Just make local testing easier...
+# Just make local test easier...
# ${KUBE_ROOT}/hack/ginkgo-e2e.sh "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" $ARGS