Merge pull request #53051 from tanshanshan/test925

Automatic merge from submit-queue (batch tested with PRs 53051, 52489, 53920). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

fix todo

**What this PR does / why we need it**:
Fixes several typos in comments and addresses the TODO in `remotePuller.Pull` by checking `ImageStatus` before pulling an image.
thanks
**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
```
Kubernetes Submit Queue 2017-10-24 21:38:17 -07:00 committed by GitHub
commit 1336cc0b05
4 changed files with 9 additions and 6 deletions


```diff
@@ -524,7 +524,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 	return controller
 }
-// createBatchPodSequential creats pods back-to-back in sequence.
+// createBatchPodSequential creates pods back-to-back in sequence.
 func createBatchPodSequential(f *framework.Framework, pods []*v1.Pod) (time.Duration, []framework.PodLatencyData) {
 	batchStartTime := metav1.Now()
 	e2eLags := make([]framework.PodLatencyData, 0)
```
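The shape of the measurement is worth spelling out: pods are created strictly one after another, each pod's e2e latency is measured from its own create call, and the batch lag is measured from the first create. Below is a minimal self-contained sketch of that pattern, with a stand-in `latencyData` type in place of `framework.PodLatencyData` and a `create` callback standing in for the real create-and-wait-for-running logic:

```go
package main

import (
	"fmt"
	"time"
)

// latencyData is a stand-in for framework.PodLatencyData.
type latencyData struct {
	Name    string
	Latency time.Duration
}

// createSequential creates each item back-to-back, waiting for one to finish
// before starting the next, and returns the total batch lag plus the
// per-item e2e latencies.
func createSequential(names []string, create func(string) error) (time.Duration, []latencyData, error) {
	batchStart := time.Now()
	lags := make([]latencyData, 0, len(names))
	for _, name := range names {
		start := time.Now()
		// create is expected to block until the item is up and running.
		if err := create(name); err != nil {
			return 0, nil, err
		}
		lags = append(lags, latencyData{Name: name, Latency: time.Since(start)})
	}
	return time.Since(batchStart), lags, nil
}

func main() {
	names := []string{"pod-0", "pod-1", "pod-2"}
	batchLag, lags, err := createSequential(names, func(name string) error {
		time.Sleep(10 * time.Millisecond) // simulate create + wait-for-running
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("batch lag:", batchLag)
	for _, l := range lags {
		fmt.Printf("%s: %v\n", l.Name, l.Latency)
	}
}
```

Because each create blocks until its pod is running, the per-pod latencies sum to roughly the batch lag, which is what distinguishes this sequential benchmark from a parallel batch creation.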
```diff
@@ -570,7 +570,7 @@ func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLaten
 	logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
 }
-// increaseKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
+// setKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
 func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
 	const restartGap = 40 * time.Second
```
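The renamed comment also states the mechanism: the test rewrites the Kubelet's ConfigMap and then waits out a fixed `restartGap` so the Kubelet can come back up with the new QPS. A rough, runnable sketch of that flow, where `kubeletConfig`, `getCurrentKubeletConfig`, and `setKubeletConfig` are hypothetical stand-ins for the e2e framework's actual ConfigMap round-trip:

```go
package main

import (
	"fmt"
	"time"
)

// kubeletConfig is a minimal stand-in for the real KubeletConfiguration type.
type kubeletConfig struct {
	KubeAPIQPS int32
}

// Hypothetical stand-ins for the framework's ConfigMap round-trip; the real
// calls read and write the Kubelet's ConfigMap through the API server.
func getCurrentKubeletConfig() (*kubeletConfig, error) { return &kubeletConfig{KubeAPIQPS: 5}, nil }
func setKubeletConfig(cfg *kubeletConfig) error        { return nil }

// setKubeletAPIQPSLimit mirrors the pattern the comment describes: update the
// QPS in the config, write it back, then wait out the Kubelet restart.
func setKubeletAPIQPSLimit(newAPIQPS int32) error {
	const restartGap = 40 * time.Second
	cfg, err := getCurrentKubeletConfig()
	if err != nil {
		return err
	}
	cfg.KubeAPIQPS = newAPIQPS
	if err := setKubeletConfig(cfg); err != nil {
		return err
	}
	time.Sleep(restartGap) // give the Kubelet time to restart with the new QPS
	return nil
}

func main() {
	if err := setKubeletAPIQPSLimit(10); err != nil {
		panic(err)
	}
	fmt.Println("kubelet API QPS updated")
}
```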


```diff
@@ -87,7 +87,7 @@ func isDockerLiveRestoreEnabled() (bool, error) {
 	return info.LiveRestoreEnabled, nil
 }
-// stopDockerDaemon starts the Docker daemon.
+// startDockerDaemon starts the Docker daemon.
 func startDockerDaemon() error {
 	switch {
 	case systemdutil.IsRunningSystemd():
```
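`startDockerDaemon` dispatches on the init system; this hunk shows only the systemd arm of the switch. A self-contained sketch of that dispatch, assuming the conventional `systemctl start docker` and `service docker start` commands (the actual commands in the elided switch body are not shown in this hunk):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// isRunningSystemd mirrors what systemdutil.IsRunningSystemd checks: systemd
// creates the /run/systemd/system directory when it is the init system.
func isRunningSystemd() bool {
	fi, err := os.Lstat("/run/systemd/system")
	return err == nil && fi.IsDir()
}

// startDockerDaemon starts the Docker daemon via the init system in use.
func startDockerDaemon() error {
	if isRunningSystemd() {
		return exec.Command("systemctl", "start", "docker").Run()
	}
	return exec.Command("service", "docker", "start").Run()
}

func main() {
	if err := startDockerDaemon(); err != nil {
		fmt.Fprintln(os.Stderr, "failed to start docker:", err)
		os.Exit(1)
	}
}
```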


```diff
@@ -93,8 +93,11 @@ func (rp *remotePuller) Name() string {
 }
 func (rp *remotePuller) Pull(image string) ([]byte, error) {
-	// TODO(runcom): should we check if the image is already pulled with ImageStatus?
-	_, err := rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil)
+	imageStatus, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
+	if err == nil && imageStatus != nil {
+		return nil, nil
+	}
+	_, err = rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil)
 	return nil, err
 }
```
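This hunk is the substantive change of the PR: it resolves the old TODO by asking the runtime whether the image already exists before pulling it. A self-contained sketch of the check-before-pull pattern, with a stand-in `imageService` interface and a fake in place of the CRI client:

```go
package main

import "fmt"

// imageService is a stand-in for the CRI image-service client used by
// remotePuller; only the two calls relevant here are modeled.
type imageService interface {
	ImageStatus(image string) (exists bool, err error)
	PullImage(image string) error
}

// pull implements the check-before-pull pattern from the diff: if the
// runtime already has the image, skip the pull entirely.
func pull(svc imageService, image string) error {
	exists, err := svc.ImageStatus(image)
	if err == nil && exists {
		return nil // already present; nothing to do
	}
	// As in the diff, a status error is not fatal: treat the image as
	// "not present" and fall through to the pull.
	return svc.PullImage(image)
}

// fakeService records which images are present.
type fakeService struct{ present map[string]bool }

func (f *fakeService) ImageStatus(image string) (bool, error) { return f.present[image], nil }
func (f *fakeService) PullImage(image string) error {
	f.present[image] = true
	return nil
}

func main() {
	svc := &fakeService{present: map[string]bool{"busybox": true}}
	fmt.Println(pull(svc, "busybox")) // already present, no pull
	fmt.Println(pull(svc, "nginx"))   // missing, triggers a pull
}
```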


```diff
@@ -534,7 +534,7 @@ func getContainer(pid int) (string, error) {
 		return "", cgroups.NewNotFoundError("memory")
 	}
-	// since we use this container for accounting, we need to ensure its a unified hierarchy.
+	// since we use this container for accounting, we need to ensure it is a unified hierarchy.
 	if cpu != memory {
 		return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory)
 	}
```
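The check compares the cgroup paths that the cpu and memory controllers resolved to for the process; accounting only works if both charge the same container. A sketch of how such paths can be read and compared, assuming the cgroup v1 `/proc/<pid>/cgroup` format (`id:controllers:path`); this is illustrative rather than the code `getContainer` actually uses:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// parseCgroupPaths reads /proc/<pid>/cgroup-style lines ("id:controllers:path")
// into a controller -> path map.
func parseCgroupPaths(r io.Reader) (map[string]string, error) {
	paths := map[string]string{}
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), ":", 3)
		if len(parts) != 3 {
			continue
		}
		for _, ctrl := range strings.Split(parts[1], ",") {
			paths[ctrl] = parts[2]
		}
	}
	return paths, scanner.Err()
}

func main() {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	paths, err := parseCgroupPaths(f)
	if err != nil {
		panic(err)
	}
	cpu, memory := paths["cpu"], paths["memory"]
	if cpu != memory {
		fmt.Printf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s\n", cpu, memory)
		return
	}
	fmt.Println("unified accounting hierarchy:", cpu)
}
```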