diff --git a/hack/.golint_failures b/hack/.golint_failures
index bec40496f3..626ccc0b79 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -706,6 +706,4 @@ test/integration/master
 test/integration/replicaset
 test/integration/replicationcontroller
 test/integration/scheduler
-test/integration/scheduler_perf
-test/integration/volume
 test/utils
diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD
index b83637d6c4..462e1c0f1a 100644
--- a/test/integration/scheduler_perf/BUILD
+++ b/test/integration/scheduler_perf/BUILD
@@ -14,7 +14,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
     deps = [
-        "//pkg/scheduler/algorithmprovider:go_default_library",
         "//pkg/scheduler/factory:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go
index 37cc04991c..978f369dcc 100644
--- a/test/integration/scheduler_perf/scheduler_test.go
+++ b/test/integration/scheduler_perf/scheduler_test.go
@@ -130,7 +130,7 @@ func schedulePods(config *testConfig) int32 {
 	prev := 0
 	// On startup there may be a latent period where NO scheduling occurs (qps = 0).
 	// We are interested in low scheduling rates (i.e. qps=2),
-	minQps := int32(math.MaxInt32)
+	minQPS := int32(math.MaxInt32)
 	start := time.Now()
 	// Bake in time for the first pod scheduling event.
 	for {
@@ -167,15 +167,15 @@ func schedulePods(config *testConfig) int32 {
 				consumed = 1
 			}
 			fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
-				config.numPods, consumed, config.numPods/consumed, minQps)
-			return minQps
+				config.numPods, consumed, config.numPods/consumed, minQPS)
+			return minQPS
 		}
 
 		// There's no point in printing it for the last iteration, as the value is random
 		qps := len(scheduled) - prev
-		qpsStats[qps] += 1
-		if int32(qps) < minQps {
-			minQps = int32(qps)
+		qpsStats[qps]++
+		if int32(qps) < minQPS {
+			minQPS = int32(qps)
 		}
 		fmt.Printf("%ds\trate: %d\ttotal: %d (qps frequency: %v)\n", time.Since(start)/time.Second, qps, len(scheduled), qpsStats)
 		prev = len(scheduled)
diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go
index 9991da31d7..ce91edddde 100644
--- a/test/integration/scheduler_perf/util.go
+++ b/test/integration/scheduler_perf/util.go
@@ -20,7 +20,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
-	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/util"
 )
diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go
index aa72838c05..c737c87c6f 100644
--- a/test/integration/volume/attach_detach_test.go
+++ b/test/integration/volume/attach_detach_test.go
@@ -350,9 +350,8 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
 		objects := podInformer.GetIndexer().List()
 		if len(objects) == podNum {
 			return true, nil
-		} else {
-			return false, nil
 		}
+		return false, nil
 	}); err != nil {
 		t.Fatal(err)
 	}
diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go
index 7fdba82ec3..63b5702039 100644
--- a/test/integration/volume/persistent_volumes_test.go
+++ b/test/integration/volume/persistent_volumes_test.go
@@ -244,7 +244,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	maxClaims := 100
 	claims := []*v1.PersistentVolumeClaim{}
 	for counter <= maxClaims {
-		counter += 1
+		counter++
 		newPvc := pvc.DeepCopy()
 		newPvc.ObjectMeta = metav1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
 		claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(newPvc)
@@ -313,19 +313,19 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 		modes   = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 		reclaim = v1.PersistentVolumeReclaimRetain
 
-		pv_true  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
-		pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
-		pvc      = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
+		pvTrue  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
+		pvFalse = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
+		pvc     = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
 	)
 
-	pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
-	pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "false"})
+	pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
+	pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "false"})
 
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_true)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvTrue)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_false)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvFalse)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
@@ -394,19 +394,19 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 		modes   = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 		reclaim = v1.PersistentVolumeReclaimRetain
 
-		pv_true  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
-		pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
-		pvc      = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
+		pvTrue  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
+		pvFalse = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
+		pvc     = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
 	)
 
-	pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
-	pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""})
+	pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
+	pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""})
 
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_true)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvTrue)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_false)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvFalse)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
@@ -539,7 +539,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 			t.Fatalf("Bind mismatch! Expected %s capacity %d but got %s capacity %d", pvc.Name, expectedCapVal, pv.Spec.ClaimRef.Name, pvCap)
 		}
 		t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage])
-		bound += 1
+		bound++
 	}
 	t.Log("volumes checked")
 
@@ -965,18 +965,18 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	defer close(stopCh)
 
 	// This PV will be claimed, released, and deleted
-	pv_rwo := createPV("pv-rwo", "/tmp/foo", "10G",
+	pvRwo := createPV("pv-rwo", "/tmp/foo", "10G",
 		[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
-	pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
+	pvRwm := createPV("pv-rwm", "/tmp/bar", "10G",
 		[]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, v1.PersistentVolumeReclaimRetain)
 	pvc := createPVC("pvc-rwm", ns.Name, "5G",
 		[]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, "")
 
-	_, err := testClient.CoreV1().PersistentVolumes().Create(pv_rwm)
+	_, err := testClient.CoreV1().PersistentVolumes().Create(pvRwm)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_rwo)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvRwo)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}