mirror of https://github.com/k3s-io/k3s
Merge pull request #74453 from xichengliudui/fixgolintfailures1902

fix golint failures test/integration/...

commit 1595089ec5
@@ -706,6 +706,4 @@ test/integration/master
 test/integration/replicaset
 test/integration/replicationcontroller
 test/integration/scheduler
-test/integration/scheduler_perf
-test/integration/volume
 test/utils
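The hunk above appears to edit the repository's golint exclusion list (hack/.golint_failures in kubernetes): packages listed there are skipped by the lint gate, so deleting test/integration/scheduler_perf and test/integration/volume commits those packages to staying lint-clean, which the code hunks below achieve. A hedged sketch of how such a list is typically consumed (file name and logic illustrative, not the project's actual shell wrapper):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// loadExclusions reads one package path per line, skipping blanks and
// comments, mirroring the format of the list edited above.
func loadExclusions(path string) (map[string]bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	skip := make(map[string]bool)
	s := bufio.NewScanner(f)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		skip[line] = true
	}
	return skip, s.Err()
}

func main() {
	skip, err := loadExclusions(".golint_failures")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("%d packages still excluded from golint\n", len(skip))
}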
@@ -14,7 +14,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/integration/scheduler_perf",
     deps = [
         "//pkg/scheduler/algorithmprovider:go_default_library",
         "//pkg/scheduler/factory:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
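This BUILD hunk keeps the Bazel go_library deps of test/integration/scheduler_perf in sync with the Go import block further down; in kubernetes at the time these files were generated (hack/update-bazel.sh), so a dependency edit normally accompanies the matching import edit. The dep line actually deleted by the -14,7 +14,6 hunk is not shown in this excerpt; only the surrounding context lines remain.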
@@ -130,7 +130,7 @@ func schedulePods(config *testConfig) int32 {
 	prev := 0
 	// On startup there may be a latent period where NO scheduling occurs (qps = 0).
 	// We are interested in low scheduling rates (i.e. qps=2),
-	minQps := int32(math.MaxInt32)
+	minQPS := int32(math.MaxInt32)
 	start := time.Now()
 	// Bake in time for the first pod scheduling event.
 	for {
@@ -167,15 +167,15 @@ func schedulePods(config *testConfig) int32 {
 				consumed = 1
 			}
 			fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
-				config.numPods, consumed, config.numPods/consumed, minQps)
-			return minQps
+				config.numPods, consumed, config.numPods/consumed, minQPS)
+			return minQPS
 		}

 		// There's no point in printing it for the last iteration, as the value is random
 		qps := len(scheduled) - prev
-		qpsStats[qps] += 1
-		if int32(qps) < minQps {
-			minQps = int32(qps)
+		qpsStats[qps]++
+		if int32(qps) < minQPS {
+			minQPS = int32(qps)
 		}
 		fmt.Printf("%ds\trate: %d\ttotal: %d (qps frequency: %v)\n", time.Since(start)/time.Second, qps, len(scheduled), qpsStats)
 		prev = len(scheduled)
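The minQps to minQPS rename satisfies golint's initialisms rule: abbreviations such as QPS, ID, URL, or HTTP keep a uniform case inside an identifier, so minQps becomes minQPS (and an exported MinQps would become MinQPS). A minimal sketch, with illustrative names:

package sketch

// golint: "var minQps should be minQPS"; the initialism keeps one case.
var minQPS int32

// The rule applies to any identifier, exported or not.
type scheduleStats struct {
	MinQPS int32 // not MinQps
	MaxQPS int32 // not MaxQps
}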
@@ -20,7 +20,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/util"
 )
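The underscore import of pkg/scheduler/algorithmprovider is a side-effect import: no identifier from the package is referenced, it is compiled in only so its init functions run (here, registering the default scheduling algorithm provider with the scheduler factory). The import line deleted by the -20,7 +20,6 hunk is not shown in this excerpt. The standard library's net/http/pprof works the same way, so it makes a safe, runnable illustration of the pattern:

package main

import (
	"log"
	"net/http"

	// Side-effect import: net/http/pprof's init() registers the
	// /debug/pprof/* handlers on http.DefaultServeMux, just as the
	// blank algorithmprovider import registers schedulers via init().
	_ "net/http/pprof"
)

func main() {
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}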
@@ -350,9 +350,8 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
 		objects := podInformer.GetIndexer().List()
 		if len(objects) == podNum {
 			return true, nil
-		} else {
-			return false, nil
 		}
+		return false, nil
 	}); err != nil {
 		t.Fatal(err)
 	}
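The waitToObservePods change is golint's else-after-return fix: when an if block ends in a return, the else is dropped and its body outdented, flattening the error flow without changing behavior. A minimal sketch of the rule:

package sketch

// golint on the old form: "if block ends with a return statement, so
// drop this else (or if) and outdent its body".
func reached(objects []interface{}, podNum int) (bool, error) {
	if len(objects) == podNum {
		return true, nil
	}
	return false, nil // previously wrapped in an else block
}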
@@ -244,7 +244,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	maxClaims := 100
 	claims := []*v1.PersistentVolumeClaim{}
 	for counter <= maxClaims {
-		counter += 1
+		counter++
 		newPvc := pvc.DeepCopy()
 		newPvc.ObjectMeta = metav1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
 		claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(newPvc)
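counter++ applies golint's increment rule ("should replace counter += 1 with counter++"). The surrounding loop stamps out distinct claims by deep-copying one template and renaming each copy; a sketch of that shape, with plain strings standing in for PVC objects:

package sketch

import "fmt"

// claimNames mirrors the loop above: bump the counter, then derive a
// unique fake-pvc-race-N name for each cloned claim.
func claimNames(maxClaims int) []string {
	var names []string
	counter := 0
	for counter <= maxClaims {
		counter++ // golint: "should replace counter += 1 with counter++"
		names = append(names, fmt.Sprintf("fake-pvc-race-%d", counter))
	}
	return names
}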
@@ -313,19 +313,19 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 		modes   = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 		reclaim = v1.PersistentVolumeReclaimRetain

-		pv_true  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
-		pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
-		pvc      = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
+		pvTrue  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
+		pvFalse = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
+		pvc     = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
 	)

-	pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
-	pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "false"})
+	pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
+	pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "false"})

-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_true)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvTrue)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_false)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvFalse)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
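pvTrue and pvFalse fix golint's underscore rule ("don't use underscores in Go names; var pv_true should be pvTrue"): multiword Go identifiers use camelCase, with underscores reserved for the blank identifier and generated code. Note that the gofmt alignment of the var block shifts with the shorter names, which is why the otherwise unchanged pvc line also appears on both sides of the diff. A minimal sketch:

package sketch

// golint: "don't use underscores in Go names; var pv_true should be pvTrue".
var (
	pvTrue  = "pv-true"  // was pv_true
	pvFalse = "pv-false" // was pv_false
)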
@@ -394,19 +394,19 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 		modes   = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 		reclaim = v1.PersistentVolumeReclaimRetain

-		pv_true  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
-		pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
-		pvc      = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
+		pvTrue  = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
+		pvFalse = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
+		pvc     = createPVC("pvc-ls-1", ns.Name, "1G", modes, "")
 	)

-	pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
-	pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""})
+	pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
+	pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""})

-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_true)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvTrue)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_false)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvFalse)
 	if err != nil {
 		t.Fatalf("Failed to create PersistentVolume: %v", err)
 	}
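Same rename, different test: this one drives matchExpressions rather than matchLabels selectors. For context, a hedged sketch of the selector shape such a test exercises (values follow the labels set above; the exact selector the test builds is not shown in this excerpt):

package sketch

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// A claim-side selector that matches pvTrue ({"foo": "valA", "bar": ""})
// but not pvFalse ({"foo": "valB", "baz": ""}).
var selector = &metav1.LabelSelector{
	MatchExpressions: []metav1.LabelSelectorRequirement{
		{Key: "foo", Operator: metav1.LabelSelectorOpIn, Values: []string{"valA"}},
		{Key: "bar", Operator: metav1.LabelSelectorOpExists},
	},
}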
@@ -539,7 +539,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 			t.Fatalf("Bind mismatch! Expected %s capacity %d but got %s capacity %d", pvc.Name, expectedCapVal, pv.Spec.ClaimRef.Name, pvCap)
 		}
 		t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage])
-		bound += 1
+		bound++
 	}
 	t.Log("volumes checked")

@@ -965,18 +965,18 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	defer close(stopCh)

 	// This PV will be claimed, released, and deleted
-	pv_rwo := createPV("pv-rwo", "/tmp/foo", "10G",
+	pvRwo := createPV("pv-rwo", "/tmp/foo", "10G",
 		[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
-	pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
+	pvRwm := createPV("pv-rwm", "/tmp/bar", "10G",
 		[]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, v1.PersistentVolumeReclaimRetain)

 	pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, "")

-	_, err := testClient.CoreV1().PersistentVolumes().Create(pv_rwm)
+	_, err := testClient.CoreV1().PersistentVolumes().Create(pvRwm)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
-	_, err = testClient.CoreV1().PersistentVolumes().Create(pv_rwo)
+	_, err = testClient.CoreV1().PersistentVolumes().Create(pvRwo)
 	if err != nil {
 		t.Errorf("Failed to create PersistentVolume: %v", err)
 	}
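pvRwo and pvRwm get the same underscore fix. The setup pairs a ReadWriteOnce volume with a ReadWriteMany one while the claim requests ReadWriteMany, presumably so only the compatible volume can bind: a volume must offer every access mode the claim asks for. The core mode constants, for reference (from k8s.io/api/core/v1):

package sketch

import v1 "k8s.io/api/core/v1"

// The three core access modes; a PV's modes must cover the PVC's request.
var (
	rwo = v1.ReadWriteOnce // read/write, single node
	rox = v1.ReadOnlyMany  // read-only, many nodes
	rwx = v1.ReadWriteMany // read/write, many nodes
)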