mirror of https://github.com/k3s-io/k3s
Merge pull request #68403 from wgliang/master.deprecate-Parallelize
Replace Parallelize with function ParallelizeUntil and formally depre…
commit c00f19bd15
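The diff below touches the helper itself and every in-tree caller: callers switch from workqueue.Parallelize to workqueue.ParallelizeUntil with context.TODO(), and Parallelize remains only as a deprecated shim. The caller-side change is mechanical; a minimal sketch of the migration (the item slice, worker count, and work function here are illustrative, not taken from this diff):

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	items := []string{"node-a", "node-b", "node-c"}

	processItem := func(i int) {
		// Each worker receives only an index; the data is captured by the closure.
		fmt.Println("processing", items[i])
	}

	// Before: workqueue.Parallelize(16, len(items), processItem)
	// After:  the same call with a leading context; context.TODO() preserves the
	// old "run every piece, never cancel" behaviour used throughout this change.
	workqueue.ParallelizeUntil(context.TODO(), 16, len(items), processItem)
}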
@@ -17,6 +17,7 @@ limitations under the License.
 package predicates
 
 import (
+	"context"
 	"fmt"
 	"sync"
 
@@ -487,7 +488,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
 			appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
 		}
 	}
-	workqueue.Parallelize(16, len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
 }
 
@@ -17,6 +17,7 @@ limitations under the License.
 package priorities
 
 import (
+	"context"
 	"sync"
 
 	"k8s.io/api/core/v1"
@@ -210,7 +211,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 			}
 		}
 	}
-	workqueue.Parallelize(16, len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	if pm.firstError != nil {
 		return nil, pm.firstError
 	}
@@ -684,7 +684,7 @@ func PrioritizeNodes(
 			}
 		}
 	}
-	workqueue.Parallelize(16, len(nodes), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), processNode)
 	for i, priorityConfig := range priorityConfigs {
 		if priorityConfig.Reduce == nil {
 			continue
@@ -915,7 +915,7 @@ func selectNodesForPreemption(pod *v1.Pod,
 			resultLock.Unlock()
 		}
 	}
-	workqueue.Parallelize(16, len(potentialNodes), checkNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
 	return nodeToVictims, nil
 }
 
@@ -27,6 +27,8 @@ type DoWorkPieceFunc func(piece int)
 
 // Parallelize is a very simple framework that allows for parallelizing
 // N independent pieces of work.
+//
+// Deprecated: Use ParallelizeUntil instead.
 func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
 	ParallelizeUntil(nil, workers, pieces, doWorkPiece)
 }
 
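The shim above passes a nil context, which ParallelizeUntil accepts and treats like the old Parallelize: every piece runs. The point of the new signature is that a real context lets callers stop early; roughly, workers stop picking up new pieces once the context is done, while pieces already running finish. A rough sketch under that assumption (the timeout, worker count, and sleeping work function are illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A cancellable context bounds how long the pool keeps handing out work.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	workqueue.ParallelizeUntil(ctx, 4, 100, func(i int) {
		time.Sleep(100 * time.Millisecond) // stand-in for real work
		fmt.Println("finished piece", i)
	})
}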
@@ -17,6 +17,7 @@ limitations under the License.
 package apimachinery
 
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"reflect"
@@ -44,7 +45,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 		c := f.ClientSet
 		client := c.CoreV1().PodTemplates(ns)
 		By("creating a large number of resources")
-		workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
+		workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) {
 			for tries := 3; tries >= 0; tries-- {
 				_, err := client.Create(&v1.PodTemplate{
 					ObjectMeta: metav1.ObjectMeta{
@@ -18,6 +18,7 @@ package apimachinery
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"text/tabwriter"
 
@@ -79,7 +80,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
 		client := c.CoreV1().PodTemplates(ns)
 
 		By("creating a large number of resources")
-		workqueue.Parallelize(5, 20, func(i int) {
+		workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) {
 			for tries := 3; tries >= 0; tries-- {
 				_, err := client.Create(&v1.PodTemplate{
 					ObjectMeta: metav1.ObjectMeta{
@@ -17,6 +17,7 @@ limitations under the License.
 package network
 
 import (
+	"context"
 	"fmt"
 	"strconv"
 	"time"
@@ -66,7 +67,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 			framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
 		}
 		framework.Logf("Creating %v test services", maxServicesPerCluster)
-		workqueue.Parallelize(parallelCreateServiceWorkers, len(services), createService)
+		workqueue.ParallelizeUntil(context.TODO(), parallelCreateServiceWorkers, len(services), createService)
 		dnsTest := dnsTestCommon{
 			f: f,
 			c: f.ClientSet,
@@ -17,6 +17,7 @@ limitations under the License.
 package scalability
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"os"
@@ -851,7 +852,7 @@ var _ = SIGDescribe("Density", func() {
 				name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i+1)
 				framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
 			}
-			workqueue.Parallelize(25, nodeCount, deleteRC)
+			workqueue.ParallelizeUntil(context.TODO(), 25, nodeCount, deleteRC)
 			podDeletionPhase.End()
 		}
 		close(stopCh)
@@ -17,6 +17,7 @@ limitations under the License.
 package scalability
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"math/rand"
@@ -37,11 +38,14 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
 	cacheddiscovery "k8s.io/client-go/discovery/cached"
+	"k8s.io/client-go/dynamic"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
 	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/transport"
 	"k8s.io/client-go/util/workqueue"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/extensions"
@@ -52,9 +56,6 @@ import (
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/restmapper"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
 )
 
 const (
@@ -240,7 +241,7 @@ var _ = SIGDescribe("Load capacity", func() {
 			defer GinkgoRecover()
 			framework.ExpectNoError(testutils.CreateServiceWithRetries(clientset, services[i].Namespace, services[i]))
 		}
-		workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
+		workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), createService)
 		framework.Logf("%v Services created.", len(services))
 		defer func(services []*v1.Service) {
 			serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
@@ -250,7 +251,7 @@ var _ = SIGDescribe("Load capacity", func() {
 				defer GinkgoRecover()
 				framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
 			}
			workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService)
+			workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), deleteService)
 			framework.Logf("Services deleted")
 		}(services)
 	} else {
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 
 import (
+	"context"
 	"fmt"
 	"math"
 	"os"
@@ -1061,9 +1062,9 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe
 	}
 
 	if podCount < 30 {
-		workqueue.Parallelize(podCount, podCount, createPodFunc)
+		workqueue.ParallelizeUntil(context.TODO(), podCount, podCount, createPodFunc)
 	} else {
-		workqueue.Parallelize(30, podCount, createPodFunc)
+		workqueue.ParallelizeUntil(context.TODO(), 30, podCount, createPodFunc)
 	}
 	return createError
 }