mirror of https://github.com/k3s-io/k3s
commit d54e0fc0bc
@@ -39,7 +39,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := app.NewCloudControllerManagerCommand()
@@ -47,7 +47,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	hyperkubeCommand, allCommandFns := NewHyperKubeCommand(server.SetupSignalHandler())
@@ -36,7 +36,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := app.NewAPIServerCommand(server.SetupSignalHandler())
@@ -39,7 +39,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := app.NewControllerManagerCommand()
@@ -33,7 +33,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := app.NewProxyCommand()
@@ -32,7 +32,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := app.NewSchedulerCommand()
@@ -34,7 +34,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := cmd.NewDefaultKubectlCommand()
@@ -732,7 +732,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
 	utilruntime.ReallyCrash = s.ReallyCrashForTesting
 
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	// TODO(vmarmol): Do this through container config.
 	oomAdjuster := kubeDeps.OOMAdjuster
@@ -34,7 +34,7 @@ import (
 )
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := app.NewKubeletCommand(server.SetupSignalHandler())
 	logs.InitLogs()
@@ -100,7 +100,7 @@ func (c *HollowNodeConfig) createClientConfigFromFile() (*restclient.Config, err
 }
 
 func main() {
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 
 	command := newHollowNodeCommand()
@@ -99,7 +99,7 @@ func NewKubemarkController(externalClient kubeclient.Interface, externalInformer
 			nodesToDelete: make(map[string]bool),
 			nodesToDeleteLock: sync.Mutex{},
 		},
-		rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
+		rand: rand.New(rand.NewSource(time.Now().UnixNano())),
 		createNodeQueue: make(chan string, 1000),
 		nodeGroupQueueSize: make(map[string]int),
 		nodeGroupQueueSizeLock: sync.Mutex{},
@@ -18,7 +18,6 @@ package flocker
 
 import (
 	"fmt"
-	"time"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
@@ -68,7 +67,6 @@ func (util *flockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID
 	}
 
 	// select random node
-	rand.Seed(time.Now().UTC().UnixNano())
 	node := nodes[rand.Intn(len(nodes))]
 	glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID)
@@ -27,7 +27,14 @@ var rng = struct {
 	sync.Mutex
 	rand *rand.Rand
 }{
-	rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
+	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
 }
 
+// Int returns a non-negative pseudo-random int.
+func Int() int {
+	rng.Lock()
+	defer rng.Unlock()
+	return rng.rand.Int()
+}
+
 // Intn generates an integer in range [0,max).
@@ -1111,7 +1111,7 @@ var _ = SIGDescribe("Services", func() {
 		}
 
 		outOfRangeNodePort := 0
-		rand.Seed(time.Now().UTC().UnixNano())
+		rand.Seed(time.Now().UnixNano())
 		for {
 			outOfRangeNodePort = 1 + rand.Intn(65535)
 			if !framework.ServiceNodePortRange.Contains(outOfRangeNodePort) {
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 		host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
 		host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
 
-		mathrand.Seed(time.Now().UTC().UnixNano())
+		mathrand.Seed(time.Now().UnixNano())
 	})
 
 	Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() {
@@ -46,6 +46,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
@@ -19,7 +19,6 @@ package vsphere
 import (
 	"context"
 	"fmt"
-	"math/rand"
 	"path/filepath"
 	"regexp"
 	"strings"
@@ -37,6 +36,7 @@ import (
 	storage "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -747,7 +747,6 @@ func GetReadySchedulableNodeInfos() []*NodeInfo {
 // and it's associated NodeInfo object is returned.
 func GetReadySchedulableRandomNodeInfo() *NodeInfo {
 	nodesInfo := GetReadySchedulableNodeInfos()
-	rand.Seed(time.Now().Unix())
 	Expect(nodesInfo).NotTo(BeEmpty())
 	return nodesInfo[rand.Int()%len(nodesInfo)]
 }
@@ -74,6 +74,7 @@ func init() {
 }
 
 func TestMain(m *testing.M) {
+	rand.Seed(time.Now().UnixNano())
 	pflag.Parse()
 	framework.AfterReadingAllFlags(&framework.TestContext)
 	os.Exit(m.Run())
@@ -119,7 +120,6 @@ func TestE2eNode(t *testing.T) {
 		return
 	}
 	// If run-services-mode is not specified, run test.
-	rand.Seed(time.Now().UTC().UnixNano())
 	RegisterFailHandler(Fail)
 	reporters := []Reporter{}
 	reportDir := framework.TestContext.ReportDir
@@ -188,7 +188,7 @@ func main() {
 		glog.Fatalf("--test-suite must be one of default or conformance")
 	}
 
-	rand.Seed(time.Now().UTC().UnixNano())
+	rand.Seed(time.Now().UnixNano())
 	if *buildOnly {
 		// Build the archive and exit
 		remote.CreateTestArchive(suite, *systemSpecName)
@@ -60,7 +60,6 @@ func main() {
 // Outputs linesTotal lines of logs to stdout uniformly for duration
 func generateLogs(linesTotal int, duration time.Duration) {
 	delay := duration / time.Duration(linesTotal)
-	rand.Seed(time.Now().UnixNano())
 
 	ticker := time.NewTicker(delay)
 	defer ticker.Stop()
@@ -17,11 +17,14 @@ limitations under the License.
 package volume
 
 import (
+	"math/rand"
 	"testing"
+	"time"
 
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
 func TestMain(m *testing.M) {
+	rand.Seed(time.Now().UnixNano())
 	framework.EtcdMain(m.Run)
 }
@@ -256,7 +256,6 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	glog.V(2).Infof("TestPersistentVolumeBindRace claims created")
 
 	// putting a bind manually on a pv should only match the claim it is bound to
-	rand.Seed(time.Now().Unix())
 	claim := claims[rand.Intn(maxClaims-1)]
 	claimRef, err := ref.GetReference(legacyscheme.Scheme, claim)
 	if err != nil {
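Not part of the mirrored diff above: a minimal standalone Go sketch of why the recurring edit (dropping UTC() before UnixNano()) is behavior-preserving. time.Time.UnixNano() counts nanoseconds since the Unix epoch and does not depend on the Time's location, so converting to UTC first was a no-op when seeding math/rand.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	now := time.Now()

	// UnixNano is location-independent, so the UTC() conversion does not
	// change the value that ends up being used as the seed.
	fmt.Println(now.UTC().UnixNano() == now.UnixNano()) // prints: true

	// Equivalent seeding without the redundant UTC() call.
	rand.Seed(now.UnixNano())
	fmt.Println(rand.Intn(100))
}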