mirror of https://github.com/XTLS/Xray-core
Browse Source
https://github.com/XTLS/Xray-core/pull/3613#issuecomment-2351954957 Closes https://github.com/XTLS/Xray-core/issues/3560#issuecomment-2247495778 --------- Co-authored-by: mmmray <142015632+mmmray@users.noreply.github.com>pull/3813/merge
ll11l1lIllIl1lll
2 months ago
committed by
GitHub
9 changed files with 475 additions and 64 deletions
@ -0,0 +1,102 @@
|
||||
package splithttp |
||||
|
||||
import ( |
||||
"context" |
||||
"math/rand" |
||||
"sync/atomic" |
||||
"time" |
||||
|
||||
"github.com/xtls/xray-core/common/errors" |
||||
) |
||||
|
||||
// muxResource wraps one underlying transport object (created by the
// manager's factory function) together with the bookkeeping xmux needs to
// decide when the object may be reused and when it must be retired.
type muxResource struct {
	// Resource is the wrapped object produced by newResourceFn; its
	// concrete type is opaque to this layer.
	Resource interface{}
	// OpenRequests counts requests currently in flight on this resource.
	// Callers increment/decrement it; GetResource reads it to enforce the
	// per-resource concurrency limit.
	OpenRequests atomic.Int32
	// leftUsage is how many more times this resource may be handed out;
	// -1 means unlimited (see newResource). removeExpiredConnections
	// drops the resource once it reaches 0.
	leftUsage int32
	// expirationTime is the wall-clock deadline after which the resource
	// is discarded; the sentinel time.UnixMilli(0) means "never expires".
	expirationTime time.Time
}
||||
|
||||
// muxManager owns a pool of muxResources and decides, per GetResource call,
// whether to reuse a pooled resource or create a new one, based on the
// Multiplexing configuration (max concurrency/connections, reuse, lifetime).
type muxManager struct {
	// newResourceFn creates the underlying object wrapped by each muxResource.
	newResourceFn func() interface{}
	// config holds the multiplexing limits; reuse-times and lifetime
	// ranges are re-rolled per resource in newResource.
	config Multiplexing
	// concurrency is the rolled max open requests per resource; <= 0
	// disables the limit. Rolled once in NewMuxManager.
	concurrency int32
	// connections is the rolled max number of pooled resources; <= 0
	// disables the limit. Rolled once in NewMuxManager.
	connections int32
	// instances is the current pool; pruned by removeExpiredConnections.
	instances []*muxResource
}
||||
|
||||
func NewMuxManager(config Multiplexing, newResource func() interface{}) *muxManager { |
||||
return &muxManager{ |
||||
config: config, |
||||
concurrency: config.GetNormalizedMaxConcurrency().roll(), |
||||
connections: config.GetNormalizedMaxConnections().roll(), |
||||
newResourceFn: newResource, |
||||
instances: make([]*muxResource, 0), |
||||
} |
||||
} |
||||
|
||||
func (m *muxManager) GetResource(ctx context.Context) *muxResource { |
||||
m.removeExpiredConnections(ctx) |
||||
|
||||
if m.connections > 0 && len(m.instances) < int(m.connections) { |
||||
errors.LogDebug(ctx, "xmux: creating client, connections=", len(m.instances)) |
||||
return m.newResource() |
||||
} |
||||
|
||||
if len(m.instances) == 0 { |
||||
errors.LogDebug(ctx, "xmux: creating client because instances is empty, connections=", len(m.instances)) |
||||
return m.newResource() |
||||
} |
||||
|
||||
clients := make([]*muxResource, 0) |
||||
if m.concurrency > 0 { |
||||
for _, client := range m.instances { |
||||
openRequests := client.OpenRequests.Load() |
||||
if openRequests < m.concurrency { |
||||
clients = append(clients, client) |
||||
} |
||||
} |
||||
} else { |
||||
clients = m.instances |
||||
} |
||||
|
||||
if len(clients) == 0 { |
||||
errors.LogDebug(ctx, "xmux: creating client because concurrency was hit, total clients=", len(m.instances)) |
||||
return m.newResource() |
||||
} |
||||
|
||||
client := clients[rand.Intn(len(clients))] |
||||
if client.leftUsage > 0 { |
||||
client.leftUsage -= 1 |
||||
} |
||||
return client |
||||
} |
||||
|
||||
func (m *muxManager) newResource() *muxResource { |
||||
leftUsage := int32(-1) |
||||
if x := m.config.GetNormalizedCMaxReuseTimes().roll(); x > 0 { |
||||
leftUsage = x - 1 |
||||
} |
||||
expirationTime := time.UnixMilli(0) |
||||
if x := m.config.GetNormalizedCMaxLifetimeMs().roll(); x > 0 { |
||||
expirationTime = time.Now().Add(time.Duration(x) * time.Millisecond) |
||||
} |
||||
|
||||
client := &muxResource{ |
||||
Resource: m.newResourceFn(), |
||||
leftUsage: leftUsage, |
||||
expirationTime: expirationTime, |
||||
} |
||||
m.instances = append(m.instances, client) |
||||
return client |
||||
} |
||||
|
||||
func (m *muxManager) removeExpiredConnections(ctx context.Context) { |
||||
for i := 0; i < len(m.instances); i++ { |
||||
client := m.instances[i] |
||||
if client.leftUsage == 0 || (client.expirationTime != time.UnixMilli(0) && time.Now().After(client.expirationTime)) { |
||||
errors.LogDebug(ctx, "xmux: removing client, leftUsage = ", client.leftUsage, ", expirationTime = ", client.expirationTime) |
||||
m.instances = append(m.instances[:i], m.instances[i+1:]...) |
||||
i-- |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,88 @@
|
||||
package splithttp_test |
||||
|
||||
import ( |
||||
"context" |
||||
"testing" |
||||
|
||||
. "github.com/xtls/xray-core/transport/internet/splithttp" |
||||
) |
||||
|
||||
// fakeRoundTripper is a stand-in for the real transport resource; the mux
// manager treats resources as opaque, so an empty struct suffices.
type fakeRoundTripper struct{}
||||
|
||||
func TestMaxConnections(t *testing.T) { |
||||
config := Multiplexing{ |
||||
MaxConnections: &RandRangeConfig{From: 4, To: 4}, |
||||
} |
||||
|
||||
mux := NewMuxManager(config, func() interface{} { |
||||
return &fakeRoundTripper{} |
||||
}) |
||||
|
||||
clients := make(map[interface{}]struct{}) |
||||
for i := 0; i < 8; i++ { |
||||
clients[mux.GetResource(context.Background())] = struct{}{} |
||||
} |
||||
|
||||
if len(clients) != 4 { |
||||
t.Error("did not get 4 distinct clients, got ", len(clients)) |
||||
} |
||||
} |
||||
|
||||
func TestCMaxReuseTimes(t *testing.T) { |
||||
config := Multiplexing{ |
||||
CMaxReuseTimes: &RandRangeConfig{From: 2, To: 2}, |
||||
} |
||||
|
||||
mux := NewMuxManager(config, func() interface{} { |
||||
return &fakeRoundTripper{} |
||||
}) |
||||
|
||||
clients := make(map[interface{}]struct{}) |
||||
for i := 0; i < 64; i++ { |
||||
clients[mux.GetResource(context.Background())] = struct{}{} |
||||
} |
||||
|
||||
if len(clients) != 32 { |
||||
t.Error("did not get 32 distinct clients, got ", len(clients)) |
||||
} |
||||
} |
||||
|
||||
func TestMaxConcurrency(t *testing.T) { |
||||
config := Multiplexing{ |
||||
MaxConcurrency: &RandRangeConfig{From: 2, To: 2}, |
||||
} |
||||
|
||||
mux := NewMuxManager(config, func() interface{} { |
||||
return &fakeRoundTripper{} |
||||
}) |
||||
|
||||
clients := make(map[interface{}]struct{}) |
||||
for i := 0; i < 64; i++ { |
||||
client := mux.GetResource(context.Background()) |
||||
client.OpenRequests.Add(1) |
||||
clients[client] = struct{}{} |
||||
} |
||||
|
||||
if len(clients) != 32 { |
||||
t.Error("did not get 32 distinct clients, got ", len(clients)) |
||||
} |
||||
} |
||||
|
||||
func TestDefault(t *testing.T) { |
||||
config := Multiplexing{} |
||||
|
||||
mux := NewMuxManager(config, func() interface{} { |
||||
return &fakeRoundTripper{} |
||||
}) |
||||
|
||||
clients := make(map[interface{}]struct{}) |
||||
for i := 0; i < 64; i++ { |
||||
client := mux.GetResource(context.Background()) |
||||
client.OpenRequests.Add(1) |
||||
clients[client] = struct{}{} |
||||
} |
||||
|
||||
if len(clients) != 1 { |
||||
t.Error("did not get 1 distinct clients, got ", len(clients)) |
||||
} |
||||
} |
Loading…
Reference in new issue