Connection disconnection | IP limit bug

pull/103/head
刘河 2019-03-18 14:18:58 +08:00
parent 572dcd2aab
commit e24b2921ac
5 changed files with 51 additions and 43 deletions

View File

@@ -324,7 +324,7 @@ func (s *Bridge) register(c *conn.Conn) {
 	var hour int32
 	if err := binary.Read(c, binary.LittleEndian, &hour); err == nil {
 		s.registerLock.Lock()
-		s.Register[common.GetIpByAddr(c.Conn.RemoteAddr().String())] = time.Now().Add(time.Minute * time.Duration(hour))
+		s.Register[common.GetIpByAddr(c.Conn.RemoteAddr().String())] = time.Now().Add(time.Hour * time.Duration(hour))
 		s.registerLock.Unlock()
 	}
 }
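This hunk is the "IP limit" half of the commit title: the client sends the number of hours its IP should stay registered, but the expiry was computed with time.Minute, so entries expired sixty times too early. A minimal sketch of the corrected arithmetic (the map is a simplified stand-in for the Bridge.Register field above):

package main

import (
	"fmt"
	"time"
)

// register stands in for Bridge.Register in the hunk above.
var register = make(map[string]time.Time)

func registerIp(ip string, hours int32) {
	// Before the fix this used time.Minute, so a client asking for a
	// 2-hour window was only registered for 2 minutes.
	register[ip] = time.Now().Add(time.Hour * time.Duration(hours))
}

func main() {
	registerIp("1.2.3.4", 2)
	fmt.Println(register["1.2.3.4"]) // expires two hours from now
}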

View File

@@ -26,6 +26,7 @@ type conn struct {
 	connId   int32
 	isClose  bool
 	readWait bool
+	hasWrite int
 	mux      *Mux
 }
@@ -83,10 +84,8 @@ func (s *conn) Read(buf []byte) (n int, err error) {
 	} else {
 		n = copy(buf, s.readBuffer[s.startRead:s.endRead])
 		s.startRead += n
-		if s.waitQueue.Size() < s.mux.waitQueueSize/2 {
 		s.mux.sendInfo(MUX_MSG_SEND_OK, s.connId, nil)
-		}
 	}
 	return
 }
@@ -116,9 +115,10 @@ func (s *conn) write(buf []byte, ch chan struct{}) {
 	start := 0
 	l := len(buf)
 	for {
-		if s.stopWrite {
+		if s.hasWrite > 10 {
 			<-s.getStatusCh
 		}
+		s.hasWrite++
 		if l-start > pool.PoolSizeCopy {
 			s.mux.sendInfo(MUX_NEW_MSG, s.connId, buf[start:start+pool.PoolSizeCopy])
 			start += pool.PoolSizeCopy
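Taken together, the conn.go hunks replace the old stopWrite flag (driven by a wait-queue threshold and a dedicated MUX_STOP_WRITE message, both removed in the mux.go hunks below) with a plain in-flight counter: write blocks on getStatusCh once more than 10 chunks are unacknowledged, and each MUX_MSG_SEND_OK from the peer wakes the writer and decrements hasWrite. A standalone sketch of the scheme, with statusCh standing in for getStatusCh:

package main

import "fmt"

type flowConn struct {
	hasWrite int           // chunks sent but not yet acked
	statusCh chan struct{} // wake-up signal, like getStatusCh
}

func (c *flowConn) writeChunk(chunk []byte, send func([]byte)) {
	if c.hasWrite > 10 { // too many unacknowledged chunks: wait for an ack
		<-c.statusCh
	}
	c.hasWrite++
	send(chunk)
}

// onSendOk is what the MUX_MSG_SEND_OK branch does: wake any blocked
// writer (non-blocking send), then decrement the in-flight counter.
func (c *flowConn) onSendOk() {
	select {
	case c.statusCh <- struct{}{}:
	default:
	}
	c.hasWrite--
}

func main() {
	c := &flowConn{statusCh: make(chan struct{}, 1)}
	c.writeChunk([]byte("hello"), func(b []byte) { fmt.Printf("sent %d bytes\n", len(b)) })
	c.onSendOk()
	fmt.Println("in flight:", c.hasWrite)
}

Note that in the diff hasWrite is updated from both the write path and the readSession goroutine with no lock or atomic around it; the sketch keeps that simplification, but a production version would want sync/atomic.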

View File

@ -5,6 +5,7 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"github.com/cnlh/nps/lib/pool" "github.com/cnlh/nps/lib/pool"
"github.com/cnlh/nps/vender/github.com/astaxie/beego/logs"
"math" "math"
"net" "net"
"sync" "sync"
@@ -22,8 +23,6 @@ const (
 	MUX_PING
 	MUX_CONN_CLOSE
 	MUX_PING_RETURN
-	MUX_STOP_WRITE
-	RETRY_TIME = 2 //Heart beat allowed fault tolerance times
 )

 type Mux struct {
@ -34,8 +33,6 @@ type Mux struct {
id int32 id int32
closeChan chan struct{} closeChan chan struct{}
IsClose bool IsClose bool
pingOk int
waitQueueSize int
sync.Mutex sync.Mutex
} }
@@ -47,7 +44,6 @@ func NewMux(c net.Conn) *Mux {
 		closeChan: make(chan struct{}),
 		newConnCh: make(chan *conn),
 		IsClose:   false,
-		waitQueueSize: 10, //TODO: In order to be more efficient, this value can be dynamically generated according to the delay algorithm.
 	}
 	//read session by flag
 	go m.readSession()
@ -104,7 +100,7 @@ func (s *Mux) sendInfo(flag int32, id int32, content []byte) error {
binary.Write(raw, binary.LittleEndian, int32(len(content))) binary.Write(raw, binary.LittleEndian, int32(len(content)))
binary.Write(raw, binary.LittleEndian, content) binary.Write(raw, binary.LittleEndian, content)
} }
if _, err := s.conn.Write(raw.Bytes()); err != nil || s.pingOk > RETRY_TIME { if _, err := s.conn.Write(raw.Bytes()); err != nil {
s.Close() s.Close()
return err return err
} }
@@ -113,7 +109,7 @@ func (s *Mux) sendInfo(flag int32, id int32, content []byte) error {
 func (s *Mux) ping() {
 	go func() {
-		ticker := time.NewTicker(time.Second * 5)
+		ticker := time.NewTicker(time.Second * 1)
 		for {
 			select {
 			case <-ticker.C:
@@ -122,10 +118,11 @@ func (s *Mux) ping() {
 				if (math.MaxInt32 - s.id) < 10000 {
 					s.id = 0
 				}
-				if err := s.sendInfo(MUX_PING_FLAG, MUX_PING, nil); err != nil || s.pingOk > RETRY_TIME {
+				if err := s.sendInfo(MUX_PING_FLAG, MUX_PING, nil); err != nil {
+					logs.Error("ping error, close the connection")
+					s.Close()
 					break
 				}
-				s.pingOk += 1
 			}
 		}
 	}()
 	select {
@ -155,7 +152,6 @@ func (s *Mux) readSession() {
s.sendInfo(MUX_PING_RETURN, MUX_PING, nil) s.sendInfo(MUX_PING_RETURN, MUX_PING, nil)
continue continue
case MUX_PING_RETURN: case MUX_PING_RETURN:
s.pingOk -= 1
continue continue
case MUX_NEW_MSG: case MUX_NEW_MSG:
buf = pool.GetBufPoolCopy() buf = pool.GetBufPoolCopy()
@@ -173,19 +169,12 @@ func (s *Mux) readSession() {
 					conn.readWait = false
 					conn.readCh <- struct{}{}
 				}
-				if conn.waitQueue.Size() > s.waitQueueSize {
-					s.sendInfo(MUX_STOP_WRITE, conn.connId, nil)
-				}
-			case MUX_STOP_WRITE:
-				conn.stopWrite = true
 			case MUX_MSG_SEND_OK: //the remote has read
-				if conn.stopWrite {
-					conn.stopWrite = false
 				select {
 				case conn.getStatusCh <- struct{}{}:
 				default:
 				}
-				}
+				conn.hasWrite--
 			case MUX_NEW_CONN_OK: //conn ok
 				conn.connStatusOkCh <- struct{}{}
 			case MUX_NEW_CONN_Fail:
@@ -198,6 +187,7 @@ func (s *Mux) readSession() {
 				pool.PutBufPoolCopy(buf)
 			}
 		} else {
+			logs.Error("read or send error")
 			break
 		}
 	}
@@ -214,9 +204,12 @@ func (s *Mux) Close() error {
 	}
 	s.IsClose = true
 	s.connMap.Close()
-	s.closeChan <- struct{}{}
-	s.closeChan <- struct{}{}
-	s.closeChan <- struct{}{}
+	select {
+	case s.closeChan <- struct{}{}:
+	}
+	select {
+	case s.closeChan <- struct{}{}:
+	}
 	return s.conn.Close()
 }
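The mux.go hunks drop the pingOk/RETRY_TIME fault-tolerance accounting entirely: the keep-alive ticker now fires every second and any error writing the ping closes the session on the spot. Roughly, the new loop behaves like this sketch, where sendPing and closeMux are stand-ins for s.sendInfo and s.Close:

package main

import (
	"fmt"
	"time"
)

// keepAlive mirrors the simplified ping loop: no retry counter,
// any send error tears the whole session down.
func keepAlive(sendPing func() error, closeMux func()) {
	ticker := time.NewTicker(time.Second * 1)
	defer ticker.Stop()
	for range ticker.C {
		if err := sendPing(); err != nil {
			fmt.Println("ping error, close the connection")
			closeMux()
			return
		}
	}
}

func main() {
	remaining := 3 // simulate a link that dies after three pings
	keepAlive(func() error {
		remaining--
		if remaining < 0 {
			return fmt.Errorf("broken pipe")
		}
		return nil
	}, func() { fmt.Println("mux closed") })
}

On the Close() change: a select with a single case and no default blocks exactly like a plain channel send, so the rewrite effectively reduces three signals on closeChan to two, presumably one per goroutine that still receives from it.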

View File

@@ -211,14 +211,18 @@ func (s *httpServer) process(c *conn.Conn, r *http.Request) {
 		}
 		//modify the header and host according to the settings
 		common.ChangeHostAndHeader(r, host.HostChange, host.HeaderChange, c.Conn.RemoteAddr().String())
-		b, err := httputil.DumpRequest(r, true)
+		b, err := httputil.DumpRequest(r, false)
 		if err != nil {
 			break
 		}
-		host.Flow.Add(int64(len(b)), 0)
 		logs.Trace("%s request, method %s, host %s, url %s, remote address %s, target %s", r.URL.Scheme, r.Method, r.Host, r.RequestURI, r.RemoteAddr, lk.Host)
 		//write
 		connClient.Write(b)
+		if bodyLen, err := common.CopyBuffer(connClient, r.Body); err != nil {
+			break
+		} else {
+			host.Flow.Add(int64(len(b))+bodyLen, 0)
+		}
 	}
 end:
 	if isConn {
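The HTTP proxy hunk stops buffering request bodies: DumpRequest(r, false) serializes only the headers (with true, the whole body had to be read into memory first), the body is then streamed to the backend with common.CopyBuffer, and the flow counter is updated afterwards with headers plus the streamed body length. The same pattern with only the standard library, io.Copy standing in for common.CopyBuffer:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httputil"
	"strings"
)

// forward writes the request head, then streams the body, and returns
// the total byte count for flow accounting.
func forward(dst io.Writer, r *http.Request) (int64, error) {
	head, err := httputil.DumpRequest(r, false) // headers only, body left unread
	if err != nil {
		return 0, err
	}
	if _, err := dst.Write(head); err != nil {
		return 0, err
	}
	bodyLen, err := io.Copy(dst, r.Body) // stream instead of buffering the whole body
	return int64(len(head)) + bodyLen, err
}

func main() {
	r, _ := http.NewRequest("POST", "http://example.com/upload", strings.NewReader("payload"))
	var backend bytes.Buffer
	n, err := forward(&backend, r)
	fmt.Println(n, err)
}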

View File

@@ -91,6 +91,7 @@ func StartNewServer(bridgePort int, cnf *file.Tunnel, bridgeType string) {
 		go proxy.NewP2PServer(p).Start()
 	}
 	go DealBridgeTask()
+	go dealClientFlow()
 	if svr := NewMode(Bridge, cnf); svr != nil {
 		if err := svr.Start(); err != nil {
 			logs.Error(err)
@@ -101,6 +102,16 @@ func StartNewServer(bridgePort int, cnf *file.Tunnel, bridgeType string) {
 	}
 }

+func dealClientFlow() {
+	ticker := time.NewTicker(time.Minute)
+	for {
+		select {
+		case <-ticker.C:
+			dealClientData(file.GetCsvDb().Clients)
+		}
+	}
+}
+
 //new a server by mode name
 func NewMode(Bridge *bridge.Bridge, c *file.Tunnel) proxy.Service {
 	var service proxy.Service
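Finally, server.go gains a background flusher for per-client flow data, started with the other goroutines and running once a minute. Since the loop's select has a single case and no default, it is equivalent to ranging over the ticker channel, as in this sketch (flush stands in for dealClientData(file.GetCsvDb().Clients)):

package main

import (
	"fmt"
	"time"
)

// clientFlowLoop mirrors dealClientFlow: tick once a minute and
// persist the clients' flow counters.
func clientFlowLoop(flush func()) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop() // the diff never stops the ticker; fine for a process-lifetime goroutine
	for range ticker.C {
		flush()
	}
}

func main() {
	go clientFlowLoop(func() { fmt.Println("persist client flow counters") })
	time.Sleep(90 * time.Second) // let it fire once
}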