// nps/lib/mux/mux.go

package mux

import (
    "errors"
    "io"
    "math"
    "net"
    "sync/atomic"
    "time"

    "github.com/astaxie/beego/logs"
    "github.com/cnlh/nps/lib/common"
)
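
// Mux multiplexes multiple logical connections (streams) over a single
// underlying net.Conn. It keeps a map of active streams, a priority queue of
// outgoing packets, and background goroutines for reading, writing and
// keep-alive pings.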
type Mux struct {
    latency uint64 // latency in seconds; a float64 stored as bits for atomic access
    net.Listener
    conn          net.Conn
    connMap       *connMap
    newConnCh     chan *conn
    id            int32
    closeChan     chan struct{}
    IsClose       bool
    pingOk        uint32
    counter       *latencyCounter
    bw            *bandwidth
    pingCh        chan []byte
    pingCheckTime uint32
    connType      string
    writeQueue    PriorityQueue
    newConnQueue  ConnQueue
}
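
// NewMux wraps an already established net.Conn (for example a TCP or KCP
// connection, chosen by connType) and starts the read, write, ping and
// ping-return goroutines.
//
// A rough client-side usage sketch (the dial address and error handling are
// illustrative assumptions, not part of this file):
//
//	c, err := net.Dial("tcp", "server:8024")
//	if err != nil {
//		// handle err
//	}
//	m := NewMux(c, "tcp")
//	stream, err := m.NewConn() // open one logical stream over the shared conn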
func NewMux(c net.Conn, connType string) *Mux {
    //c.(*net.TCPConn).SetReadBuffer(0)
    //c.(*net.TCPConn).SetWriteBuffer(0)
    m := &Mux{
        conn:      c,
        connMap:   NewConnMap(),
        id:        0,
        closeChan: make(chan struct{}, 1),
        newConnCh: make(chan *conn),
        bw:        new(bandwidth),
        IsClose:   false,
        connType:  connType,
        pingCh:    make(chan []byte),
        counter:   newLatencyCounter(),
    }
    m.writeQueue.New()
    m.newConnQueue.New()
    //read session by flag
    m.readSession()
    //ping
    m.ping()
    m.pingReturn()
    m.writeSession()
    return m
}
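
// NewConn opens a new logical connection over the mux: it registers the
// stream in connMap, sends a MUX_NEW_CONN packet to the peer and waits up to
// two minutes for the peer to accept or refuse it.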
func (s *Mux) NewConn() (*conn, error) {
    if s.IsClose {
        return nil, errors.New("the mux has closed")
    }
    conn := NewConn(s.getId(), s, "nps ")
    //it must be set before send
    s.connMap.Set(conn.connId, conn)
    s.sendInfo(common.MUX_NEW_CONN, conn.connId, nil)
    //set a timer with a two minute timeout
    timer := time.NewTimer(time.Minute * 2)
    defer timer.Stop()
    select {
    case <-conn.connStatusOkCh:
        return conn, nil
    case <-conn.connStatusFailCh:
    case <-timer.C:
    }
    return nil, errors.New("create connection failed, the server refused the connection")
}
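
// Accept implements net.Listener for the server side of the mux: it blocks
// until the read loop delivers the next incoming stream on newConnCh.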
func (s *Mux) Accept() (net.Conn, error) {
    if s.IsClose {
        return nil, errors.New("accept error, the mux has closed")
    }
    conn := <-s.newConnCh
    if conn == nil {
        return nil, errors.New("accept error, the conn has closed")
    }
    return conn, nil
}
func (s *Mux) Addr() net.Addr {
return s.conn.LocalAddr()
}
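
// sendInfo packs flag, id and optional data into a MuxPackager and pushes it
// onto the priority write queue; packBuf later writes it to the connection.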
func (s *Mux) sendInfo(flag uint8, id int32, data ...interface{}) {
    if s.IsClose {
        return
    }
    var err error
    pack := common.MuxPack.Get()
    err = pack.NewPac(flag, id, data...)
    if err != nil {
        common.MuxPack.Put(pack)
        logs.Error("mux: new pack err", err)
        s.Close()
        return
    }
    s.writeQueue.Push(pack)
    return
}
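
// writeSession starts the write loop. packBuf pops packets from the priority
// queue, serializes them onto the underlying connection and returns them to
// the packager pool; any write error closes the whole mux.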
func (s *Mux) writeSession() {
    go s.packBuf()
    //go s.writeBuf()
}

func (s *Mux) packBuf() {
    //buffer := common.BuffPool.Get()
    for {
        if s.IsClose {
            break
        }
        //buffer.Reset()
        pack := s.writeQueue.Pop()
        if s.IsClose {
            break
        }
        //buffer := common.BuffPool.Get()
        err := pack.Pack(s.conn)
        common.MuxPack.Put(pack)
        if err != nil {
            logs.Error("mux: pack err", err)
            //common.BuffPool.Put(buffer)
            s.Close()
            break
        }
        //logs.Warn(buffer.String())
        //s.bufQueue.Push(buffer)
        //l := buffer.Len()
        //n, err := buffer.WriteTo(s.conn)
        //common.BuffPool.Put(buffer)
        //if err != nil || int(n) != l {
        //	logs.Error("mux: close from write session fail ", err, n, l)
        //	s.Close()
        //	break
        //}
    }
}
//func (s *Mux) writeBuf() {
// for {
// if s.IsClose {
// break
// }
// buffer, err := s.bufQueue.Pop()
// if err != nil {
// break
// }
// l := buffer.Len()
// n, err := buffer.WriteTo(s.conn)
// common.BuffPool.Put(buffer)
// if err != nil || int(n) != l {
// logs.Warn("close from write session fail ", err, n, l)
// s.Close()
// break
// }
// }
//}
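
// ping sends a MUX_PING_FLAG packet every five seconds carrying the current
// time. pingCheckTime is incremented on every tick and reset by pingReturn,
// so reaching 60 means roughly five minutes without a ping reply, and the mux
// is closed.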
func (s *Mux) ping() {
    go func() {
        now, _ := time.Now().UTC().MarshalText()
        s.sendInfo(common.MUX_PING_FLAG, common.MUX_PING, now)
        // send the ping flag and get the latency first
        ticker := time.NewTicker(time.Second * 5)
        defer ticker.Stop()
        for {
            if s.IsClose {
                break
            }
            <-ticker.C
            if atomic.LoadUint32(&s.pingCheckTime) >= 60 {
                logs.Error("mux: ping time out")
                s.Close()
                // no ping return received for more than 5 minutes,
                // the mux connection is damaged (maybe a packet drop), close it
                break
            }
            now, _ := time.Now().UTC().MarshalText()
            s.sendInfo(common.MUX_PING_FLAG, common.MUX_PING, now)
            atomic.AddUint32(&s.pingCheckTime, 1)
            if atomic.LoadUint32(&s.pingOk) > 10 && s.connType == "kcp" {
                logs.Error("mux: kcp ping err")
                s.Close()
                break
            }
            atomic.AddUint32(&s.pingOk, 1)
        }
    }()
}
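
// pingReturn consumes MUX_PING_RETURN payloads from pingCh, computes the
// one-way latency as half the round-trip time and stores the value filtered
// by latencyCounter atomically in s.latency.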
func (s *Mux) pingReturn() {
    go func() {
        var now time.Time
        var data []byte
        for {
            if s.IsClose {
                break
            }
            select {
            case data = <-s.pingCh:
                atomic.StoreUint32(&s.pingCheckTime, 0)
            case <-s.closeChan:
                return
            }
            _ = now.UnmarshalText(data)
            latency := time.Now().UTC().Sub(now).Seconds() / 2
            if latency > 0 {
                atomic.StoreUint64(&s.latency, math.Float64bits(s.counter.Latency(latency)))
                // store the float64 latency atomically as its bit pattern
            }
            //logs.Warn("latency", math.Float64frombits(atomic.LoadUint64(&s.latency)))
            if cap(data) > 0 {
                common.WindowBuff.Put(data)
            }
        }
    }()
}
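
// readSession starts two goroutines: one drains newConnQueue, registers each
// new stream and acknowledges it with MUX_NEW_CONN_OK; the other reads and
// unpacks packets from the underlying connection and dispatches them by flag
// (new connection, ping, data, window updates, close).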
func (s *Mux) readSession() {
    go func() {
        var connection *conn
        for {
            if s.IsClose {
                break
            }
            connection = s.newConnQueue.Pop()
            if s.IsClose {
                break // re-check: Pop may have returned because the mux was closed
            }
            s.connMap.Set(connection.connId, connection) // register the connection before the ok packet is sent
            s.newConnCh <- connection
            s.sendInfo(common.MUX_NEW_CONN_OK, connection.connId, nil)
        }
    }()
    go func() {
        pack := common.MuxPack.Get()
        var l uint16
        var err error
        for {
            if s.IsClose {
                break
            }
            pack = common.MuxPack.Get()
            s.bw.StartRead()
            if l, err = pack.UnPack(s.conn); err != nil {
                logs.Error("mux: read session unpack from connection err", err)
                s.Close()
                break
            }
            s.bw.SetCopySize(l)
            atomic.StoreUint32(&s.pingOk, 0)
            switch pack.Flag {
            case common.MUX_NEW_CONN: //new connection
                connection := NewConn(pack.Id, s)
                s.newConnQueue.Push(connection)
                continue
            case common.MUX_PING_FLAG: //ping
                s.sendInfo(common.MUX_PING_RETURN, common.MUX_PING, pack.Content)
                common.WindowBuff.Put(pack.Content)
                continue
            case common.MUX_PING_RETURN:
                //go func(content []byte) {
                s.pingCh <- pack.Content
                //}(pack.Content)
                continue
            }
            if connection, ok := s.connMap.Get(pack.Id); ok && !connection.isClose {
                switch pack.Flag {
                case common.MUX_NEW_MSG, common.MUX_NEW_MSG_PART: //new msg from remote connection
                    err = s.newMsg(connection, pack)
                    if err != nil {
                        logs.Error("mux: read session connection new msg err", err)
                        connection.Close()
                    }
                    continue
                case common.MUX_NEW_CONN_OK: //connection ok
                    connection.connStatusOkCh <- struct{}{}
                    continue
                case common.MUX_NEW_CONN_Fail:
                    connection.connStatusFailCh <- struct{}{}
                    continue
                case common.MUX_MSG_SEND_OK:
                    if connection.isClose {
                        continue
                    }
                    connection.sendWindow.SetSize(pack.Window, pack.ReadLength)
                    continue
                case common.MUX_CONN_CLOSE: //close the connection
                    connection.closeFlag = true
                    //s.connMap.Delete(pack.Id)
                    //go func(connection *conn) {
                    connection.receiveWindow.Stop() // close signal to receive window
                    //}(connection)
                    continue
                }
            } else if pack.Flag == common.MUX_CONN_CLOSE {
                continue
            }
            common.MuxPack.Put(pack)
        }
        common.MuxPack.Put(pack)
        s.Close()
    }()
}
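
// newMsg writes the payload of a MUX_NEW_MSG or MUX_NEW_MSG_PART packet into
// the stream's receive window; the boolean passed to Write marks whether the
// packet is a partial message.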
func (s *Mux) newMsg(connection *conn, pack *common.MuxPackager) (err error) {
    if connection.isClose {
        err = io.ErrClosedPipe
        return
    }
    //logs.Warn("read session receive new msg", pack.Length)
    //go func(connection *conn, pack *common.MuxPackager) { // do not block read session
    //insert into queue
    if pack.Flag == common.MUX_NEW_MSG_PART {
        err = connection.receiveWindow.Write(pack.Content, pack.Length, true, pack.Id)
    }
    if pack.Flag == common.MUX_NEW_MSG {
        err = connection.receiveWindow.Write(pack.Content, pack.Length, false, pack.Id)
    }
    //logs.Warn("read session write success", pack.Length)
    return
}
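
// Close shuts the mux down: it closes every stream in connMap, signals the
// ping-return goroutine via closeChan, closes the accept channel and the
// underlying connection, then releases any queued packets.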
func (s *Mux) Close() (err error) {
    logs.Warn("close mux")
    if s.IsClose {
        return errors.New("the mux has closed")
    }
    s.IsClose = true
    s.connMap.Close()
    s.connMap = nil
    //s.bufQueue.Stop()
    s.closeChan <- struct{}{}
    close(s.newConnCh)
    err = s.conn.Close()
    s.release()
    return
}
func (s *Mux) release() {
for {
pack := s.writeQueue.TryPop()
if pack == nil {
break
}
if pack.BasePackager.Content != nil {
common.WindowBuff.Put(pack.BasePackager.Content)
}
common.MuxPack.Put(pack)
}
for {
connection := s.newConnQueue.TryPop()
if connection == nil {
break
}
connection = nil
}
s.writeQueue.Stop()
s.newConnQueue.Stop()
}
// getId returns a new connId to use as a unique connection identifier.
func (s *Mux) getId() (id int32) {
    // avoid exceeding the int32 range
    if (math.MaxInt32 - s.id) < 10000 {
        atomic.StoreInt32(&s.id, 0)
    }
    id = atomic.AddInt32(&s.id, 1)
    if _, ok := s.connMap.Get(id); ok {
        return s.getId()
    }
    return
}
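
// bandwidth estimates the inbound throughput of the underlying connection.
// Read sizes are accumulated in bufLength and, once roughly 300 segments have
// been counted, divided by the elapsed time; the resulting float64 is stored
// as bits in readBandwidth for atomic access.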
type bandwidth struct {
    readBandwidth uint64 // a float64 stored as bits for atomic access
    readStart     time.Time
    lastReadStart time.Time
    bufLength     uint32
}

func (Self *bandwidth) StartRead() {
    if Self.readStart.IsZero() {
        Self.readStart = time.Now()
    }
    if Self.bufLength >= common.MAXIMUM_SEGMENT_SIZE*300 {
        Self.lastReadStart, Self.readStart = Self.readStart, time.Now()
        Self.calcBandWidth()
    }
}

func (Self *bandwidth) SetCopySize(n uint16) {
    Self.bufLength += uint32(n)
}

func (Self *bandwidth) calcBandWidth() {
    t := Self.readStart.Sub(Self.lastReadStart)
    atomic.StoreUint64(&Self.readBandwidth, math.Float64bits(float64(Self.bufLength)/t.Seconds()))
    Self.bufLength = 0
}

func (Self *bandwidth) Get() (bw float64) {
    // readBandwidth is zero until the first measurement completes
    bw = math.Float64frombits(atomic.LoadUint64(&Self.readBandwidth))
    if bw <= 0 {
        bw = 100
    }
    //logs.Warn(bw)
    return
}
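
// latencyCounter keeps the most recent latency samples in a small ring buffer
// and packs two 4-bit indexes into the single byte headMin: the high 4 bits
// are the head (next slot to overwrite) and the low 4 bits are the index of
// the minimal sample. For example, headMin == 0x52 means head == 5 and
// min == 2; when head passes 15 the uint8 shift in pack wraps it back to 0.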
const counterBits = 4
const counterMask = 1<<counterBits - 1
func newLatencyCounter() *latencyCounter {
    return &latencyCounter{
        buf:     make([]float64, 1<<counterBits),
        headMin: 0,
    }
}

type latencyCounter struct {
    buf []float64 // fixed-length ring buffer; once full, a new value replaces the oldest one
    headMin uint8 // packs two 4-bit indexes: head is the next slot to be overwritten,
    // min is the index of the minimal value currently in the buffer
}
func (Self *latencyCounter) unpack(idxs uint8) (head, min uint8) {
head = uint8((idxs >> counterBits) & counterMask)
    // head occupies the high 4 bits, min the low 4 bits
min = uint8(idxs & counterMask)
return
}
func (Self *latencyCounter) pack(head, min uint8) uint8 {
return uint8(head<<counterBits) |
uint8(min&counterMask)
}
func (Self *latencyCounter) add(value float64) {
head, min := Self.unpack(Self.headMin)
Self.buf[head] = value
if head == min {
min = Self.minimal()
        //if head equals min, the minimal slot has just been overwritten,
        // so find the new minimal value in the buffer
        // and update the min indicator
}
if Self.buf[min] > value {
min = head
}
head++
Self.headMin = Self.pack(head, min)
}
func (Self *latencyCounter) minimal() (min uint8) {
    var val float64
    var i uint8
    for i = 0; i <= counterMask; i++ {
        if Self.buf[i] > 0 {
            if val == 0 || val > Self.buf[i] {
                val = Self.buf[i]
                min = i
            }
        }
    }
    return
}
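
// Latency records a new sample and returns the minimal recent sample
// multiplied by the success rate computed by countSuccess.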
func (Self *latencyCounter) Latency(value float64) (latency float64) {
Self.add(value)
_, min := Self.unpack(Self.headMin)
latency = Self.buf[min] * Self.countSuccess()
return
}
const lossRatio = 1.6

func (Self *latencyCounter) countSuccess() (successRate float64) {
    var success, loss, i uint8
    _, min := Self.unpack(Self.headMin)
    for i = 0; i <= counterMask; i++ {
        if Self.buf[i] > lossRatio*Self.buf[min] && Self.buf[i] > 0 {
            loss++
        }
        if Self.buf[i] <= lossRatio*Self.buf[min] && Self.buf[i] > 0 {
            success++
        }
    }
    // count every non-zero sample in the ring buffer
    successRate = float64(success) / float64(loss+success)
    return
}