nps/lib/mux/conn.go

553 lines
14 KiB
Go
Raw Normal View History

2019-02-26 14:40:28 +00:00
package mux
import (
"errors"
"io"
"net"
2019-10-15 08:32:21 +00:00
"strconv"
"sync"
2019-10-21 03:55:29 +00:00
"sync/atomic"
2019-02-26 14:40:28 +00:00
"time"
2019-08-10 03:15:25 +00:00
"github.com/cnlh/nps/lib/common"
2019-02-26 14:40:28 +00:00
)
// conn is one logical stream multiplexed over a single Mux transport.
// It implements net.Conn; flow control is handled by the paired
// receive/send windows below.
type conn struct {
	net.Conn
	getStatusCh      chan struct{}
	connStatusOkCh   chan struct{}
	connStatusFailCh chan struct{}
	connId           int32          // stream id within the mux session
	isClose          bool           // set once closeProcess ran; reads/writes fail afterwards
	closeFlag        bool           // close conn flag: the remote side requested close
	receiveWindow    *ReceiveWindow // inbound flow-control window
	sendWindow       *SendWindow    // outbound flow-control window
	once             sync.Once      // guards closeProcess
	label            string         // optional tag used only for the debug connection log
}
2019-10-15 08:32:21 +00:00
func NewConn(connId int32, mux *Mux, label ...string) *conn {
2019-03-15 06:03:49 +00:00
c := &conn{
2019-02-26 14:40:28 +00:00
getStatusCh: make(chan struct{}),
connStatusOkCh: make(chan struct{}),
connStatusFailCh: make(chan struct{}),
connId: connId,
2019-10-07 15:04:54 +00:00
receiveWindow: new(ReceiveWindow),
sendWindow: new(SendWindow),
once: sync.Once{},
2019-02-26 14:40:28 +00:00
}
2019-10-15 08:32:21 +00:00
if len(label) > 0 {
c.label = label[0]
}
2019-10-07 15:04:54 +00:00
c.receiveWindow.New(mux)
c.sendWindow.New(mux)
2019-10-15 08:32:21 +00:00
logm := &connLog{
startTime: time.Now(),
isClose: false,
logs: []string{c.label + "new conn success"},
}
setM(label[0], int(connId), logm)
2019-03-15 06:03:49 +00:00
return c
2019-02-26 14:40:28 +00:00
}
func (s *conn) Read(buf []byte) (n int, err error) {
2019-03-15 06:03:49 +00:00
if s.isClose || buf == nil {
2019-02-26 14:40:28 +00:00
return 0, errors.New("the conn has closed")
}
2019-10-07 15:04:54 +00:00
if len(buf) == 0 {
return 0, nil
}
2019-10-07 15:04:54 +00:00
// waiting for takeout from receive window finish or timeout
n, err = s.receiveWindow.Read(buf, s.connId)
2019-10-15 08:32:21 +00:00
var errstr string
if err == nil {
errstr = "err:nil"
} else {
errstr = err.Error()
}
d := getM(s.label, int(s.connId))
2019-10-21 03:55:29 +00:00
d.logs = append(d.logs, s.label+"read "+strconv.Itoa(n)+" "+errstr+" "+string(buf[:100]))
2019-10-15 08:32:21 +00:00
setM(s.label, int(s.connId), d)
return
2019-02-26 14:40:28 +00:00
}
2019-08-23 10:53:36 +00:00
func (s *conn) Write(buf []byte) (n int, err error) {
2019-02-26 14:40:28 +00:00
if s.isClose {
return 0, errors.New("the conn has closed")
}
if s.closeFlag {
2019-09-08 15:49:16 +00:00
//s.Close()
return 0, errors.New("io: write on closed conn")
}
2019-10-07 15:04:54 +00:00
if len(buf) == 0 {
return 0, nil
2019-02-26 14:40:28 +00:00
}
2019-10-07 15:04:54 +00:00
//logs.Warn("write buf", len(buf))
n, err = s.sendWindow.WriteFull(buf, s.connId)
2019-09-08 15:49:16 +00:00
return
2019-02-26 14:40:28 +00:00
}
2019-08-26 10:47:23 +00:00
// Close tears the stream down. Safe to call multiple times: the actual
// shutdown in closeProcess runs exactly once via s.once.
func (s *conn) Close() (err error) {
	s.once.Do(s.closeProcess)
	return
}
// closeProcess performs the one-time shutdown: unregister from the mux,
// notify the other side (unless the whole mux is already down), release
// both windows, and record the close in the debug connection log.
// Invoked exactly once via s.once from Close.
func (s *conn) closeProcess() {
	s.isClose = true
	s.receiveWindow.mux.connMap.Delete(s.connId)
	if !s.receiveWindow.mux.IsClose {
		// if server or user close the conn while reading, will get a io.EOF
		// and this Close method will be invoke, send this signal to close other side
		s.receiveWindow.mux.sendInfo(common.MUX_CONN_CLOSE, s.connId, nil)
	}
	s.sendWindow.CloseWindow()
	s.receiveWindow.CloseWindow()
	// mark the connection closed in the debug log
	d := getM(s.label, int(s.connId))
	d.isClose = true
	d.logs = append(d.logs, s.label+"close "+time.Now().String())
	setM(s.label, int(s.connId), d)
	return
}
func (s *conn) LocalAddr() net.Addr {
2019-10-07 15:04:54 +00:00
return s.receiveWindow.mux.conn.LocalAddr()
2019-02-26 14:40:28 +00:00
}
func (s *conn) RemoteAddr() net.Addr {
2019-10-07 15:04:54 +00:00
return s.receiveWindow.mux.conn.RemoteAddr()
2019-02-26 14:40:28 +00:00
}
func (s *conn) SetDeadline(t time.Time) error {
2019-10-07 15:04:54 +00:00
_ = s.SetReadDeadline(t)
_ = s.SetWriteDeadline(t)
2019-02-26 14:40:28 +00:00
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
2019-10-07 15:04:54 +00:00
s.receiveWindow.SetTimeOut(t)
2019-02-26 14:40:28 +00:00
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
2019-10-07 15:04:54 +00:00
s.sendWindow.SetTimeOut(t)
2019-02-26 14:40:28 +00:00
return nil
}
2019-09-08 15:49:16 +00:00
// window holds the state shared by both flow-control window kinds.
type window struct {
	off       uint32        // read offset inside the current buffer
	maxSize   uint32        // currently advertised window size in bytes
	closeOp   bool          // set once CloseWindow has run
	closeOpCh chan struct{} // buffered(2): one close signal per potential waiter
	mux       *Mux          // owning mux session
}
2019-10-07 15:04:54 +00:00
// New initializes the close channel; capacity 2 lets CloseWindow queue a
// signal for up to two blocked waiters without blocking itself.
func (Self *window) New() {
	Self.closeOpCh = make(chan struct{}, 2)
}
2019-10-07 15:04:54 +00:00
// CloseWindow marks the window closed and queues two close signals, one
// for each goroutine that may be blocked on closeOpCh.
// NOTE(review): closeOp is tested and set without synchronization here —
// concurrent callers could both enter the branch and overfill the
// buffered channel; confirm callers serialize CloseWindow.
func (Self *window) CloseWindow() {
	if !Self.closeOp {
		Self.closeOp = true
		Self.closeOpCh <- struct{}{}
		Self.closeOpCh <- struct{}{}
	}
}
2019-10-07 15:04:54 +00:00
// ReceiveWindow buffers inbound segments until the reader drains them and
// advertises its remaining capacity back to the sender.
type ReceiveWindow struct {
	bufQueue   FIFOQueue    // queued inbound segments
	element    *ListElement // segment currently being copied out by Read
	readLength uint32       // bytes read since the last acknowledgement (accessed atomically)
	readOp     chan struct{}
	readWait   bool
	windowFull bool // set when remaining capacity hit zero on Write
	count      int8 // throttle for calcSize: recompute once per 10 writes
	//bw *bandwidth
	once sync.Once // guards Stop
	window
}
2019-10-07 15:04:54 +00:00
// New initializes the receive window with an 8 KiB starting size and
// attaches it to the owning mux.
func (Self *ReceiveWindow) New(mux *Mux) {
	// initial a window for receive
	Self.readOp = make(chan struct{})
	Self.bufQueue.New()
	//Self.bw = new(bandwidth)
	Self.element = new(ListElement)
	Self.maxSize = 8192
	Self.mux = mux
	Self.window.New()
}
2019-10-23 15:35:39 +00:00
// remainingSize reports how many bytes the window can still accept.
// Assumes bufQueue.Len() never exceeds maxSize — otherwise the unsigned
// subtraction would wrap; confirm against the queue's accounting.
func (Self *ReceiveWindow) remainingSize() (n uint32) {
	// receive window remaining
	return Self.maxSize - Self.bufQueue.Len()
}
2019-10-23 15:35:39 +00:00
// readSize returns the number of bytes read since the last
// acknowledgement and resets the counter, so each byte is acknowledged
// to the sender exactly once.
func (Self *ReceiveWindow) readSize() (n uint32) {
	// acknowledge the size already read
	return atomic.SwapUint32(&Self.readLength, 0)
}
2019-10-23 15:35:39 +00:00
// calcSize recomputes the maximum receive window size from the mux's
// measured latency and bandwidth, throttled to once per 10 invocations
// via the count field.
func (Self *ReceiveWindow) calcSize() {
	// calculating maximum receive window size
	if Self.count == 0 {
		//logs.Warn("ping, bw", Self.mux.latency, Self.bw.Get())
		// bandwidth-delay product (with 1.5x headroom), split across the
		// live conns of this mux session
		n := uint32(2 * Self.mux.latency * Self.mux.bw.Get() * 1.5 / float64(Self.mux.connMap.Size()))
		if n < 8192 {
			n = 8192
		}
		if n < Self.bufQueue.Len() {
			n = Self.bufQueue.Len()
		}
		// set the minimal size
		if n > 2*Self.maxSize {
			n = 2 * Self.maxSize // grow at most 2x per adjustment
		}
		if n > common.MAXIMUM_WINDOW_SIZE {
			n = common.MAXIMUM_WINDOW_SIZE
		}
		// set the maximum size
		//logs.Warn("n", n)
		Self.maxSize = n
		Self.count = -10 // next recomputation after 10 more writes
	}
	Self.count += 1
}
2019-10-07 15:04:54 +00:00
// Write queues an inbound segment (buf with valid length l; part marks a
// partial segment) for conn id, recalculates the window size, and
// acknowledges the sender with the new size and consumed byte count.
func (Self *ReceiveWindow) Write(buf []byte, l uint16, part bool, id int32) (err error) {
	if Self.closeOp {
		return errors.New("conn.receiveWindow: write on closed window")
	}
	element := new(ListElement)
	err = element.New(buf, l, part)
	//logs.Warn("push the buf", len(buf), l, (&element).l)
	if err != nil {
		return
	}
	Self.bufQueue.Push(element) // must push data before allow read
	//logs.Warn("read session calc size ", Self.maxSize)
	// calculating the receive window size
	Self.calcSize()
	//logs.Warn("read session calc size finish", Self.maxSize)
	if Self.remainingSize() == 0 {
		// remember fullness so the next Read knows to re-acknowledge
		Self.windowFull = true
		//logs.Warn("window full true", Self.windowFull)
	}
	// tell the sender the (possibly resized) window and how much was read
	Self.mux.sendInfo(common.MUX_MSG_SEND_OK, id, Self.maxSize, Self.readSize())
	return nil
}
2019-10-07 15:04:54 +00:00
// Read copies queued segments into p, blocking in bufQueue.Pop when no
// data is available (subject to the read deadline). It keeps pulling
// "part" segments until p is full or a segment boundary is reached.
func (Self *ReceiveWindow) Read(p []byte, id int32) (n int, err error) {
	if Self.closeOp {
		return 0, io.EOF // receive close signal, returns eof
	}
	pOff := 0
	l := 0
	//logs.Warn("receive window read off, element.l", Self.off, Self.element.l)
copyData:
	//Self.bw.StartRead()
	if Self.off == uint32(Self.element.l) {
		// on the first Read method invoked, Self.off and Self.element.l
		// both zero value
		Self.element, err = Self.bufQueue.Pop()
		// if the queue is empty, Pop method will wait until one element push
		// into the queue successful, or timeout.
		// timer start on timeout parameter is set up ,
		// reset to 60s if timeout and data still available
		Self.off = 0
		if err != nil {
			return // queue receive stop or time out, break the loop and return
		}
		//logs.Warn("pop element", Self.element.l, Self.element.part)
	}
	// NOTE(review): assumes len(element.buf) equals element.l; if a pooled
	// buffer is longer than l this would copy bytes past the valid length —
	// confirm ListElement.New slices buf down to l.
	l = copy(p[pOff:], Self.element.buf[Self.off:])
	//Self.bw.SetCopySize(l)
	pOff += l
	Self.off += uint32(l)
	atomic.AddUint32(&Self.readLength, uint32(l))
	//logs.Warn("window read length buf len", Self.readLength, Self.bufQueue.Len())
	n += l
	l = 0
	//Self.bw.EndRead()
	if Self.off == uint32(Self.element.l) {
		// current element fully consumed: recycle its buffer and possibly
		// acknowledge the freed window space to the sender
		//logs.Warn("put the element end ", string(Self.element.buf[:15]))
		common.WindowBuff.Put(Self.element.buf)
		Self.sendStatus(id)
	}
	if pOff < len(p) && Self.element.part {
		// element is a part of the segments, trying to fill up buf p
		goto copyData
	}
	return // buf p is full or all of segments in buf, return
}
2019-10-07 15:04:54 +00:00
// sendStatus acknowledges the sender when the window had been full or has
// just been fully drained, so the sender can resume writing.
func (Self *ReceiveWindow) sendStatus(id int32) {
	if Self.windowFull || Self.bufQueue.Len() == 0 {
		// window is full before read or empty now
		Self.windowFull = false
		Self.mux.sendInfo(common.MUX_MSG_SEND_OK, id, Self.maxSize, Self.readSize())
		// acknowledge other side, have empty some receive window space
		//}
	}
}
// SetTimeOut sets the deadline used by the FIFO queue's blocking Pop.
func (Self *ReceiveWindow) SetTimeOut(t time.Time) {
	queue := &Self.bufQueue
	queue.SetTimeOut(t)
}
// Stop unblocks a reader waiting in Pop; idempotent via once.
func (Self *ReceiveWindow) Stop() {
	// queue has no more data to push, so unblock pop method
	Self.once.Do(Self.bufQueue.Stop)
}
// CloseWindow closes the shared window state and stops the queue so any
// blocked reader returns.
func (Self *ReceiveWindow) CloseWindow() {
	Self.window.CloseWindow()
	Self.Stop()
}
// SendWindow slices an outbound buffer into segments limited by the
// peer's advertised receive window and the maximum segment size.
type SendWindow struct {
	buf         []byte        // buffer currently being drained (set by SetSendBuf)
	sentLength  uint32        // bytes sent but not yet acknowledged (accessed atomically)
	setSizeCh   chan struct{} // signals arrival of fresh window space
	setSizeWait int32         // 1 while a writer is blocked waiting for space (accessed atomically)
	unSlide     uint32
	timeout     time.Time // write deadline; zero means the one-minute fallback
	window
}
// New initializes the send window with a conservative 4 KiB size until
// the peer advertises its real receive window via SetSize.
func (Self *SendWindow) New(mux *Mux) {
	Self.setSizeCh = make(chan struct{})
	Self.maxSize = 4096
	Self.mux = mux
	Self.window.New()
}
// SetSendBuf installs buf as the buffer to drain and rewinds the offset.
func (Self *SendWindow) SetSendBuf(buf []byte) {
	// send window buff from conn write method, set it to send window
	Self.buf = buf
	Self.off = 0
}
2019-10-07 15:04:54 +00:00
// RemainingSize reports how many more bytes may be sent before the
// peer's receive window is exhausted.
func (Self *SendWindow) RemainingSize() (n uint32) {
	return atomic.LoadUint32(&Self.maxSize) - atomic.LoadUint32(&Self.sentLength)
}
// SetSize processes a window acknowledgement from the peer: adopt the
// advertised windowSize, credit back readLength acknowledged bytes, and
// wake a writer blocked in waitReceiveWindow if space became available.
// Reports true when the window is (or becomes) closed.
func (Self *SendWindow) SetSize(windowSize, readLength uint32) (closed bool) {
	defer func() {
		// sending on / closing an already-closed setSizeCh panics;
		// treat that as the window being closed
		if recover() != nil {
			closed = true
		}
	}()
	if Self.closeOp {
		close(Self.setSizeCh)
		return true
	}
	if readLength == 0 && atomic.LoadUint32(&Self.maxSize) == windowSize {
		//logs.Warn("waiting for another window size")
		return false // waiting for receive another usable window size
	}
	//logs.Warn("set send window size to ", windowSize, readLength)
	Self.slide(windowSize, readLength)
	if Self.RemainingSize() == 0 {
		//logs.Warn("waiting for another window size after slide")
		// keep the wait status
		atomic.StoreInt32(&Self.setSizeWait, 1)
		return false
	}
	if atomic.CompareAndSwapInt32(&Self.setSizeWait, 1, 0) {
		// send window into the wait status, need notice the channel
		select {
		case Self.setSizeCh <- struct{}{}:
			//logs.Warn("send window remaining size is 0 finish")
			return false
		case <-Self.closeOpCh:
			close(Self.setSizeCh)
			return true
		}
	}
	// send window not into the wait status, so just do slide
	return false
}
func (Self *SendWindow) slide(windowSize, readLength uint32) {
2019-10-23 15:35:39 +00:00
atomic.AddUint32(&Self.sentLength, ^readLength-1)
atomic.SwapUint32(&Self.maxSize, windowSize)
2019-09-08 15:49:16 +00:00
}
2019-10-23 15:35:39 +00:00
// WriteTo carves the next segment out of the send buffer, limited by the
// maximum segment size and the remaining receive window. It returns
// io.EOF once the buffer is drained; callers loop until then. part
// reports whether more segments of the same buffer follow.
func (Self *SendWindow) WriteTo() (p []byte, sendSize uint32, part bool, err error) {
	// returns buf segments, return only one segments, need a loop outside
	// until err = io.EOF
	if Self.closeOp {
		return nil, 0, false, errors.New("conn.writeWindow: window closed")
	}
	if Self.off == uint32(len(Self.buf)) {
		return nil, 0, false, io.EOF
		// send window buff is drain, return eof and get another one
	}
	if Self.RemainingSize() == 0 {
		// publish the wait flag so SetSize knows to signal setSizeCh
		atomic.StoreInt32(&Self.setSizeWait, 1)
		// into the wait status
		err = Self.waitReceiveWindow()
		if err != nil {
			return nil, 0, false, err
		}
	}
	if len(Self.buf[Self.off:]) > common.MAXIMUM_SEGMENT_SIZE {
		sendSize = common.MAXIMUM_SEGMENT_SIZE
		part = true
		//logs.Warn("cut buf by mss")
	} else {
		sendSize = uint32(len(Self.buf[Self.off:]))
		part = false
	}
	if Self.RemainingSize() < sendSize {
		// usable window size is small than
		// window MAXIMUM_SEGMENT_SIZE or send buf left
		sendSize = Self.RemainingSize()
		//logs.Warn("cut buf by remainingsize", sendSize, len(Self.buf[Self.off:]))
		part = true
	}
	//logs.Warn("send size", sendSize)
	p = Self.buf[Self.off : sendSize+Self.off]
	Self.off += sendSize
	atomic.AddUint32(&Self.sentLength, sendSize)
	return
}
2019-10-07 15:04:54 +00:00
func (Self *SendWindow) waitReceiveWindow() (err error) {
t := Self.timeout.Sub(time.Now())
if t < 0 {
t = time.Minute
2019-09-08 15:49:16 +00:00
}
2019-10-07 15:04:54 +00:00
timer := time.NewTimer(t)
defer timer.Stop()
2019-09-08 15:49:16 +00:00
// waiting for receive usable window size, or timeout
select {
2019-10-07 15:04:54 +00:00
case _, ok := <-Self.setSizeCh:
2019-09-08 15:49:16 +00:00
if !ok {
2019-10-07 15:04:54 +00:00
return errors.New("conn.writeWindow: window closed")
2019-09-08 15:49:16 +00:00
}
2019-10-07 15:04:54 +00:00
return nil
case <-timer.C:
return errors.New("conn.writeWindow: write to time out")
2019-09-15 07:02:10 +00:00
case <-Self.closeOpCh:
2019-10-07 15:04:54 +00:00
return errors.New("conn.writeWindow: window closed")
2019-09-08 15:49:16 +00:00
}
2019-10-07 15:04:54 +00:00
}
// WriteFull sends all of buf for conn id, segment by segment via WriteTo,
// and returns the number of bytes handed to the mux.
func (Self *SendWindow) WriteFull(buf []byte, id int32) (n int, err error) {
	Self.SetSendBuf(buf) // set the buf to send window
	var bufSeg []byte
	var part bool
	var l uint32
	for {
		bufSeg, l, part, err = Self.WriteTo()
		//logs.Warn("buf seg", len(bufSeg), part, err)
		// get the buf segments from send window
		if bufSeg == nil && part == false && err == io.EOF {
			// send window is drain, break the loop
			err = nil
			break
		}
		if err != nil {
			break
		}
		n += int(l)
		l = 0
		if part {
			Self.mux.sendInfo(common.MUX_NEW_MSG_PART, id, bufSeg)
		} else {
			Self.mux.sendInfo(common.MUX_NEW_MSG, id, bufSeg)
			//logs.Warn("buf seg sent", len(bufSeg), part, err)
		}
		// send to other side, not send nil data to other side
	}
	//logs.Warn("buf seg write success")
	return
}
2019-10-07 15:04:54 +00:00
// SetTimeOut sets the deadline used while waiting for the peer to
// advertise fresh receive window space.
func (Self *SendWindow) SetTimeOut(t time.Time) {
	// waiting for receive a receive window size
	Self.timeout = t
}
//type bandwidth struct {
// readStart time.Time
// lastReadStart time.Time
// readEnd time.Time
// lastReadEnd time.Time
// bufLength int
// lastBufLength int
// count int8
// readBW float64
// writeBW float64
// readBandwidth float64
//}
//
//func (Self *bandwidth) StartRead() {
// Self.lastReadStart, Self.readStart = Self.readStart, time.Now()
// if !Self.lastReadStart.IsZero() {
// if Self.count == -5 {
// Self.calcBandWidth()
// }
// }
//}
//
//func (Self *bandwidth) EndRead() {
// Self.lastReadEnd, Self.readEnd = Self.readEnd, time.Now()
// if Self.count == -5 {
// Self.calcWriteBandwidth()
// }
// if Self.count == 0 {
// Self.calcReadBandwidth()
// Self.count = -6
// }
// Self.count += 1
//}
//
//func (Self *bandwidth) SetCopySize(n int) {
// // must be invoke between StartRead and EndRead
// Self.lastBufLength, Self.bufLength = Self.bufLength, n
//}
//// calculating
//// start end start end
//// read read
//// write
//
//func (Self *bandwidth) calcBandWidth() {
// t := Self.readStart.Sub(Self.lastReadStart)
// if Self.lastBufLength >= 32768 {
// Self.readBandwidth = float64(Self.lastBufLength) / t.Seconds()
// }
//}
//
//func (Self *bandwidth) calcReadBandwidth() {
// // Bandwidth between nps and npc
// readTime := Self.readEnd.Sub(Self.readStart)
// Self.readBW = float64(Self.bufLength) / readTime.Seconds()
// //logs.Warn("calc read bw", Self.readBW, Self.bufLength, readTime.Seconds())
//}
//
//func (Self *bandwidth) calcWriteBandwidth() {
// // Bandwidth between nps and user, npc and application
// writeTime := Self.readStart.Sub(Self.lastReadEnd)
// Self.writeBW = float64(Self.lastBufLength) / writeTime.Seconds()
// //logs.Warn("calc write bw", Self.writeBW, Self.bufLength, writeTime.Seconds())
//}
//
//func (Self *bandwidth) Get() (bw float64) {
// // The zero value, 0 for numeric types
// if Self.writeBW == 0 && Self.readBW == 0 {
// //logs.Warn("bw both 0")
// return 100
// }
// if Self.writeBW == 0 && Self.readBW != 0 {
// return Self.readBW
// }
// if Self.readBW == 0 && Self.writeBW != 0 {
// return Self.writeBW
// }
// return Self.readBandwidth
//}