fix several race conditions, change sliding window max size from 2G to 32M, add buffer release

pull/276/head
ffdfgdfg 2019-11-21 23:53:06 +08:00
parent bd87864e26
commit 9bb8230fc1
5 changed files with 129 additions and 59 deletions

View File

@@ -49,5 +49,6 @@ const (
 	MUX_PING_RETURN
 	MUX_PING             int32 = -1
 	MAXIMUM_SEGMENT_SIZE       = PoolSizeWindow
-	MAXIMUM_WINDOW_SIZE        = 1<<31 - 1
+	MAXIMUM_WINDOW_SIZE        = 1 << 25 // 1<<31-1 TCP slide window size is very large,
+	// we use 32M, reduce memory usage
 )
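
For scale: 1 << 25 = 33,554,432 bytes, so each receive window is now capped at 32 MiB instead of 1<<31 - 1 = 2,147,483,647 bytes (2 GiB). A window only needs to cover the link's bandwidth-delay product, and the old cap allowed a single multiplexed connection to buffer gigabytes in the worst case.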

View File

@@ -210,7 +210,7 @@ func (Self *ReceiveWindow) calcSize() {
 	if Self.count == 0 {
 		//logs.Warn("ping, bw", Self.mux.latency, Self.bw.Get())
 		n := uint32(2 * math.Float64frombits(atomic.LoadUint64(&Self.mux.latency)) *
-			Self.mux.bw.Get() * 1.5 / float64(Self.mux.connMap.Size()))
+			Self.mux.bw.Get() / float64(Self.mux.connMap.Size()))
 		if n < 8192 {
 			n = 8192
 		}
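
The formula above sizes the receive window to roughly one bandwidth-delay product, split across every connection in the mux; this commit drops the old * 1.5 fudge factor. A minimal sketch of the rule under those assumptions (windowSize and its parameters are illustrative, not nps API):

// Sketch only: receive window ≈ 2 × latency × bandwidth, split evenly
// across the mux's connections, floored at 8 KiB and capped at 32 MiB.
func windowSize(latencySec, bwBytesPerSec float64, conns int) uint32 {
	if conns < 1 {
		conns = 1 // guard against division by zero
	}
	n := uint32(2 * latencySec * bwBytesPerSec / float64(conns))
	if n < 8192 {
		n = 8192
	}
	if n > 1<<25 {
		n = 1 << 25 // MAXIMUM_WINDOW_SIZE after this commit
	}
	return n
}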
@@ -279,6 +279,9 @@ copyData:
 	// on the first Read method invoked, Self.off and Self.element.l
 	// both zero value
 	common.ListElementPool.Put(Self.element)
+	if Self.closeOp {
+		return 0, io.EOF
+	}
 	Self.element, err = Self.bufQueue.Pop()
 	// if the queue is empty, Pop method will wait until one element push
 	// into the queue successful, or timeout.
@@ -343,6 +346,26 @@ func (Self *ReceiveWindow) Stop() {
 func (Self *ReceiveWindow) CloseWindow() {
 	Self.window.CloseWindow()
 	Self.Stop()
+	Self.release()
+}
+
+func (Self *ReceiveWindow) release() {
+	//if Self.element != nil {
+	//	if Self.element.Buf != nil {
+	//		common.WindowBuff.Put(Self.element.Buf)
+	//	}
+	//	common.ListElementPool.Put(Self.element)
+	//}
+	for {
+		Self.element = Self.bufQueue.TryPop()
+		if Self.element == nil {
+			return
+		}
+		if Self.element.Buf != nil {
+			common.WindowBuff.Put(Self.element.Buf)
+		}
+		common.ListElementPool.Put(Self.element)
+	} // release resource
 }

 type SendWindow struct {
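
The new release() uses the non-blocking TryPop to hand every queued buffer and list element back to its pool, so closing a window mid-stream no longer strands pooled memory. A self-contained sketch of that drain-on-close pattern, with a simplified mutex queue standing in for the lock-free chain nps actually uses:

package sketch

import "sync"

type element struct{ buf []byte }

type queue struct {
	mu    sync.Mutex
	items []*element
}

// TryPop is non-blocking: it returns nil once the queue is empty.
func (q *queue) TryPop() *element {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return nil
	}
	e := q.items[0]
	q.items = q.items[1:]
	return e
}

// drain mirrors ReceiveWindow.release(): pop until empty and hand every
// buffer and element back to its pool so nothing leaks past CloseWindow.
func drain(q *queue, bufPool, elemPool *sync.Pool) {
	for {
		e := q.TryPop()
		if e == nil {
			return
		}
		if e.buf != nil {
			bufPool.Put(e.buf)
		}
		elemPool.Put(e)
	}
}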

View File

@@ -2,32 +2,35 @@ package mux

 import (
 	"sync"
-	"time"
 )

 type connMap struct {
 	connMap map[int32]*conn
-	closeCh chan struct{}
+	//closeCh chan struct{}
 	sync.RWMutex
 }

 func NewConnMap() *connMap {
 	connMap := &connMap{
 		connMap: make(map[int32]*conn),
-		closeCh: make(chan struct{}),
+		//closeCh: make(chan struct{}),
 	}
-	go connMap.clean()
+	//go connMap.clean()
 	return connMap
 }

 func (s *connMap) Size() (n int) {
-	return len(s.connMap)
+	s.Lock()
+	n = len(s.connMap)
+	s.Unlock()
+	return
 }

 func (s *connMap) Get(id int32) (*conn, bool) {
 	s.Lock()
-	defer s.Unlock()
-	if v, ok := s.connMap[id]; ok && v != nil {
+	v, ok := s.connMap[id]
+	s.Unlock()
+	if ok && v != nil {
 		return v, true
 	}
 	return nil, false
@@ -35,40 +38,38 @@ func (s *connMap) Get(id int32) (*conn, bool) {
 func (s *connMap) Set(id int32, v *conn) {
 	s.Lock()
-	defer s.Unlock()
 	s.connMap[id] = v
+	s.Unlock()
 }

 func (s *connMap) Close() {
-	s.Lock()
-	defer s.Unlock()
+	//s.closeCh <- struct{}{} // stop the clean goroutine first
 	for _, v := range s.connMap {
-		v.isClose = true
+		v.Close() // close all the connections in the mux
 	}
-	s.closeCh <- struct{}{}
 }

 func (s *connMap) Delete(id int32) {
 	s.Lock()
-	defer s.Unlock()
 	delete(s.connMap, id)
+	s.Unlock()
 }

-func (s *connMap) clean() {
-	ticker := time.NewTimer(time.Minute * 1)
-	for {
-		select {
-		case <-ticker.C:
-			s.Lock()
-			for _, v := range s.connMap {
-				if v.isClose {
-					delete(s.connMap, v.connId)
-				}
-			}
-			s.Unlock()
-		case <-s.closeCh:
-			ticker.Stop()
-			return
-		}
-	}
-}
+//func (s *connMap) clean() {
+//	ticker := time.NewTimer(time.Minute * 1)
+//	for {
+//		select {
+//		case <-ticker.C:
+//			s.Lock()
+//			for _, v := range s.connMap {
+//				if v.isClose {
+//					delete(s.connMap, v.connId)
+//				}
+//			}
+//			s.Unlock()
+//		case <-s.closeCh:
+//			ticker.Stop()
+//			return
+//		}
+//	}
+//}
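
Size, Get, Set and Delete now hold the lock across their whole critical section, and the background clean() goroutine is retired in favor of closing connections directly in Close(). Since connMap embeds sync.RWMutex, the read-only paths could go a step further and take the shared lock; a possible follow-up, not part of this commit:

// Sketch: Size and Get only read the map, so RLock would let
// concurrent readers proceed while still excluding writers.
func (s *connMap) Size() (n int) {
	s.RLock()
	n = len(s.connMap)
	s.RUnlock()
	return
}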

View File

@@ -122,6 +122,9 @@ func (s *Mux) packBuf() {
 		}
 		buffer.Reset()
 		pack := s.writeQueue.Pop()
+		if s.IsClose {
+			break
+		}
 		//buffer := common.BuffPool.Get()
 		err := pack.Pack(buffer)
 		common.MuxPack.Put(pack)
@@ -218,8 +221,10 @@ func (s *Mux) pingReturn() {
 				// convert float64 to bits, store it atomic
 			}
 			//logs.Warn("latency", math.Float64frombits(atomic.LoadUint64(&s.latency)))
+			if cap(data) > 0 {
 				common.WindowBuff.Put(data)
+			}
 		}
 	}()
 }
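
The cap(data) > 0 guard keeps zero-capacity payloads (a ping frame may carry none) out of the buffer pool. The guard generalizes to any pooled []byte; putBuf below is an illustrative helper, not an nps function:

// Only recycle slices that own pool-worthy backing storage; putting
// nil/empty slices in the pool just wastes entries. Assumes "sync"
// is imported.
func putBuf(pool *sync.Pool, data []byte) {
	if cap(data) > 0 {
		pool.Put(data)
	}
}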
@@ -227,7 +232,13 @@ func (s *Mux) readSession() {
 	go func() {
 		var connection *conn
 		for {
+			if s.IsClose {
+				break
+			}
 			connection = s.newConnQueue.Pop()
+			if s.IsClose {
+				break // make sure that is closed
+			}
 			s.connMap.Set(connection.connId, connection) //it has been set before send ok
 			s.newConnCh <- connection
 			s.sendInfo(common.MUX_NEW_CONN_OK, connection.connId, nil)
@@ -287,9 +298,9 @@ func (s *Mux) readSession() {
 				connection.sendWindow.SetSize(pack.Window, pack.ReadLength)
 				continue
 			case common.MUX_CONN_CLOSE: //close the connection
-				s.connMap.Delete(pack.Id)
-				//go func(connection *conn) {
 				connection.closeFlag = true
+				//s.connMap.Delete(pack.Id)
+				//go func(connection *conn) {
 				connection.receiveWindow.Stop() // close signal to receive window
 				//}(connection)
 				continue
@@ -322,17 +333,42 @@ func (s *Mux) newMsg(connection *conn, pack *common.MuxPackager) (err error) {
 	return
 }

-func (s *Mux) Close() error {
+func (s *Mux) Close() (err error) {
 	logs.Warn("close mux")
 	if s.IsClose {
 		return errors.New("the mux has closed")
 	}
 	s.IsClose = true
 	s.connMap.Close()
+	s.connMap = nil
 	//s.bufQueue.Stop()
 	s.closeChan <- struct{}{}
 	close(s.newConnCh)
-	return s.conn.Close()
+	err = s.conn.Close()
+	s.release()
+	return
+}
+
+func (s *Mux) release() {
+	for {
+		pack := s.writeQueue.TryPop()
+		if pack == nil {
+			break
+		}
+		if pack.BasePackager.Content != nil {
+			common.WindowBuff.Put(pack.BasePackager.Content)
+		}
+		common.MuxPack.Put(pack)
+	}
+	for {
+		connection := s.newConnQueue.TryPop()
+		if connection == nil {
+			break
+		}
+		connection = nil
+	}
+	s.writeQueue.Stop()
+	s.newConnQueue.Stop()
 }

 //get new connId as unique flag
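
Close() now flips IsClose before anything is torn down, and the blocking consumers in packBuf and readSession re-check the flag after every Pop, because Stop() can wake them without a valid item; release() then drains both queues and returns their contents to the pools. A sketch of the consumer side of that handshake (illustrative only; it models the flag with sync/atomic, whereas the commit uses a plain bool field, which is still technically racy):

// Shutdown-aware consumer loop: q.Pop blocks until Push or Stop,
// and ok is false once the queue has been stopped. Assumes
// "sync/atomic" is imported.
func consume(q interface{ Pop() (item int, ok bool) }, closed *uint32, handle func(int)) {
	for {
		if atomic.LoadUint32(closed) == 1 {
			return
		}
		item, ok := q.Pop()
		if atomic.LoadUint32(closed) == 1 || !ok {
			return // closed while we were blocked: drop the wakeup
		}
		handle(item)
	}
}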
@@ -352,7 +388,7 @@ type bandwidth struct {
 	readStart     time.Time
 	lastReadStart time.Time
 	bufLength     uint16
-	readBandwidth float64
+	readBandwidth uint64 // store in bits, but it's float64
 }

 func (Self *bandwidth) StartRead() {
@@ -371,16 +407,17 @@ func (Self *bandwidth) SetCopySize(n uint16) {
 func (Self *bandwidth) calcBandWidth() {
 	t := Self.readStart.Sub(Self.lastReadStart)
-	Self.readBandwidth = float64(Self.bufLength) / t.Seconds()
+	atomic.StoreUint64(&Self.readBandwidth, math.Float64bits(float64(Self.bufLength)/t.Seconds()))
 	Self.bufLength = 0
 }

 func (Self *bandwidth) Get() (bw float64) {
 	// The zero value, 0 for numeric types
-	if Self.readBandwidth <= 0 {
-		Self.readBandwidth = 100
+	bw = math.Float64frombits(atomic.LoadUint64(&Self.readBandwidth))
+	if bw <= 0 {
+		bw = 100
 	}
-	return Self.readBandwidth
+	return
 }

 const counterBits = 4
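
sync/atomic has no float64 operations, so readBandwidth becomes a uint64 holding the float's IEEE-754 bit pattern, converted with math.Float64bits / math.Float64frombits on every store and load; the mux already stores s.latency the same way. A minimal self-contained version of the pattern:

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicFloat64 stores a float64 as its bit pattern so it can be
// updated and read atomically without a mutex.
type atomicFloat64 struct{ bits uint64 }

func (a *atomicFloat64) Store(v float64) {
	atomic.StoreUint64(&a.bits, math.Float64bits(v))
}

func (a *atomicFloat64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&a.bits))
}

func main() {
	var bw atomicFloat64
	bw.Store(12.5e6)       // bytes per second
	fmt.Println(bw.Load()) // 1.25e+07
}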

View File

@@ -59,7 +59,7 @@ const maxStarving uint8 = 8
 func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
 	var iter bool
 	for {
-		packager = Self.pop()
+		packager = Self.TryPop()
 		if packager != nil {
 			return
 		}
@@ -75,20 +75,20 @@ func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
 	}
 	Self.cond.L.Lock()
 	defer Self.cond.L.Unlock()
-	for packager = Self.pop(); packager == nil; {
+	for packager = Self.TryPop(); packager == nil; {
 		if Self.stop {
 			return
 		}
 		//logs.Warn("queue into wait")
 		Self.cond.Wait()
 		// wait for it with no more iter
-		packager = Self.pop()
+		packager = Self.TryPop()
 		//logs.Warn("queue wait finish", packager)
 	}
 	return
 }

-func (Self *PriorityQueue) pop() (packager *common.MuxPackager) {
+func (Self *PriorityQueue) TryPop() (packager *common.MuxPackager) {
 	ptr, ok := Self.highestChain.popTail()
 	if ok {
 		packager = (*common.MuxPackager)(ptr)
@@ -150,7 +150,7 @@ func (Self *ConnQueue) Push(connection *conn) {
 func (Self *ConnQueue) Pop() (connection *conn) {
 	var iter bool
 	for {
-		connection = Self.pop()
+		connection = Self.TryPop()
 		if connection != nil {
 			return
 		}
@@ -166,20 +166,20 @@ func (Self *ConnQueue) Pop() (connection *conn) {
 	}
 	Self.cond.L.Lock()
 	defer Self.cond.L.Unlock()
-	for connection = Self.pop(); connection == nil; {
+	for connection = Self.TryPop(); connection == nil; {
 		if Self.stop {
 			return
 		}
 		//logs.Warn("queue into wait")
 		Self.cond.Wait()
 		// wait for it with no more iter
-		connection = Self.pop()
+		connection = Self.TryPop()
 		//logs.Warn("queue wait finish", packager)
 	}
 	return
 }

-func (Self *ConnQueue) pop() (connection *conn) {
+func (Self *ConnQueue) TryPop() (connection *conn) {
 	ptr, ok := Self.chain.popTail()
 	if ok {
 		connection = (*conn)(ptr)
@@ -261,6 +261,15 @@ startPop:
 	}
 	// length is not zero, so try to pop
 	for {
+		element = Self.TryPop()
+		if element != nil {
+			return
+		}
+		runtime.Gosched() // another goroutine is still pushing
+	}
+}
+
+func (Self *ReceiveWindowQueue) TryPop() (element *common.ListElement) {
 	ptr, ok := Self.chain.popTail()
 	if ok {
 		//logs.Warn("window pop before", Self.Len())
@@ -269,8 +278,7 @@ startPop:
 		//logs.Warn("window pop", Self.Len(), uint32(element.l))
 		return
 	}
-	runtime.Gosched() // another goroutine is still pushing
-	}
+	return nil
 }

 func (Self *ReceiveWindowQueue) allowPop() (closed bool) {
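
Renaming the unexported pop to an exported TryPop gives callers a non-blocking primitive (release() depends on it to drain without sleeping) while Pop stays the blocking wrapper: try first, then park on a condition variable until Push or Stop signals. A simplified mutex-and-sync.Cond rendition of that split (the real queues sit on a lock-free chain):

package sketch

import "sync"

type Queue struct {
	mu    sync.Mutex
	cond  *sync.Cond
	items []int
	stop  bool
}

func NewQueue() *Queue {
	q := &Queue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

// TryPop never blocks; ok is false when the queue is empty.
func (q *Queue) TryPop() (v int, ok bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return 0, false
	}
	v = q.items[0]
	q.items = q.items[1:]
	return v, true
}

// Pop blocks until an item arrives or Stop is called.
func (q *Queue) Pop() (v int, ok bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 && !q.stop {
		q.cond.Wait() // releases the lock while parked
	}
	if q.stop {
		return 0, false
	}
	v = q.items[0]
	q.items = q.items[1:]
	return v, true
}

func (q *Queue) Push(v int) {
	q.mu.Lock()
	q.items = append(q.items, v)
	q.mu.Unlock()
	q.cond.Signal()
}

func (q *Queue) Stop() {
	q.mu.Lock()
	q.stop = true
	q.mu.Unlock()
	q.cond.Broadcast() // wake every blocked Pop so it can observe stop
}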
@@ -539,7 +547,7 @@ func (c *bufChain) popTail() (unsafe.Pointer, bool) {
 		// It's important that we load the next pointer
 		// *before* popping the tail. In general, d may be
 		// transiently empty, but if next is non-nil before
-		// the pop and the pop fails, then d is permanently
+		// the TryPop and the TryPop fails, then d is permanently
 		// empty, which is the only condition under which it's
 		// safe to drop d from the chain.
 		d2 := loadPoolChainElt(&d.next)
@@ -556,7 +564,7 @@ func (c *bufChain) popTail() (unsafe.Pointer, bool) {
 		// The tail of the chain has been drained, so move on
 		// to the next dequeue. Try to drop it from the chain
-		// so the next pop doesn't have to look at the empty
+		// so the next TryPop doesn't have to look at the empty
 		// dequeue again.
 		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
 			// We won the race. Clear the prev pointer so