mirror of https://github.com/ehang-io/nps

commit 23b023c562: add lock free queue
parent c2f4510a0f
@@ -263,7 +263,7 @@ func GetPortByAddr(addr string) int {
     return p
 }
 
-func CopyBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
+func CopyBuffer(dst io.Writer, src io.Reader, label ...string) (written int64, err error) {
     buf := CopyBuff.Get()
     defer CopyBuff.Put(buf)
     for {
@@ -2,10 +2,12 @@ package mux
 
 import (
     "errors"
+    "github.com/astaxie/beego/logs"
     "io"
     "net"
     "strconv"
     "sync"
+    "sync/atomic"
     "time"
 
     "github.com/cnlh/nps/lib/common"
@@ -65,7 +67,7 @@ func (s *conn) Read(buf []byte) (n int, err error) {
         errstr = err.Error()
     }
     d := getM(s.label, int(s.connId))
-    d.logs = append(d.logs, s.label+"read "+strconv.Itoa(n)+" "+errstr)
+    d.logs = append(d.logs, s.label+"read "+strconv.Itoa(n)+" "+errstr+" "+string(buf[:100]))
     setM(s.label, int(s.connId), d)
     return
 }
@@ -187,11 +189,7 @@ func (Self *ReceiveWindow) RemainingSize() (n uint32) {
 
 func (Self *ReceiveWindow) ReadSize() (n uint32) {
     // acknowledge the size already read
-    Self.bufQueue.mutex.Lock()
-    n = Self.readLength
-    Self.readLength = 0
-    Self.bufQueue.mutex.Unlock()
-    return
+    return atomic.SwapUint32(&Self.readLength, 0)
 }
 
 func (Self *ReceiveWindow) CalcSize() {
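Note: the hunk above replaces a mutex-protected read-and-reset of readLength with a single atomic swap, and the next hunk replaces the locked increment with atomic.AddUint32. A minimal standalone sketch of the same pattern, with an illustrative counter type that is not part of the repository:

package main

import (
    "fmt"
    "sync/atomic"
)

// counter mimics the readLength field: writers add to it, a reader
// drains it with one atomic swap instead of lock / read / reset / unlock.
type counter struct{ n uint32 }

func (c *counter) add(delta uint32) { atomic.AddUint32(&c.n, delta) }
func (c *counter) drain() uint32    { return atomic.SwapUint32(&c.n, 0) }

func main() {
    var c counter
    c.add(10)
    c.add(22)
    fmt.Println(c.drain()) // 32
    fmt.Println(c.drain()) // 0
}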
@@ -270,10 +268,8 @@ copyData:
     //Self.bw.SetCopySize(l)
     pOff += l
     Self.off += uint32(l)
-    Self.bufQueue.mutex.Lock()
-    Self.readLength += uint32(l)
+    atomic.AddUint32(&Self.readLength, uint32(l))
     //logs.Warn("window read length buf len", Self.readLength, Self.bufQueue.Len())
-    Self.bufQueue.mutex.Unlock()
     n += l
     l = 0
     //Self.bw.EndRead()
@@ -422,6 +418,7 @@ func (Self *SendWindow) WriteTo() (p []byte, part bool, err error) {
     if len(Self.buf[Self.off:]) > common.MAXIMUM_SEGMENT_SIZE {
         sendSize = common.MAXIMUM_SEGMENT_SIZE
         part = true
+        logs.Warn("cut buf by mss")
     } else {
         sendSize = uint32(len(Self.buf[Self.off:]))
         part = false
@@ -430,6 +427,7 @@ func (Self *SendWindow) WriteTo() (p []byte, part bool, err error) {
         // usable window size is small than
         // window MAXIMUM_SEGMENT_SIZE or send buf left
         sendSize = Self.RemainingSize()
+        logs.Warn("cut buf by remainingsize", sendSize, len(Self.buf[Self.off:]))
         part = true
     }
     //logs.Warn("send size", sendSize)
@@ -34,6 +34,8 @@ type Mux struct {
 }
 
 func NewMux(c net.Conn, connType string) *Mux {
+    //c.(*net.TCPConn).SetReadBuffer(0)
+    //c.(*net.TCPConn).SetWriteBuffer(0)
     m := &Mux{
         conn:    c,
         connMap: NewConnMap(),
@@ -173,10 +175,6 @@ func (s *Mux) ping() {
             select {
             case <-ticker.C:
             }
-            //Avoid going beyond the scope
-            if (math.MaxInt32 - s.id) < 10000 {
-                s.id = 0
-            }
             now, _ := time.Now().UTC().MarshalText()
             s.sendInfo(common.MUX_PING_FLAG, common.MUX_PING, now)
             if !s.pingTimer.Stop() {
@@ -321,6 +319,10 @@ func (s *Mux) Close() error {
 
 //get new connId as unique flag
 func (s *Mux) getId() (id int32) {
+    //Avoid going beyond the scope
+    if (math.MaxInt32 - s.id) < 10000 {
+        atomic.SwapInt32(&s.id, 0)
+    }
     id = atomic.AddInt32(&s.id, 1)
     if _, ok := s.connMap.Get(id); ok {
         s.getId()
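Note: getId now resets the counter with atomic.SwapInt32 before it can overflow and then hands out ids via atomic.AddInt32. A minimal sketch of that allocation pattern in isolation; the idGen type is illustrative and not part of the repository:

package main

import (
    "fmt"
    "math"
    "sync/atomic"
)

type idGen struct{ id int32 }

func (g *idGen) next() int32 {
    // reset before the counter can overflow int32
    if (math.MaxInt32 - atomic.LoadInt32(&g.id)) < 10000 {
        atomic.SwapInt32(&g.id, 0)
    }
    return atomic.AddInt32(&g.id, 1)
}

func main() {
    g := &idGen{id: math.MaxInt32 - 5000}
    fmt.Println(g.next()) // wraps back to 1 instead of overflowing
}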
@@ -3,13 +3,16 @@ package mux
 import (
     "bufio"
     "fmt"
+    "io"
     "net"
     "net/http"
     "net/http/httputil"
     _ "net/http/pprof"
+    "strconv"
     "sync"
     "testing"
     "time"
+    "unsafe"
 
     "github.com/astaxie/beego/logs"
     "github.com/cnlh/nps/lib/common"
@@ -30,20 +33,22 @@ func TestNewMux(t *testing.T) {
     go func() {
         m2 := NewMux(conn2, "tcp")
         for {
-            logs.Warn("npc starting accept")
+            //logs.Warn("npc starting accept")
             c, err := m2.Accept()
             if err != nil {
                 logs.Warn(err)
                 continue
             }
-            logs.Warn("npc accept success ")
+            //logs.Warn("npc accept success ")
             c2, err := net.Dial("tcp", "127.0.0.1:80")
             if err != nil {
                 logs.Warn(err)
                 c.Close()
                 continue
             }
-            go func(c2 net.Conn, c net.Conn) {
+            //c2.(*net.TCPConn).SetReadBuffer(0)
+            //c2.(*net.TCPConn).SetReadBuffer(0)
+            go func(c2 net.Conn, c *conn) {
                 wg := sync.WaitGroup{}
                 wg.Add(1)
                 go func() {
@@ -51,7 +56,7 @@ func TestNewMux(t *testing.T) {
                     if err != nil {
                         c2.Close()
                         c.Close()
-                        logs.Warn("close npc by copy from nps", err)
+                        logs.Warn("close npc by copy from nps", err, c.connId)
                     }
                     wg.Done()
                 }()
@@ -61,13 +66,13 @@ func TestNewMux(t *testing.T) {
                     if err != nil {
                         c2.Close()
                         c.Close()
-                        logs.Warn("close npc by copy from server", err)
+                        logs.Warn("close npc by copy from server", err, c.connId)
                     }
                     wg.Done()
                 }()
-                logs.Warn("npc wait")
+                //logs.Warn("npc wait")
                 wg.Wait()
-            }(c2, c)
+            }(c2, c.(*conn))
         }
     }()
 
@@ -78,42 +83,46 @@ func TestNewMux(t *testing.T) {
            logs.Warn(err)
        }
        for {
-           logs.Warn("nps starting accept")
-           conn, err := l.Accept()
+           //logs.Warn("nps starting accept")
+           conns, err := l.Accept()
            if err != nil {
                logs.Warn(err)
                continue
            }
-           logs.Warn("nps accept success starting new conn")
+           //conns.(*net.TCPConn).SetReadBuffer(0)
+           //conns.(*net.TCPConn).SetReadBuffer(0)
+           //logs.Warn("nps accept success starting new conn")
            tmpCpnn, err := m1.NewConn()
            if err != nil {
                logs.Warn("nps new conn err ", err)
                continue
            }
            logs.Warn("nps new conn success ", tmpCpnn.connId)
-           go func(tmpCpnn net.Conn, conn net.Conn) {
+           go func(tmpCpnn *conn, conns net.Conn) {
                go func() {
-                   _, err := common.CopyBuffer(tmpCpnn, conn)
+                   _, err := common.CopyBuffer(tmpCpnn, conns)
                    if err != nil {
-                       conn.Close()
+                       conns.Close()
                        tmpCpnn.Close()
-                       logs.Warn("close nps by copy from user")
+                       logs.Warn("close nps by copy from user", tmpCpnn.connId, err)
                    }
                }()
                //time.Sleep(time.Second)
-               _, err = common.CopyBuffer(conn, tmpCpnn)
+               _, err = common.CopyBuffer(conns, tmpCpnn)
                if err != nil {
-                   conn.Close()
+                   conns.Close()
                    tmpCpnn.Close()
-                   logs.Warn("close nps by copy from npc ")
+                   logs.Warn("close nps by copy from npc ", tmpCpnn.connId, err)
                }
-           }(tmpCpnn, conn)
+           }(tmpCpnn, conns)
        }
    }()
 
    go NewLogServer()
    time.Sleep(time.Second * 5)
-   //go test_request()
+   //for i:=0;i<1000;i++ {
+   //   go test_raw(i)
+   //}
 
    for {
        time.Sleep(time.Second * 5)
@@ -168,23 +177,40 @@ Connection: keep-alive
    }
 }
 
-func test_raw() {
-   conn, _ := net.Dial("tcp", "127.0.0.1:7777")
-   for {
-       conn.Write([]byte(`GET /videojs5/test HTTP/1.1
+func test_raw(k int) {
+   for i := 0; i < 1; i++ {
+       ti := time.Now()
+       conn, _ := net.Dial("tcp", "127.0.0.1:7777")
+       tid := time.Now()
+       conn.Write([]byte(`GET / HTTP/1.1
 Host: 127.0.0.1:7777
-Connection: keep-alive
 
 
 `))
-       buf := make([]byte, 1000000)
-       n, err := conn.Read(buf)
+       tiw := time.Now()
+       buf := make([]byte, 3572)
+       n, err := io.ReadFull(conn, buf)
+       //n, err := conn.Read(buf)
        if err != nil {
            logs.Warn("close by read response err", err)
            break
        }
-       logs.Warn(n, string(buf[:50]), "\n--------------\n", string(buf[n-50:n]))
-       time.Sleep(time.Second)
+       //logs.Warn(n, string(buf[:50]), "\n--------------\n", string(buf[n-50:n]))
+       //time.Sleep(time.Second)
+       err = conn.Close()
+       if err != nil {
+           logs.Warn("close conn err ", err)
+       }
+       now := time.Now()
+       du := now.Sub(ti).Seconds()
+       dud := now.Sub(tid).Seconds()
+       duw := now.Sub(tiw).Seconds()
+       if du > 1 {
+           logs.Warn("duration long", du, dud, duw, k, i)
+       }
+       if n != 3572 {
+           logs.Warn("n loss", n, string(buf))
+       }
    }
 }
@@ -199,3 +225,53 @@ func TestNewConn(t *testing.T) {
    logs.Warn(copy(buf[:3], b), len(buf), cap(buf))
    logs.Warn(len(buf), buf[0])
 }
+
+func TestDQueue(t *testing.T) {
+   logs.EnableFuncCallDepth(true)
+   logs.SetLogFuncCallDepth(3)
+   d := new(bufDequeue)
+   d.vals = make([]unsafe.Pointer, 8)
+   go func() {
+       time.Sleep(time.Second)
+       for i := 0; i < 10; i++ {
+           logs.Warn(i)
+           logs.Warn(d.popTail())
+       }
+   }()
+   go func() {
+       time.Sleep(time.Second)
+       for i := 0; i < 10; i++ {
+           data := "test"
+           go logs.Warn(i, unsafe.Pointer(&data), d.pushHead(unsafe.Pointer(&data)))
+       }
+   }()
+   time.Sleep(time.Second * 3)
+}
+
+func TestChain(t *testing.T) {
+   logs.EnableFuncCallDepth(true)
+   logs.SetLogFuncCallDepth(3)
+   d := new(bufChain)
+   d.new(256)
+   go func() {
+       time.Sleep(time.Second)
+       for i := 0; i < 1000; i++ {
+           unsa, ok := d.popTail()
+           str := (*string)(unsa)
+           if ok {
+               logs.Warn(i, str, *str, ok)
+           } else {
+               logs.Warn("nil", i, ok)
+           }
+       }
+   }()
+   go func() {
+       time.Sleep(time.Second)
+       for i := 0; i < 1000; i++ {
+           data := "test " + strconv.Itoa(i)
+           logs.Warn(data, unsafe.Pointer(&data))
+           go d.pushHead(unsafe.Pointer(&data))
+       }
+   }()
+   time.Sleep(time.Second * 10)
+}
lib/mux/queue.go (390 lines changed)
@@ -1,19 +1,19 @@
 package mux
 
 import (
-    "container/list"
     "errors"
     "github.com/cnlh/nps/lib/common"
     "io"
-    "sync"
+    "math"
+    "sync/atomic"
     "time"
+    "unsafe"
 )
 
 type QueueOp struct {
     readOp  chan struct{}
     cleanOp chan struct{}
-    popWait bool
-    mutex   sync.Mutex
+    popWait int32
 }
 
 func (Self *QueueOp) New() {
@@ -22,15 +22,15 @@ func (Self *QueueOp) New() {
 }
 
 func (Self *QueueOp) allowPop() (closed bool) {
-    Self.mutex.Lock()
-    Self.popWait = false
-    Self.mutex.Unlock()
-    select {
-    case Self.readOp <- struct{}{}:
-        return false
-    case <-Self.cleanOp:
-        return true
-    }
+    if atomic.CompareAndSwapInt32(&Self.popWait, 1, 0) {
+        select {
+        case Self.readOp <- struct{}{}:
+            return false
+        case <-Self.cleanOp:
+            return true
+        }
+    }
+    return
 }
 
 func (Self *QueueOp) Clean() {
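Note: allowPop now flips the popWait flag with a compare-and-swap instead of taking the queue mutex, so only a Push that actually observes a parked Pop sends the wake-up on readOp. A rough standalone sketch of that CAS-gated notification, with illustrative names that are not the repository's API:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

type waiter struct {
    waiting int32
    readOp  chan struct{}
}

// pop marks itself as waiting and blocks until a push wakes it.
func (w *waiter) pop() {
    if atomic.CompareAndSwapInt32(&w.waiting, 0, 1) {
        <-w.readOp
    }
}

// push only signals when it wins the CAS, i.e. when a pop is parked.
func (w *waiter) push() {
    if atomic.CompareAndSwapInt32(&w.waiting, 1, 0) {
        w.readOp <- struct{}{}
    }
}

func main() {
    w := &waiter{readOp: make(chan struct{})}
    go func() { time.Sleep(10 * time.Millisecond); w.push() }()
    w.pop()
    fmt.Println("woken")
}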
@@ -40,84 +40,72 @@ func (Self *QueueOp) Clean() {
 }
 
 type PriorityQueue struct {
-    list *list.List
     QueueOp
+    highestChain *bufChain
+    middleChain  *bufChain
+    lowestChain  *bufChain
+    hunger       uint8
 }
 
 func (Self *PriorityQueue) New() {
-    Self.list = list.New()
+    Self.highestChain = new(bufChain)
+    Self.highestChain.new(4)
+    Self.middleChain = new(bufChain)
+    Self.middleChain.new(32)
+    Self.lowestChain = new(bufChain)
+    Self.lowestChain.new(256)
    Self.QueueOp.New()
 }
 
 func (Self *PriorityQueue) Push(packager *common.MuxPackager) {
-    Self.mutex.Lock()
    switch packager.Flag {
    case common.MUX_PING_FLAG, common.MUX_PING_RETURN:
-        Self.list.PushFront(packager)
+        Self.highestChain.pushHead(unsafe.Pointer(packager))
        // the ping package need highest priority
        // prevent ping calculation error
-    case common.MUX_CONN_CLOSE:
-        Self.insert(packager)
-        // the close package may need priority too, set second
-        // prevent wait too long to close conn
+    case common.MUX_NEW_CONN, common.MUX_NEW_CONN_OK, common.MUX_NEW_CONN_Fail:
+        // the new conn package need some priority too
+        Self.middleChain.pushHead(unsafe.Pointer(packager))
    default:
-        Self.list.PushBack(packager)
+        Self.lowestChain.pushHead(unsafe.Pointer(packager))
    }
-    if Self.popWait {
-        Self.mutex.Unlock()
-        Self.allowPop()
-        return
-    }
-    Self.mutex.Unlock()
+    Self.allowPop()
    return
 }
 
-func (Self *PriorityQueue) insert(packager *common.MuxPackager) {
-    element := Self.list.Back()
-    for {
-        if element == nil { // PriorityQueue dose not have any of msg package with this close package id
-            element = Self.list.Front()
-            if element != nil {
-                Self.list.InsertAfter(packager, element)
-                // insert close package to second
-            } else {
-                Self.list.PushFront(packager)
-                // list is empty, push to front
-            }
-            break
-        }
-        if element.Value.(*common.MuxPackager).Flag == common.MUX_NEW_MSG &&
-            element.Value.(*common.MuxPackager).Id == packager.Id {
-            Self.list.InsertAfter(packager, element) // PriorityQueue has some msg package
-            // with this close package id, insert close package after last msg package
-            break
-        }
-        element = element.Prev()
-    }
-}
-
 func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
-    Self.mutex.Lock()
-    element := Self.list.Front()
-    if element != nil {
-        packager = element.Value.(*common.MuxPackager)
-        Self.list.Remove(element)
-        Self.mutex.Unlock()
+startPop:
+    ptr, ok := Self.highestChain.popTail()
+    if ok {
+        packager = (*common.MuxPackager)(ptr)
        return
    }
-    Self.popWait = true // PriorityQueue is empty, notice Push method
-    Self.mutex.Unlock()
-    select {
-    case <-Self.readOp:
-        return Self.Pop()
-    case <-Self.cleanOp:
-        return nil
-    }
-}
-
-func (Self *PriorityQueue) Len() (n int) {
-    n = Self.list.Len()
-    return
+    if Self.hunger < 100 {
+        ptr, ok = Self.middleChain.popTail()
+        if ok {
+            packager = (*common.MuxPackager)(ptr)
+            Self.hunger++
+            return
+        }
+    }
+    ptr, ok = Self.lowestChain.popTail()
+    if ok {
+        packager = (*common.MuxPackager)(ptr)
+        if Self.hunger > 0 {
+            Self.hunger = uint8(Self.hunger / 2)
+        }
+        return
+    }
+    // PriorityQueue is empty, notice Push method
+    if atomic.CompareAndSwapInt32(&Self.popWait, 0, 1) {
+        select {
+        case <-Self.readOp:
+            goto startPop
+        case <-Self.cleanOp:
+            return nil
+        }
+    }
+    goto startPop
 }
 
 type ListElement struct {
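Note: the rewritten Pop drains three lock-free chains in priority order and keeps a hunger counter so a steady stream of middle-priority packets cannot starve the lowest chain forever. A simplified sketch of that selection logic over plain slices, purely illustrative and not the repository's API:

package main

import "fmt"

type pq struct {
    high, mid, low []string
    hunger         uint8
}

func (q *pq) pop() (string, bool) {
    if len(q.high) > 0 {
        v := q.high[0]
        q.high = q.high[1:]
        return v, true
    }
    if q.hunger < 100 && len(q.mid) > 0 {
        v := q.mid[0]
        q.mid = q.mid[1:]
        q.hunger++ // middle priority served; move closer to forcing a low pop
        return v, true
    }
    if len(q.low) > 0 {
        v := q.low[0]
        q.low = q.low[1:]
        q.hunger /= 2 // low priority finally served; relax the pressure
        return v, true
    }
    return "", false
}

func main() {
    q := &pq{mid: []string{"new-conn"}, low: []string{"data-1", "data-2"}}
    for v, ok := q.pop(); ok; v, ok = q.pop() {
        fmt.Println(v)
    }
}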
@@ -137,36 +125,36 @@ func (Self *ListElement) New(buf []byte, l uint16, part bool) (err error) {
 }
 
 type FIFOQueue struct {
-    list    []*ListElement
+    QueueOp
+    chain   *bufChain
     length  uint32
     stopOp  chan struct{}
     timeout time.Time
-    QueueOp
 }
 
 func (Self *FIFOQueue) New() {
    Self.QueueOp.New()
+    Self.chain = new(bufChain)
+    Self.chain.new(64)
    Self.stopOp = make(chan struct{}, 1)
 }
 
 func (Self *FIFOQueue) Push(element *ListElement) {
-    Self.mutex.Lock()
-    Self.list = append(Self.list, element)
+    Self.chain.pushHead(unsafe.Pointer(element))
    Self.length += uint32(element.l)
-    if Self.popWait {
-        Self.mutex.Unlock()
-        Self.allowPop()
-        return
-    }
-    Self.mutex.Unlock()
+    Self.allowPop()
    return
 }
 
 func (Self *FIFOQueue) Pop() (element *ListElement, err error) {
-    Self.mutex.Lock()
-    if len(Self.list) == 0 {
-        Self.popWait = true
-        Self.mutex.Unlock()
+startPop:
+    ptr, ok := Self.chain.popTail()
+    if ok {
+        element = (*ListElement)(ptr)
+        Self.length -= uint32(element.l)
+        return
+    }
+    if atomic.CompareAndSwapInt32(&Self.popWait, 0, 1) {
        t := Self.timeout.Sub(time.Now())
        if t <= 0 {
            t = time.Minute
@@ -175,7 +163,7 @@ func (Self *FIFOQueue) Pop() (element *ListElement, err error) {
        defer timer.Stop()
        select {
        case <-Self.readOp:
-            Self.mutex.Lock()
+            goto startPop
        case <-Self.cleanOp:
            return
        case <-Self.stopOp:
@@ -186,11 +174,7 @@ func (Self *FIFOQueue) Pop() (element *ListElement, err error) {
            return
        }
    }
-    element = Self.list[0]
-    Self.list = Self.list[1:]
-    Self.length -= uint32(element.l)
-    Self.mutex.Unlock()
-    return
+    goto startPop
 }
 
 func (Self *FIFOQueue) Len() (n uint32) {
@@ -204,3 +188,231 @@ func (Self *FIFOQueue) Stop() {
 func (Self *FIFOQueue) SetTimeOut(t time.Time) {
    Self.timeout = t
 }
+
+// https://golang.org/src/sync/poolqueue.go
+
+type bufDequeue struct {
+    // headTail packs together a 32-bit head index and a 32-bit
+    // tail index. Both are indexes into vals modulo len(vals)-1.
+    //
+    // tail = index of oldest data in queue
+    // head = index of next slot to fill
+    //
+    // Slots in the range [tail, head) are owned by consumers.
+    // A consumer continues to own a slot outside this range until
+    // it nils the slot, at which point ownership passes to the
+    // producer.
+    //
+    // The head index is stored in the most-significant bits so
+    // that we can atomically add to it and the overflow is
+    // harmless.
+    headTail uint64
+
+    // vals is a ring buffer of interface{} values stored in this
+    // dequeue. The size of this must be a power of 2.
+    //
+    // A slot is still in use until *both* the tail
+    // index has moved beyond it and typ has been set to nil. This
+    // is set to nil atomically by the consumer and read
+    // atomically by the producer.
+    vals []unsafe.Pointer
+}
+
+const dequeueBits = 32
+
+// dequeueLimit is the maximum size of a bufDequeue.
+//
+// This must be at most (1<<dequeueBits)/2 because detecting fullness
+// depends on wrapping around the ring buffer without wrapping around
+// the index. We divide by 4 so this fits in an int on 32-bit.
+const dequeueLimit = (1 << dequeueBits) / 4
+
+func (d *bufDequeue) unpack(ptrs uint64) (head, tail uint32) {
+    const mask = 1<<dequeueBits - 1
+    head = uint32((ptrs >> dequeueBits) & mask)
+    tail = uint32(ptrs & mask)
+    return
+}
+
+func (d *bufDequeue) pack(head, tail uint32) uint64 {
+    const mask = 1<<dequeueBits - 1
+    return (uint64(head) << dequeueBits) |
+        uint64(tail&mask)
+}
+
+// pushHead adds val at the head of the queue. It returns false if the
+// queue is full.
+func (d *bufDequeue) pushHead(val unsafe.Pointer) bool {
+    var slot *unsafe.Pointer
+    for {
+        ptrs := atomic.LoadUint64(&d.headTail)
+        head, tail := d.unpack(ptrs)
+        if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
+            // Queue is full.
+            return false
+        }
+        ptrs2 := d.pack(head+1, tail)
+        if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
+            slot = &d.vals[head&uint32(len(d.vals)-1)]
+            break
+        }
+    }
+    // The head slot is free, so we own it.
+    *slot = val
+    return true
+}
+
+// popTail removes and returns the element at the tail of the queue.
+// It returns false if the queue is empty. It may be called by any
+// number of consumers.
+func (d *bufDequeue) popTail() (unsafe.Pointer, bool) {
+    ptrs := atomic.LoadUint64(&d.headTail)
+    head, tail := d.unpack(ptrs)
+    if tail == head {
+        // Queue is empty.
+        return nil, false
+    }
+    slot := &d.vals[tail&uint32(len(d.vals)-1)]
+    for {
+        typ := atomic.LoadPointer(slot)
+        if typ != nil {
+            break
+        }
+        // Another goroutine is still pushing data on the tail.
+    }
+
+    // We now own slot.
+    val := *slot
+
+    // Tell pushHead that we're done with this slot. Zeroing the
+    // slot is also important so we don't leave behind references
+    // that could keep this object live longer than necessary.
+    //
+    // We write to val first and then publish that we're done with
+    atomic.StorePointer(slot, nil)
+    // At this point pushHead owns the slot.
+    if tail < math.MaxUint32 {
+        atomic.AddUint64(&d.headTail, 1)
+    } else {
+        atomic.AddUint64(&d.headTail, ^uint64(math.MaxUint32-1))
+    }
+    return val, true
+}
+
+// bufChain is a dynamically-sized version of bufDequeue.
+//
+// This is implemented as a doubly-linked list queue of poolDequeues
+// where each dequeue is double the size of the previous one. Once a
+// dequeue fills up, this allocates a new one and only ever pushes to
+// the latest dequeue. Pops happen from the other end of the list and
+// once a dequeue is exhausted, it gets removed from the list.
+type bufChain struct {
+    // head is the bufDequeue to push to. This is only accessed
+    // by the producer, so doesn't need to be synchronized.
+    head *bufChainElt
+
+    // tail is the bufDequeue to popTail from. This is accessed
+    // by consumers, so reads and writes must be atomic.
+    tail        *bufChainElt
+    chainStatus int32
+}
+
+type bufChainElt struct {
+    bufDequeue
+
+    // next and prev link to the adjacent poolChainElts in this
+    // bufChain.
+    //
+    // next is written atomically by the producer and read
+    // atomically by the consumer. It only transitions from nil to
+    // non-nil.
+    //
+    // prev is written atomically by the consumer and read
+    // atomically by the producer. It only transitions from
+    // non-nil to nil.
+    next, prev *bufChainElt
+}
+
+func storePoolChainElt(pp **bufChainElt, v *bufChainElt) {
+    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
+}
+
+func loadPoolChainElt(pp **bufChainElt) *bufChainElt {
+    return (*bufChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
+}
+
+func (c *bufChain) new(initSize int) {
+    // Initialize the chain.
+    // initSize must be a power of 2
+    d := new(bufChainElt)
+    d.vals = make([]unsafe.Pointer, initSize)
+    storePoolChainElt(&c.head, d)
+    storePoolChainElt(&c.tail, d)
+}
+
+func (c *bufChain) pushHead(val unsafe.Pointer) {
+    for {
+        d := loadPoolChainElt(&c.head)
+
+        if d.pushHead(val) {
+            return
+        }
+
+        // The current dequeue is full. Allocate a new one of twice
+        // the size.
+        if atomic.CompareAndSwapInt32(&c.chainStatus, 0, 1) {
+            newSize := len(d.vals) * 2
+            if newSize >= dequeueLimit {
+                // Can't make it any bigger.
+                newSize = dequeueLimit
+            }
+
+            d2 := &bufChainElt{prev: d}
+            d2.vals = make([]unsafe.Pointer, newSize)
+            storePoolChainElt(&c.head, d2)
+            storePoolChainElt(&d.next, d2)
+            d2.pushHead(val)
+            atomic.SwapInt32(&c.chainStatus, 0)
+        }
+    }
+}
+
+func (c *bufChain) popTail() (unsafe.Pointer, bool) {
+    d := loadPoolChainElt(&c.tail)
+    if d == nil {
+        return nil, false
+    }
+
+    for {
+        // It's important that we load the next pointer
+        // *before* popping the tail. In general, d may be
+        // transiently empty, but if next is non-nil before
+        // the pop and the pop fails, then d is permanently
+        // empty, which is the only condition under which it's
+        // safe to drop d from the chain.
+        d2 := loadPoolChainElt(&d.next)
+
+        if val, ok := d.popTail(); ok {
+            return val, ok
+        }
+
+        if d2 == nil {
+            // This is the only dequeue. It's empty right
+            // now, but could be pushed to in the future.
+            return nil, false
+        }
+
+        // The tail of the chain has been drained, so move on
+        // to the next dequeue. Try to drop it from the chain
+        // so the next pop doesn't have to look at the empty
+        // dequeue again.
+        if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
+            // We won the race. Clear the prev pointer so
+            // the garbage collector can collect the empty
+            // dequeue and so popHead doesn't back up
+            // further than necessary.
+            storePoolChainElt(&d2.prev, nil)
+        }
+        d = d2
+    }
+}
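Note: the ring buffer added above packs the 32-bit head and tail indexes into one uint64 so both can be claimed or advanced with a single atomic operation. A small standalone sketch of the pack/unpack arithmetic that pushHead and popTail rely on; it mirrors the code above and is not an additional API:

package main

import "fmt"

const dequeueBits = 32

func pack(head, tail uint32) uint64 {
    const mask = 1<<dequeueBits - 1
    return (uint64(head) << dequeueBits) | uint64(tail&mask)
}

func unpack(ptrs uint64) (head, tail uint32) {
    const mask = 1<<dequeueBits - 1
    head = uint32((ptrs >> dequeueBits) & mask)
    tail = uint32(ptrs & mask)
    return
}

func main() {
    // head and tail travel together in one word, so a CAS on the
    // packed value claims a slot without taking a lock.
    ptrs := pack(7, 3)
    h, t := unpack(ptrs)
    fmt.Println(h, t)                 // 7 3
    fmt.Println(unpack(pack(h+1, t))) // 8 3 after claiming the head slot
}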