package mux

import (
	"errors"
	"github.com/cnlh/nps/lib/common"
	"io"
	"math"
	"sync/atomic"
	"time"
	"unsafe"
)

// QueueOp holds the wake/clean signalling shared by the queues below:
// readOp wakes a blocked Pop after a Push, cleanOp aborts blocked callers.
type QueueOp struct {
	readOp  chan struct{}
	cleanOp chan struct{}
	popWait int32
}

func (Self *QueueOp) New() {
	Self.readOp = make(chan struct{})
	Self.cleanOp = make(chan struct{}, 2)
}

// allowPop wakes a Pop that set popWait; it reports whether the queue
// was cleaned instead.
func (Self *QueueOp) allowPop() (closed bool) {
	if atomic.CompareAndSwapInt32(&Self.popWait, 1, 0) {
		select {
		case Self.readOp <- struct{}{}:
			return false
		case <-Self.cleanOp:
			return true
		}
	}
	return
}

func (Self *QueueOp) Clean() {
	Self.cleanOp <- struct{}{}
	Self.cleanOp <- struct{}{}
	close(Self.cleanOp)
}

type PriorityQueue struct {
	QueueOp
	highestChain *bufChain
	middleChain  *bufChain
	lowestChain  *bufChain
	hunger       uint8 // counts consecutive middle-chain pops so the lowest chain is not starved
}

func (Self *PriorityQueue) New() {
	Self.highestChain = new(bufChain)
	Self.highestChain.new(4)
	Self.middleChain = new(bufChain)
	Self.middleChain.new(32)
	Self.lowestChain = new(bufChain)
	Self.lowestChain.new(256)
	Self.QueueOp.New()
}

func (Self *PriorityQueue) Push(packager *common.MuxPackager) {
	switch packager.Flag {
	case common.MUX_PING_FLAG, common.MUX_PING_RETURN:
		// ping packets need the highest priority,
		// otherwise the latency calculation goes wrong
		Self.highestChain.pushHead(unsafe.Pointer(packager))
	case common.MUX_NEW_CONN, common.MUX_NEW_CONN_OK, common.MUX_NEW_CONN_Fail:
		// new connection packets also get elevated priority
		Self.middleChain.pushHead(unsafe.Pointer(packager))
	default:
		Self.lowestChain.pushHead(unsafe.Pointer(packager))
	}
	Self.allowPop()
}

func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
startPop:
	ptr, ok := Self.highestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		return
	}
	// serve the middle chain at most 100 times in a row
	if Self.hunger < 100 {
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.hunger++
			return
		}
	}
	ptr, ok = Self.lowestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		if Self.hunger > 0 {
			Self.hunger = Self.hunger / 2
		}
		return
	}
	// the queue is empty; ask Push to wake us up through readOp
	if atomic.CompareAndSwapInt32(&Self.popWait, 0, 1) {
		select {
		case <-Self.readOp:
			goto startPop
		case <-Self.cleanOp:
			return nil
		}
	}
	goto startPop
}

type ListElement struct {
	buf  []byte
	l    uint16
	part bool
}

func (Self *ListElement) New(buf []byte, l uint16, part bool) (err error) {
	if uint16(len(buf)) != l {
		return errors.New("ListElement: buf length not match")
	}
	Self.buf = buf
	Self.l = l
	Self.part = part
	return nil
}

type FIFOQueue struct {
	QueueOp
	chain   *bufChain
	length  uint32
	stopOp  chan struct{}
	timeout time.Time
}

func (Self *FIFOQueue) New() {
	Self.QueueOp.New()
	Self.chain = new(bufChain)
	Self.chain.new(64)
	Self.stopOp = make(chan struct{}, 1)
}

func (Self *FIFOQueue) Push(element *ListElement) {
	Self.chain.pushHead(unsafe.Pointer(element))
	Self.length += uint32(element.l)
	Self.allowPop()
}

func (Self *FIFOQueue) Pop() (element *ListElement, err error) {
startPop:
	ptr, ok := Self.chain.popTail()
	if ok {
		element = (*ListElement)(ptr)
		Self.length -= uint32(element.l)
		return
	}
	if atomic.CompareAndSwapInt32(&Self.popWait, 0, 1) {
		t := time.Until(Self.timeout)
		if t <= 0 {
			t = time.Minute
		}
		timer := time.NewTimer(t)
		defer timer.Stop()
		select {
		case <-Self.readOp:
			goto startPop
		case <-Self.cleanOp:
			return
		case <-Self.stopOp:
			err = io.EOF
			return
		case <-timer.C:
			err = errors.New("mux.queue: read time out")
			return
		}
	}
	goto startPop
}

func (Self *FIFOQueue) Len() (n uint32) {
	return Self.length
}

func (Self *FIFOQueue) Stop() {
	Self.stopOp <- struct{}{}
}

func (Self *FIFOQueue) SetTimeOut(t time.Time) {
	Self.timeout = t
}
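
// exampleFIFOUsage is not part of the original file; it is only an
// illustrative sketch of the intended producer/consumer pattern, assuming one
// goroutine pushes received data while another blocks in Pop. The helper name
// and the 30-second deadline are hypothetical.
func exampleFIFOUsage(raw []byte) ([]byte, error) {
	queue := new(FIFOQueue)
	queue.New()
	queue.SetTimeOut(time.Now().Add(30 * time.Second))

	go func() {
		element := new(ListElement)
		// l must equal len(buf), otherwise ListElement.New returns an error
		if err := element.New(raw, uint16(len(raw)), false); err == nil {
			queue.Push(element) // wakes the blocked Pop via allowPop/readOp
		}
	}()

	element, err := queue.Pop() // blocks until Push signals or the timer fires
	if err != nil {
		return nil, err
	}
	return element.buf[:element.l], nil
}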
// The lock-free ring buffer and chain below are adapted from the Go runtime's
// pool queue:
// https://golang.org/src/sync/poolqueue.go

type bufDequeue struct {
	// headTail packs together a 32-bit head index and a 32-bit
	// tail index. Both are indexes into vals modulo len(vals)-1.
	//
	// tail = index of oldest data in queue
	// head = index of next slot to fill
	//
	// Slots in the range [tail, head) are owned by consumers.
	// A consumer continues to own a slot outside this range until
	// it nils the slot, at which point ownership passes to the
	// producer.
	//
	// The head index is stored in the most-significant bits so
	// that we can atomically add to it and the overflow is
	// harmless.
	headTail uint64

	// vals is a ring buffer of unsafe.Pointer values stored in this
	// dequeue. The size of this must be a power of 2.
	//
	// A slot is still in use until *both* the tail index has moved
	// beyond it and the slot value has been set to nil. The value is
	// set to nil atomically by the consumer and read atomically by
	// the producer.
	vals []unsafe.Pointer
}

const dequeueBits = 32

// dequeueLimit is the maximum size of a bufDequeue.
//
// This must be at most (1<<dequeueBits)/2 because detecting fullness
// depends on wrapping around the ring buffer without wrapping around
// the index. We divide by 4 so this fits in an int on 32-bit.
const dequeueLimit = (1 << dequeueBits) / 4

func (d *bufDequeue) unpack(ptrs uint64) (head, tail uint32) {
	const mask = 1<<dequeueBits - 1
	head = uint32((ptrs >> dequeueBits) & mask)
	tail = uint32(ptrs & mask)
	return
}

func (d *bufDequeue) pack(head, tail uint32) uint64 {
	const mask = 1<<dequeueBits - 1
	return (uint64(head) << dequeueBits) |
		uint64(tail&mask)
}

// pushHead adds val at the head of the queue. It returns false if the
// queue is full. It is safe for concurrent producers.
func (d *bufDequeue) pushHead(val unsafe.Pointer) bool {
	var slot *unsafe.Pointer
	for {
		ptrs := atomic.LoadUint64(&d.headTail)
		head, tail := d.unpack(ptrs)
		if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
			// Queue is full.
			return false
		}
		// Reserve the head slot before writing to it so that
		// concurrent producers never claim the same slot.
		ptrs2 := d.pack(head+1, tail)
		if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
			slot = &d.vals[head&uint32(len(d.vals)-1)]
			break
		}
	}
	// Publish the value; popTail spins until the slot becomes non-nil.
	atomic.StorePointer(slot, val)
	return true
}

// popTail removes and returns the element at the tail of the queue.
// It returns false if the queue is empty. It assumes a single consumer.
func (d *bufDequeue) popTail() (unsafe.Pointer, bool) {
	ptrs := atomic.LoadUint64(&d.headTail)
	head, tail := d.unpack(ptrs)
	if tail == head {
		// Queue is empty.
		return nil, false
	}
	slot := &d.vals[tail&uint32(len(d.vals)-1)]
	var val unsafe.Pointer
	for {
		val = atomic.LoadPointer(slot)
		if val != nil {
			break
		}
		// A producer has reserved this slot but has not finished
		// writing it yet; spin until the value appears.
	}

	// Tell pushHead that we're done with this slot. Zeroing the
	// slot is also important so we don't leave behind references
	// that could keep this object live longer than necessary.
	atomic.StorePointer(slot, nil)

	// Advance tail. When tail is about to overflow its 32 bits, wrap
	// it back to zero without carrying into the head half of headTail.
	if tail < math.MaxUint32 {
		atomic.AddUint64(&d.headTail, 1)
	} else {
		atomic.AddUint64(&d.headTail, ^uint64(math.MaxUint32-1))
	}
	return val, true
}

// bufChain is a dynamically-sized version of bufDequeue.
//
// This is implemented as a doubly-linked list queue of bufDequeues
// where each dequeue is double the size of the previous one. Once a
// dequeue fills up, this allocates a new one and only ever pushes to
// the latest dequeue. Pops happen from the other end of the list and
// once a dequeue is exhausted, it gets removed from the list.
type bufChain struct {
	// head is the bufDequeue to push to. Several producers may push
	// concurrently, so it is read and written atomically.
	head *bufChainElt

	// tail is the bufDequeue to popTail from. This is accessed
	// by consumers, so reads and writes must be atomic.
	tail *bufChainElt

	// chainStatus is 1 while a producer is growing the chain, so that
	// only one new dequeue is allocated at a time.
	chainStatus int32
}

type bufChainElt struct {
	bufDequeue

	// next and prev link to the adjacent bufChainElts in this
	// bufChain.
	//
	// next is written atomically by the producer and read
	// atomically by the consumer. It only transitions from nil to
	// non-nil.
	//
	// prev is written atomically by the consumer and read
	// atomically by the producer. It only transitions from
	// non-nil to nil.
	next, prev *bufChainElt
}

func storePoolChainElt(pp **bufChainElt, v *bufChainElt) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
}

func loadPoolChainElt(pp **bufChainElt) *bufChainElt {
	return (*bufChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
}

// new initializes the chain with a single dequeue of initSize slots.
// initSize must be a power of two.
func (c *bufChain) new(initSize int) {
	d := new(bufChainElt)
	d.vals = make([]unsafe.Pointer, initSize)
	storePoolChainElt(&c.head, d)
	storePoolChainElt(&c.tail, d)
}

func (c *bufChain) pushHead(val unsafe.Pointer) {
	for {
		d := loadPoolChainElt(&c.head)

		if d.pushHead(val) {
			return
		}

		// The current dequeue is full. Only one producer at a time
		// may grow the chain; the others keep retrying until the new,
		// larger dequeue has been published as the head.
		if atomic.CompareAndSwapInt32(&c.chainStatus, 0, 1) {
			newSize := len(d.vals) * 2
			if newSize >= dequeueLimit {
				// Can't make it any bigger.
				newSize = dequeueLimit
			}

			d2 := &bufChainElt{prev: d}
			d2.vals = make([]unsafe.Pointer, newSize)
			storePoolChainElt(&c.head, d2)
			storePoolChainElt(&d.next, d2)
			d2.pushHead(val)
			atomic.SwapInt32(&c.chainStatus, 0)
			return
		}
	}
}

func (c *bufChain) popTail() (unsafe.Pointer, bool) {
	d := loadPoolChainElt(&c.tail)
	if d == nil {
		return nil, false
	}

	for {
		// It's important that we load the next pointer
		// *before* popping the tail. In general, d may be
		// transiently empty, but if next is non-nil before
		// the pop and the pop fails, then d is permanently
		// empty, which is the only condition under which it's
		// safe to drop d from the chain.
		d2 := loadPoolChainElt(&d.next)

		if val, ok := d.popTail(); ok {
			return val, ok
		}

		if d2 == nil {
			// This is the only dequeue. It's empty right
			// now, but could be pushed to in the future.
			return nil, false
		}

		// The tail of the chain has been drained, so move on
		// to the next dequeue. Try to drop it from the chain
		// so the next pop doesn't have to look at the empty
		// dequeue again.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
			// We won the race. Clear the prev pointer so
			// the garbage collector can collect the empty
			// dequeue and so popHead doesn't back up
			// further than necessary.
			storePoolChainElt(&d2.prev, nil)
		}
		d = d2
	}
}
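
// exampleBufChain is not part of the original file; it is only an illustrative
// sketch of how the queues above use a bufChain, assuming a single consumer
// goroutine. The helper name is hypothetical.
func exampleBufChain() *common.MuxPackager {
	chain := new(bufChain)
	chain.new(4) // the initial ring size must be a power of two

	// Producer side: store a pointer; the chain grows itself when a ring fills.
	pack := new(common.MuxPackager)
	chain.pushHead(unsafe.Pointer(pack))

	// Consumer side: popTail reports false once the chain is drained.
	ptr, ok := chain.popTail()
	if !ok {
		return nil
	}
	return (*common.MuxPackager)(ptr)
}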