Merge pull request #350 from aceld/feature/aceld
Fix: GlobalObject confusion when a program starts both a Server and a Client
aceld authored Dec 6, 2024
2 parents 59e7837 + d3ec644 commit 3083008
Showing 2 changed files with 67 additions and 15 deletions.
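
In short: Client.Restart() used to zero the process-wide worker pool setting, zconf.GlobalObject.WorkerPoolSize, so a Server running in the same process silently lost its worker pool whenever a client restarted. A minimal sketch of the affected scenario, assuming zinx's public znet API (the address, port, and exact calls below are illustrative, not taken from this commit):

package main

import (
	"fmt"

	"github.com/aceld/zinx/znet"
)

func main() {
	// A server and a client sharing one process: both consult
	// (and, before this fix, the client also wrote to) zconf.GlobalObject.
	s := znet.NewServer()
	go s.Serve()

	c := znet.NewClient("127.0.0.1", 8999)
	c.Start()

	// Before this commit, c.Restart() executed
	//     zconf.GlobalObject.WorkerPoolSize = 0
	// which turned off the server's worker pool as well, because both
	// sides read the same global configuration object.
	c.Restart()

	fmt.Println("server and client running in one process")
	select {}
}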
znet/client.go (8 changes: 2 additions & 6 deletions)

@@ -7,7 +7,6 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/aceld/zinx/zconf"
 	"github.com/aceld/zinx/zdecoder"
 	"github.com/aceld/zinx/ziface"
 	"github.com/aceld/zinx/zlog"
@@ -59,7 +58,7 @@ func NewClient(ip string, port int, opts ...ClientOption) ziface.IClient {
 		Ip:   ip,
 		Port: port,
 
-		msgHandler: newMsgHandle(),
+		msgHandler: newCliMsgHandle(),
 		packet:  zpack.Factory().NewPack(ziface.ZinxDataPack), // Default to using Zinx's TLV packet format (默认使用zinx的TLV封包方式)
 		decoder: zdecoder.NewTLVDecoder(),                     // Default to using Zinx's TLV decoder (默认使用zinx的TLV解码器)
 		version: "tcp",
@@ -83,7 +82,7 @@ func NewWsClient(ip string, port int, opts ...ClientOption) ziface.IClient {
 		Ip:   ip,
 		Port: port,
 
-		msgHandler: newMsgHandle(),
+		msgHandler: newCliMsgHandle(),
 		packet:  zpack.Factory().NewPack(ziface.ZinxDataPack), // Default to using Zinx's TLV packet format (默认使用zinx的TLV封包方式)
 		decoder: zdecoder.NewTLVDecoder(),                     // Default to using Zinx's TLV decoder (默认使用zinx的TLV解码器)
 		version: "websocket",
@@ -113,9 +112,6 @@ func NewTLSClient(ip string, port int, opts ...ClientOption) ziface.IClient {
 func (c *Client) Restart() {
 	c.exitChan = make(chan struct{})
 
-	// Set worker pool size to 0 to turn off the worker pool in the client (客户端将协程池关闭)
-	zconf.GlobalObject.WorkerPoolSize = 0
-
 	go func() {
 
 		addr := &net.TCPAddr{
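
The client-side changes remove every write to shared state: the zconf import is dropped, all three constructors build their handler with newCliMsgHandle(), and Restart() no longer touches the global. A short usage sketch under the same assumptions as above (illustrative address and port):

package main

import "github.com/aceld/zinx/znet"

func main() {
	// After the fix, a client restart is safe next to a running server:
	// newCliMsgHandle() pins WorkerPoolSize to 0 on the client's own
	// MsgHandle instead of zeroing zconf.GlobalObject.WorkerPoolSize.
	c := znet.NewClient("127.0.0.1", 8999)
	c.Start()
	c.Restart()
	select {}
}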
znet/msghandler.go (74 changes: 65 additions & 9 deletions)

@@ -50,7 +50,7 @@ type MsgHandle struct {
 }
 
 // newMsgHandle creates MsgHandle
-// zinxRole: IServer/IClient
+// zinxRole: IServer
 func newMsgHandle() *MsgHandle {
 	var freeWorkers map[uint32]struct{}
 	var extraFreeWorkers map[uint32]struct{}
@@ -84,17 +84,73 @@ func newMsgHandle() *MsgHandle {
 	}
 
 	handle := &MsgHandle{
-		Apis:           make(map[uint32]ziface.IRouter),
-		RouterSlices:   NewRouterSlices(),
-		WorkerPoolSize: zconf.GlobalObject.WorkerPoolSize,
-		// One worker corresponds to one queue (一个worker对应一个queue)
-		TaskQueue:   make([]chan ziface.IRequest, TaskQueueLen),
-		freeWorkers: freeWorkers,
-		builder:     newChainBuilder(),
+		Apis:         make(map[uint32]ziface.IRouter),
+		RouterSlices: NewRouterSlices(),
+		freeWorkers:  freeWorkers,
+		builder:      newChainBuilder(),
 		// Worker IDs that can additionally be allocated on demand (可额外临时分配的workerID集合)
 		extraFreeWorkers: extraFreeWorkers,
 	}
 
+	// server
+	handle.WorkerPoolSize = zconf.GlobalObject.WorkerPoolSize
+	// One worker corresponds to one queue (一个worker对应一个queue)
+	handle.TaskQueue = make([]chan ziface.IRequest, TaskQueueLen)
+
 	// It is necessary to add the MsgHandle to the responsibility chain here, and it is the last link in the chain. After decoding in the MsgHandle, data distribution is done by the router
 	// (此处必须把 msghandler 添加到责任链中,并且是责任链最后一环,在msghandler中进行解码后由router做数据分发)
 	handle.builder.Tail(handle)
 	return handle
 }

+// newCliMsgHandle creates MsgHandle
+// zinxRole: IClient
+func newCliMsgHandle() *MsgHandle {
+	var freeWorkers map[uint32]struct{}
+	var extraFreeWorkers map[uint32]struct{}
+
+	if zconf.GlobalObject.WorkerMode == zconf.WorkerModeBind {
+		// Assign a worker to each connection, to avoid interference when multiple connections share the same worker
+		// MaxWorkerTaskLen can also be reduced, for example to 50, since each worker's load is lighter
+		// (为每个链接分配一个worker,避免同一worker处理多个链接时的互相影响)
+		// (同时可以减小MaxWorkerTaskLen,比如50,因为每个worker的负担减轻了)
+		zconf.GlobalObject.WorkerPoolSize = uint32(zconf.GlobalObject.MaxConn)
+		freeWorkers = make(map[uint32]struct{}, zconf.GlobalObject.WorkerPoolSize)
+		for i := uint32(0); i < zconf.GlobalObject.WorkerPoolSize; i++ {
+			freeWorkers[i] = struct{}{}
+		}
+	}
+
+	TaskQueueLen := zconf.GlobalObject.WorkerPoolSize
+
+	if zconf.GlobalObject.WorkerMode == zconf.WorkerModeDynamicBind {
+		zlog.Ins().DebugF("WorkerMode = %s", zconf.WorkerModeDynamicBind)
+		freeWorkers = make(map[uint32]struct{}, zconf.GlobalObject.WorkerPoolSize)
+		for i := uint32(0); i < zconf.GlobalObject.WorkerPoolSize; i++ {
+			freeWorkers[i] = struct{}{}
+		}
+
+		extraFreeWorkers = make(map[uint32]struct{}, zconf.GlobalObject.MaxConn-int(zconf.GlobalObject.WorkerPoolSize))
+		for i := zconf.GlobalObject.WorkerPoolSize; i < uint32(zconf.GlobalObject.MaxConn); i++ {
+			extraFreeWorkers[i] = struct{}{}
+		}
+		TaskQueueLen = uint32(zconf.GlobalObject.MaxConn)
+	}
+
+	handle := &MsgHandle{
+		Apis:         make(map[uint32]ziface.IRouter),
+		RouterSlices: NewRouterSlices(),
+		freeWorkers:  freeWorkers,
+		builder:      newChainBuilder(),
+		// Worker IDs that can additionally be allocated on demand (可额外临时分配的workerID集合)
+		extraFreeWorkers: extraFreeWorkers,
+	}
+
+	// client: set worker pool size to 0 to turn off the worker pool in the client (客户端将协程池关闭)
+	handle.WorkerPoolSize = 0
+	// One worker corresponds to one queue (一个worker对应一个queue)
+	handle.TaskQueue = make([]chan ziface.IRequest, TaskQueueLen)
+
+	// It is necessary to add the MsgHandle to the responsibility chain here, and it is the last link in the chain. After decoding in the MsgHandle, data distribution is done by the router
+	// (此处必须把 msghandler 添加到责任链中,并且是责任链最后一环,在msghandler中进行解码后由router做数据分发)
+	handle.builder.Tail(handle)
+	return handle
+}
@@ -202,7 +258,7 @@ func (mh *MsgHandle) Intercept(chain ziface.IChain) ziface.IcResp {
 	switch request.(type) {
 	case ziface.IRequest:
 		iRequest := request.(ziface.IRequest)
-		if zconf.GlobalObject.WorkerPoolSize > 0 {
+		if mh.WorkerPoolSize > 0 {
 			// If the worker pool mechanism has been started, hand over the message to the worker for processing
 			// (已经启动工作池机制,将消息交给Worker处理)
 			mh.SendMsgToTaskQueue(iRequest)
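
The handler-side changes complete the fix: newMsgHandle() (server) still reads the pool size from zconf.GlobalObject once at construction, newCliMsgHandle() (client) pins it to 0 on the instance, and Intercept() now branches on mh.WorkerPoolSize instead of the global. A self-contained sketch of that per-instance dispatch pattern, using simplified stand-in types rather than zinx's real IRequest machinery:

package main

import "fmt"

// handler mimics MsgHandle's per-instance pool switch: a server-style
// instance has workerPoolSize > 0, a client-style instance has 0.
type handler struct {
	workerPoolSize uint32
	taskQueue      []chan string
}

func (h *handler) intercept(msg string) {
	if h.workerPoolSize > 0 {
		// Pool enabled (server): hand the message to a pooled worker queue.
		h.taskQueue[0] <- msg
		return
	}
	// Pool disabled (client): handle the message inline.
	fmt.Println("handled inline:", msg)
}

func main() {
	server := &handler{workerPoolSize: 1, taskQueue: []chan string{make(chan string, 1)}}
	client := &handler{workerPoolSize: 0}

	client.intercept("client message") // handled directly
	server.intercept("server message") // queued for a worker
	fmt.Println("dequeued by worker:", <-server.taskQueue[0])
}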
