taosdata/TDengine, commit a466e82c

Author:  dengyihao
Date:    Jan 29, 2022
Parent:  6c6573cc

    add more optim
Showing 4 changed files with 63 additions and 32 deletions:

  source/libs/transport/inc/transComm.h    +7   -1
  source/libs/transport/src/transCli.c     +15  -14
  source/libs/transport/src/transComm.c    +26  -4
  source/libs/transport/src/transSrv.c     +15  -13
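In outline, the change replaces the single per-thread message queue (pThrd->msg guarded by pThrd->msgMtx) with one SAsyncItem per uv_async_t handle, each holding its own pending queue and mutex. transSendAsync() now takes the message's queue node, pushes it onto the chosen handle's queue under that handle's lock, and wakes the loop; the async callbacks drain an entire queue per wakeup with QUEUE_MOVE. Minimal, hedged C sketches of each piece follow the corresponding file diffs below.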
source/libs/transport/inc/transComm.h

@@ -213,6 +213,12 @@ typedef struct SConnBuffer {
 typedef void (*AsyncCB)(uv_async_t* handle);

+typedef struct {
+  void*           pThrd;
+  queue           qmsg;
+  pthread_mutex_t mtx;  // protect qmsg;
+} SAsyncItem;
+
 typedef struct {
   int index;
   int nAsync;
@@ -221,7 +227,7 @@ typedef struct {
 SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, void* arg, AsyncCB cb);
 void        transDestroyAsyncPool(SAsyncPool* pool);
-int         transSendAsync(SAsyncPool* pool);
+int         transSendAsync(SAsyncPool* pool, queue* mq);

 int transInitBuffer(SConnBuffer* buf);
 int transClearBuffer(SConnBuffer* buf);
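The SAsyncItem struct above is the heart of the change. Below is a minimal sketch, not TDengine code, of the same pattern against plain libuv: one heap-allocated item per async handle, carrying its own queue and lock. The names AsyncItem, Msg, and attach_item are illustrative; the real code uses the intrusive queue macros rather than a linked list.

#include <pthread.h>
#include <stdlib.h>
#include <uv.h>

typedef struct Msg {      /* stand-in for SCliMsg / SSrvMsg */
  struct Msg* next;
  int         payload;
} Msg;

typedef struct {
  void*           pThrd;  /* owning worker thread, as in SAsyncItem */
  Msg*            head;   /* pending messages (LIFO list for brevity) */
  pthread_mutex_t mtx;    /* protects head only */
} AsyncItem;

/* One item per handle, mirroring what transCreateAsyncPool now does:
 * the handle's data pointer carries its own queue + lock instead of
 * a bare pointer to the worker thread. */
static void attach_item(uv_loop_t* loop, uv_async_t* async,
                        uv_async_cb cb, void* thrd) {
  uv_async_init(loop, async, cb);
  AsyncItem* item = calloc(1, sizeof(AsyncItem));
  item->pThrd = thrd;
  pthread_mutex_init(&item->mtx, NULL);
  async->data = item;
}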
source/libs/transport/src/transCli.c

@@ -432,14 +432,15 @@ static void clientHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) {
   }
 }
 static void clientAsyncCb(uv_async_t* handle) {
-  SCliThrdObj* pThrd = handle->data;
+  SAsyncItem*  item = handle->data;
+  SCliThrdObj* pThrd = item->pThrd;
   SCliMsg*     pMsg = NULL;
   queue        wq;

   // batch process to avoid to lock/unlock frequently
-  pthread_mutex_lock(&pThrd->msgMtx);
-  QUEUE_MOVE(&pThrd->msg, &wq);
-  pthread_mutex_unlock(&pThrd->msgMtx);
+  pthread_mutex_lock(&item->mtx);
+  QUEUE_MOVE(&item->qmsg, &wq);
+  pthread_mutex_unlock(&item->mtx);

   int count = 0;
   while (!QUEUE_IS_EMPTY(&wq)) {
@@ -548,11 +549,11 @@ static void clientSendQuit(SCliThrdObj* thrd) {
   SCliMsg* msg = calloc(1, sizeof(SCliMsg));
   msg->ctx = NULL;  //

-  pthread_mutex_lock(&thrd->msgMtx);
-  QUEUE_PUSH(&thrd->msg, &msg->q);
-  pthread_mutex_unlock(&thrd->msgMtx);
+  // pthread_mutex_lock(&thrd->msgMtx);
+  // QUEUE_PUSH(&thrd->msg, &msg->q);
+  // pthread_mutex_unlock(&thrd->msgMtx);

-  transSendAsync(thrd->asyncPool);
+  transSendAsync(thrd->asyncPool, &msg->q);
   // uv_async_send(thrd->cliAsync);
 }
 void taosCloseClient(void* arg) {
@@ -598,14 +599,14 @@ void rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t*
   SCliThrdObj* thrd = ((SClientObj*)pRpc->tcphandle)->pThreadObj[index % pRpc->numOfThreads];

-  pthread_mutex_lock(&thrd->msgMtx);
-  QUEUE_PUSH(&thrd->msg, &cliMsg->q);
-  pthread_mutex_unlock(&thrd->msgMtx);
+  // pthread_mutex_lock(&thrd->msgMtx);
+  // QUEUE_PUSH(&thrd->msg, &cliMsg->q);
+  // pthread_mutex_unlock(&thrd->msgMtx);

-  int start = taosGetTimestampUs();
-  transSendAsync(thrd->asyncPool);
+  // int start = taosGetTimestampUs();
+  transSendAsync(thrd->asyncPool, &(cliMsg->q));
   // uv_async_send(thrd->cliAsync);
-  int end = taosGetTimestampUs() - start;
+  // int end = taosGetTimestampUs() - start;
   // tError("client sent to rpc, time cost: %d", (int)end);
 }
 #endif
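Both reworked callbacks (clientAsyncCb here, uvWorkerAsyncCb in transSrv.c) keep the same batch-drain shape: move the whole pending queue out in one lock/unlock, then process it without holding the lock. Continuing the sketch above (same Msg and AsyncItem types), roughly:

/* Batch drain: one lock/unlock per wakeup, however many messages are
 * queued, which is the point of the "batch process" comment above. */
static void drain_cb(uv_async_t* handle) {
  AsyncItem* item = handle->data;

  pthread_mutex_lock(&item->mtx);
  Msg* wq = item->head;   /* steal the entire pending list ...        */
  item->head = NULL;      /* ... leaving an empty queue for producers */
  pthread_mutex_unlock(&item->mtx);

  while (wq != NULL) {    /* process outside the critical section */
    Msg* next = wq->next;
    /* dispatch wq->payload on item->pThrd here */
    free(wq);
    wq = next;
  }
}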
source/libs/transport/src/transComm.c

@@ -247,7 +247,7 @@ int transDestroyBuffer(SConnBuffer* buf) {
 }

 SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, void* arg, AsyncCB cb) {
-  static int sz = 20;
+  static int sz = 10;
   SAsyncPool* pool = calloc(1, sizeof(SAsyncPool));
   pool->index = 0;
@@ -257,24 +257,46 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, void* arg, AsyncCB cb) {
   for (int i = 0; i < pool->nAsync; i++) {
     uv_async_t* async = &(pool->asyncs[i]);
     uv_async_init(loop, async, cb);
-    async->data = arg;
+
+    SAsyncItem* item = calloc(1, sizeof(SAsyncItem));
+    item->pThrd = arg;
+    QUEUE_INIT(&item->qmsg);
+    pthread_mutex_init(&item->mtx, NULL);
+
+    async->data = item;
   }
   return pool;
 }
 void transDestroyAsyncPool(SAsyncPool* pool) {
   for (int i = 0; i < pool->nAsync; i++) {
     uv_async_t* async = &(pool->asyncs[i]);
+
+    SAsyncItem* item = async->data;
+    pthread_mutex_destroy(&item->mtx);
+    free(item);
   }
   free(pool->asyncs);
   free(pool);
 }
-int transSendAsync(SAsyncPool* pool) {
+int transSendAsync(SAsyncPool* pool, queue* q) {
   int idx = pool->index;
   idx = idx % pool->nAsync;
   // no need mutex here
   if (pool->index++ > pool->nAsync) {
     pool->index = 0;
   }
-  return uv_async_send(&(pool->asyncs[idx]));
+  uv_async_t* async = &(pool->asyncs[idx]);
+  SAsyncItem* item = async->data;
+
+  int64_t st = taosGetTimestampUs();
+  pthread_mutex_lock(&item->mtx);
+  QUEUE_PUSH(&item->qmsg, q);
+  pthread_mutex_unlock(&item->mtx);
+  int64_t el = taosGetTimestampUs() - st;
+  if (el > 50) {
+    // tInfo("lock and unlock cost: %d", (int)el);
+  }
+  return uv_async_send(async);
 }
 #endif
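The new transSendAsync() spreads producers across the pool round-robin and tolerates an unsynchronized index, since idx is always taken modulo nAsync. A sketch of the same dispatch with the illustrative types from the earlier sketches:

typedef struct {          /* stand-in for SAsyncPool */
  int         index;
  int         nAsync;
  uv_async_t* asyncs;
} AsyncPool;

static int send_async(AsyncPool* pool, Msg* msg) {
  int idx = pool->index % pool->nAsync;  /* round-robin pick */
  if (pool->index++ > pool->nAsync) {    /* unsynchronized, like the   */
    pool->index = 0;                     /* original; a stale value is */
  }                                      /* harmless under the modulo  */
  uv_async_t* async = &pool->asyncs[idx];
  AsyncItem*  item = async->data;

  pthread_mutex_lock(&item->mtx);        /* contention is now split */
  msg->next = item->head;                /* across nAsync locks     */
  item->head = msg;
  pthread_mutex_unlock(&item->mtx);

  return uv_async_send(async);           /* libuv may coalesce wakeups */
}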
source/libs/transport/src/transSrv.c

@@ -376,13 +376,15 @@ static void destroySmsg(SSrvMsg* smsg) {
   free(smsg);
 }
 void uvWorkerAsyncCb(uv_async_t* handle) {
-  SWorkThrdObj* pThrd = handle->data;
+  SAsyncItem*   item = handle->data;
+  SWorkThrdObj* pThrd = item->pThrd;
   SSrvConn*     conn = NULL;
   queue         wq;

   // batch process to avoid to lock/unlock frequently
-  pthread_mutex_lock(&pThrd->msgMtx);
-  QUEUE_MOVE(&pThrd->msg, &wq);
-  pthread_mutex_unlock(&pThrd->msgMtx);
+  pthread_mutex_lock(&item->mtx);
+  QUEUE_MOVE(&item->qmsg, &wq);
+  pthread_mutex_unlock(&item->mtx);
   // pthread_mutex_unlock(&mtx);

   while (!QUEUE_IS_EMPTY(&wq)) {
     queue* head = QUEUE_HEAD(&wq);
@@ -539,7 +541,7 @@ static bool addHandleToAcceptloop(void* arg) {
     tError("failed to bind: %s", uv_err_name(err));
     return false;
   }
-  if ((err = uv_listen((uv_stream_t*)&srv->server, 128, uvOnAcceptCb)) != 0) {
+  if ((err = uv_listen((uv_stream_t*)&srv->server, 512, uvOnAcceptCb)) != 0) {
     tError("failed to listen: %s", uv_err_name(err));
     return false;
   }
@@ -671,12 +673,12 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) {
 void sendQuitToWorkThrd(SWorkThrdObj* pThrd) {
   SSrvMsg* srvMsg = calloc(1, sizeof(SSrvMsg));

-  pthread_mutex_lock(&pThrd->msgMtx);
-  QUEUE_PUSH(&pThrd->msg, &srvMsg->q);
-  pthread_mutex_unlock(&pThrd->msgMtx);
+  // pthread_mutex_lock(&pThrd->msgMtx);
+  // QUEUE_PUSH(&pThrd->msg, &srvMsg->q);
+  // pthread_mutex_unlock(&pThrd->msgMtx);

   tDebug("send quit msg to work thread");
-  transSendAsync(pThrd->asyncPool);
+  transSendAsync(pThrd->asyncPool, &srvMsg->q);
   // uv_async_send(pThrd->workerAsync);
 }
@@ -712,12 +714,12 @@ void rpcSendResponse(const SRpcMsg* pMsg) {
   srvMsg->pConn = pConn;
   srvMsg->msg = *pMsg;

-  pthread_mutex_lock(&pThrd->msgMtx);
-  QUEUE_PUSH(&pThrd->msg, &srvMsg->q);
-  pthread_mutex_unlock(&pThrd->msgMtx);
+  // pthread_mutex_lock(&pThrd->msgMtx);
+  // QUEUE_PUSH(&pThrd->msg, &srvMsg->q);
+  // pthread_mutex_unlock(&pThrd->msgMtx);

   tDebug("conn %p start to send resp", pConn);
-  transSendAsync(pThrd->asyncPool);
+  transSendAsync(pThrd->asyncPool, &srvMsg->q);
   // uv_async_send(pThrd->workerAsync);
 }
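The server-side changes mirror the client: uvWorkerAsyncCb drains its handle's SAsyncItem queue, the senders push through transSendAsync(pool, &msg->q), and, unrelatedly, the accept backlog passed to uv_listen grows from 128 to 512. In the sketches, attach_item roughly corresponds to the loop body of transCreateAsyncPool, drain_cb to clientAsyncCb/uvWorkerAsyncCb, and send_async to transSendAsync; taken together they compile against libuv alone, e.g. cc sketch.c -luv -lpthread.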
登录