Commit 445d7f2d
Authored Jun 24, 2022 by dengyihao
Parent: 87f4f536

feat: refactor rpc quit

Showing 2 changed files with 83 additions and 61 deletions (+83 -61):
  source/client/src/clientEnv.c          +2  -1
  source/libs/transport/src/transCli.c   +81 -60
source/client/src/clientEnv.c
@@ -85,7 +85,8 @@ void closeTransporter(STscObj *pTscObj) {
 }
 
 static bool clientRpcRfp(int32_t code) {
-  if (code == TSDB_CODE_RPC_REDIRECT) {
+  if (code == TSDB_CODE_RPC_REDIRECT || code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_NODE_NOT_DEPLOYED ||
+      code == TSDB_CODE_SYN_NOT_LEADER) {
     return true;
   } else {
     return false;
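The clientEnv.c change widens the client's RPC retry filter: besides an explicit redirect, network-unavailable, node-not-deployed, and not-leader responses now also ask the transport layer to retry. A minimal standalone sketch of that predicate is shown below; the DEMO_* codes are illustrative stand-ins, not the real TSDB_CODE_* values from the TDengine headers.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins only; the real error codes come from TDengine's error-code headers. */
enum { DEMO_OK = 0, DEMO_RPC_REDIRECT, DEMO_RPC_NETWORK_UNAVAIL, DEMO_NODE_NOT_DEPLOYED, DEMO_SYN_NOT_LEADER };

/* Mirrors the shape of the updated clientRpcRfp: any of these codes marks the reply as retryable. */
static bool demoRpcRetryFilter(int code) {
  return code == DEMO_RPC_REDIRECT || code == DEMO_RPC_NETWORK_UNAVAIL || code == DEMO_NODE_NOT_DEPLOYED ||
         code == DEMO_SYN_NOT_LEADER;
}

int main(void) {
  int codes[] = {DEMO_OK, DEMO_RPC_REDIRECT, DEMO_RPC_NETWORK_UNAVAIL, DEMO_SYN_NOT_LEADER};
  for (int i = 0; i < 4; i++) {
    printf("code %d -> retry: %s\n", codes[i], demoRpcRetryFilter(codes[i]) ? "yes" : "no");
  }
  return 0;
}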
source/libs/transport/src/transCli.c
@@ -111,6 +111,13 @@ static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle o
 static void cliDestroy(uv_handle_t* handle);
 static void cliSend(SCliConn* pConn);
 
+static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
+  if (code != 0) return false;
+  if (pCtx->retryCnt == 0) return false;
+  if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
+  return true;
+}
+
 void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
 
 /*
  * set TCP connection timeout per-socket level
@@ -154,7 +161,6 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
     destroyCmsg(pMsg);
   }
 }
-
 #define CLI_RELEASE_UV(loop)        \
   do {                              \
     uv_walk(loop, cliWalkCb, NULL); \
@@ -183,7 +189,6 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
 #define CONN_SHOULD_RELEASE(conn, head)                            \
   do {                                                             \
     if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \
-      int status = conn->status;                                   \
       uint64_t ahandle = head->ahandle;                            \
       CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);                   \
       transClearBuffer(&conn->readBuf);                            \
@@ -194,9 +199,7 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
       }                                                         \
       destroyCmsg(pMsg);                                        \
       cliReleaseUnfinishedMsg(conn);                            \
-      if (status != ConnInPool) {                               \
-        addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); \
-      }                                                         \
+      addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);   \
       return;                                                   \
     }                                                           \
   } while (0)
@@ -262,8 +265,25 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
 #define REQUEST_PERSIS_HANDLE(msg) ((msg)->info.persistHandle == 1)
 #define REQUEST_RELEASE_HANDLE(cmsg) ((cmsg)->type == Release)
 
+#define EPSET_GET_SIZE(epSet) (epSet)->numOfEps
 #define EPSET_GET_INUSE_IP(epSet) ((epSet)->eps[(epSet)->inUse].fqdn)
 #define EPSET_GET_INUSE_PORT(epSet) ((epSet)->eps[(epSet)->inUse].port)
+#define EPSET_FORWARD_INUSE(epSet)                               \
+  do {                                                           \
+    (epSet)->inUse = (++((epSet)->inUse)) % ((epSet)->numOfEps); \
+  } while (0)
+
+#define EPSET_DEBUG_STR(epSet, buf)                                                                                  \
+  do {                                                                                                               \
+    int len = snprintf(buf, sizeof(buf), "epset:{");                                                                 \
+    for (int i = 0; i < (epSet)->numOfEps; i++) {                                                                    \
+      if (i == (epSet)->numOfEps - 1) {                                                                              \
+        len += snprintf(buf + len, sizeof(buf) - len, "%d. %s:%d", i, (epSet)->eps[i].fqdn, (epSet)->eps[i].port);   \
+      } else {                                                                                                       \
+        len += snprintf(buf + len, sizeof(buf) - len, "%d. %s:%d, ", i, (epSet)->eps[i].fqdn, (epSet)->eps[i].port); \
+      }                                                                                                              \
+    }                                                                                                                \
+    len += snprintf(buf + len, sizeof(buf) - len, "}");                                                              \
+  } while (0);
 
 static void* cliWorkThread(void* arg);
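For readers unfamiliar with the endpoint-set helpers, the sketch below models what the two new macros do: EPSET_FORWARD_INUSE advances the in-use endpoint round-robin, and EPSET_DEBUG_STR renders the whole set into a log buffer. The DemoEp/DemoEpSet types are simplified stand-ins that carry only the fields the macros touch (eps, numOfEps, inUse, fqdn, port); the real SEpSet lives in the TDengine headers.

#include <stdio.h>

/* Simplified stand-ins for SEp/SEpSet, carrying only the fields the macros use. */
typedef struct { char fqdn[64]; int port; } DemoEp;
typedef struct { int inUse; int numOfEps; DemoEp eps[3]; } DemoEpSet;

/* Same behavior as EPSET_FORWARD_INUSE, written as a function for clarity. */
static void demoEpsetForwardInUse(DemoEpSet* epSet) {
  epSet->inUse = (epSet->inUse + 1) % epSet->numOfEps;  /* round-robin to the next endpoint */
}

/* Same behavior as EPSET_DEBUG_STR: format the whole endpoint set for a trace message. */
static void demoEpsetDebugStr(DemoEpSet* epSet, char* buf, int cap) {
  int len = snprintf(buf, cap, "epset:{");
  for (int i = 0; i < epSet->numOfEps; i++) {
    const char* sep = (i == epSet->numOfEps - 1) ? "" : ", ";
    len += snprintf(buf + len, cap - len, "%d. %s:%d%s", i, epSet->eps[i].fqdn, epSet->eps[i].port, sep);
  }
  len += snprintf(buf + len, cap - len, "}");
}

int main(void) {
  DemoEpSet es = {.inUse = 0, .numOfEps = 2, .eps = {{"node-a", 6030}, {"node-b", 6030}}};
  char buf[256] = {0};
  demoEpsetDebugStr(&es, buf, sizeof(buf));
  printf("%s, inUse=%d\n", buf, es.inUse);        /* epset:{0. node-a:6030, 1. node-b:6030}, inUse=0 */
  demoEpsetForwardInUse(&es);
  printf("after forward, inUse=%d\n", es.inUse);  /* 1 */
  return 0;
}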
@@ -492,6 +512,10 @@ static void allocConnRef(SCliConn* conn, bool update) {
   conn->refId = exh->refId;
 }
 
 static void addConnToPool(void* pool, SCliConn* conn) {
+  if (conn->status == ConnInPool) {
+    assert(0);
+    return;
+  }
   SCliThrd* thrd = conn->hostThrd;
   CONN_HANDLE_THREAD_QUIT(thrd);
@@ -505,7 +529,7 @@ static void addConnToPool(void* pool, SCliConn* conn) {
   char key[128] = {0};
   CONN_CONSTRUCT_HASH_KEY(key, conn->ip, conn->port);
-  tTrace("%s conn %p added to conn pool, read buf cap: %d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
+  tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
 
   SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
   // list already create before
@@ -751,9 +775,9 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd) {
   STransConnCtx* pCtx = pMsg->ctx;
   conn = getConnFromPool(pThrd->pool, EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet));
   if (conn != NULL) {
-    tTrace("%s conn %p get from conn pool", CONN_GET_INST_LABEL(conn), conn);
+    tTrace("%s conn %p get from conn pool:%p", CONN_GET_INST_LABEL(conn), conn, pThrd->pool);
   } else {
-    tTrace("%s not found conn in conn pool %p", ((STrans*)pThrd->pTransInst)->label, pThrd->pool);
+    tTrace("%s not found conn in conn pool:%p", ((STrans*)pThrd->pTransInst)->label, pThrd->pool);
   }
   return conn;
 }
@@ -773,7 +797,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
   STrans* pTransInst = pThrd->pTransInst;
 
   cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr);
-  transPrintEpSet(&pCtx->epSet);
+  // transPrintEpSet(&pCtx->epSet);
+
   SCliConn* conn = cliGetConn(pMsg, pThrd);
   if (conn != NULL) {
     transCtxMerge(&conn->ctx, &pCtx->appCtx);
@@ -955,11 +980,30 @@ static void doDelayTask(void* param) {
   STaskArg* arg = param;
   SCliMsg*  pMsg = arg->param1;
   SCliThrd* pThrd = arg->param2;
-  taosMemoryFree(arg);
   cliHandleReq(pMsg, pThrd);
+
+  taosMemoryFree(arg);
 }
 
+static void cliSchedMsgToNextNode(SCliMsg* pMsg, SCliThrd* pThrd) {
+  STraceId*      trace = &pMsg->msg.info.traceId;
+  STransConnCtx* pCtx = pMsg->ctx;
+
+  char buf[256] = {0};
+  EPSET_DEBUG_STR(&pCtx->epSet, buf);
+  tGTrace("%s %s, retryCnt:%d, limit:%d", transLabel(pThrd), buf, pCtx->retryCnt + 1, pCtx->retryLimit);
+
+  STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
+  arg->param1 = pMsg;
+  arg->param2 = pThrd;
+  transDQSched(pThrd->delayQueue, doDelayTask, arg, TRANS_RETRY_INTERVAL);
+}
+
+void cliUpdateRetryLimit(int8_t* val, int8_t exp, int8_t newVal) {
+  if (*val != exp) {
+    *val = newVal;
+  }
+}
+
 int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
   SCliThrd* pThrd = pConn->hostThrd;
   STrans*   pTransInst = pThrd->pTransInst;
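This hunk concentrates the "retry later on the delay queue" boilerplate into cliSchedMsgToNextNode, and doDelayTask now frees its STaskArg only after the request has been re-handled. The sketch below models that ownership pattern in isolation; DemoTaskArg, demoSchedMsgToNextNode, and the immediate dispatch are illustrative stand-ins, not the transport library's transDQSched API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for STaskArg: a heap-allocated envelope handed to a delayed task. */
typedef struct {
  void* param1;  /* the message being retried */
  void* param2;  /* the worker thread that owns it */
} DemoTaskArg;

static void demoHandleReq(void* msg, void* thrd) {
  printf("re-sending msg %p on thread %p\n", msg, thrd);
}

/* Mirrors the new doDelayTask ordering: handle the request first, then free the envelope. */
static void demoDelayTask(void* param) {
  DemoTaskArg* arg = param;
  demoHandleReq(arg->param1, arg->param2);
  free(arg);  /* freed only after the request has been handed off */
}

/* Mirrors the shape of cliSchedMsgToNextNode: package msg + thread and hand them to the task.
 * Here the "delay queue" is just an immediate call; the real code defers via transDQSched. */
static void demoSchedMsgToNextNode(void* msg, void* thrd) {
  DemoTaskArg* arg = malloc(sizeof(DemoTaskArg));
  arg->param1 = msg;
  arg->param2 = thrd;
  demoDelayTask(arg);
}

int main(void) {
  int msg = 42, thrd = 7;
  demoSchedMsgToNextNode(&msg, &thrd);
  return 0;
}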
@@ -971,68 +1015,45 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
   }
 
   STransConnCtx* pCtx = pMsg->ctx;
-  SEpSet*        pEpSet = &pCtx->epSet;
 
-  if (pCtx->retryCount == 0) {
+  if (pCtx->retryCnt == 0) {
     pCtx->origEpSet = pCtx->epSet;
   }
   /*
    * no retry
    * 1. query conn
    * 2. rpc thread already receive quit msg
    */
-  if (CONN_NO_PERSIST_BY_APP(pConn)) {
-    if ((pTransInst->retry != NULL && pEpSet->numOfEps > 1 && (pTransInst->retry(pResp->code))) ||
-        (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pResp->code == TSDB_CODE_APP_NOT_READY ||
-         pResp->code == TSDB_CODE_NODE_NOT_DEPLOYED || pResp->code == TSDB_CODE_SYN_NOT_LEADER)) {
+  if (CONN_NO_PERSIST_BY_APP(pConn) && pThrd->quit == false) {
+    int32_t code = pResp->code;
+    tmsg_t  msgType = pCtx->msgType;
+    if (pTransInst->retry != NULL && pTransInst->retry(code)) {
       pMsg->sent = 0;
-      tTrace("try to send req to next node");
-      pMsg->st = taosGetTimestampUs();
-      pCtx->retryCount += 1;
-      if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
-        if (pCtx->retryCount < pEpSet->numOfEps * 3) {
-          pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
-          STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
-          arg->param1 = pMsg;
-          arg->param2 = pThrd;
-          transDQSched(pThrd->delayQueue, doDelayTask, arg, TRANS_RETRY_INTERVAL);
-          transPrintEpSet(pEpSet);
-          tTrace("%s use local epset, inUse: %d, retry count:%d, limit: %d", pTransInst->label, pEpSet->inUse,
-                 pCtx->retryCount + 1, pEpSet->numOfEps * 3);
-          transUnrefCliHandle(pConn);
+      pCtx->retryCnt += 1;
+      if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+        transUnrefCliHandle(pConn);
+        cliUpdateRetryLimit(&pCtx->retryLimit, TRANS_RETRY_COUNT_LIMIT, EPSET_GET_SIZE(&pCtx->epSet) * 3);
+        if (pCtx->retryCnt < pCtx->retryLimit) {
+          EPSET_FORWARD_INUSE(&pCtx->epSet);
+          cliSchedMsgToNextNode(pMsg, pThrd);
           return -1;
         }
-      } else if (pCtx->retryCount < TRANS_RETRY_COUNT_LIMIT) {
-        if (pResp->contLen == 0) {
-          pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
-          transPrintEpSet(&pCtx->epSet);
-          tTrace("%s use local epset, inUse: %d, retry count:%d, limit: %d", pTransInst->label, pEpSet->inUse,
-                 pCtx->retryCount + 1, TRANS_RETRY_COUNT_LIMIT);
-        } else {
-          tDeserializeSEpSet(pResp->pCont, pResp->contLen, &pCtx->epSet);
-          transPrintEpSet(&pCtx->epSet);
-          tTrace("%s use remote epset, inUse: %d, retry count:%d, limit: %d", pTransInst->label, pEpSet->inUse,
-                 pCtx->retryCount + 1, TRANS_RETRY_COUNT_LIMIT);
-        }
-        if (pConn->status != ConnInPool) {
-          addConnToPool(pThrd->pool, pConn);
-        }
-        STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
-        arg->param1 = pMsg;
-        arg->param2 = pThrd;
-        transDQSched(pThrd->delayQueue, doDelayTask, arg, TRANS_RETRY_INTERVAL);
-        return -1;
+      } else {
+        addConnToPool(pThrd->pool, pConn);
+        cliUpdateRetryLimit(&pCtx->retryLimit, TRANS_RETRY_COUNT_LIMIT, TRANS_RETRY_COUNT_LIMIT);
+        if (pCtx->retryCnt < pCtx->retryLimit) {
+          if (pResp->contLen == 0) {
+            EPSET_FORWARD_INUSE(&pCtx->epSet);
+          } else {
+            SEpSet epSet = {0};
+            tDeserializeSEpSet(pResp->pCont, pResp->contLen, &epSet);
+            pCtx->epSet = epSet;
+          }
+          cliSchedMsgToNextNode(pMsg, pThrd);
+          return -1;
+        }
       }
     }
   }
 
   STraceId* trace = &pResp->info.traceId;
 
   if (pCtx->pSem != NULL) {
     tGTrace("%s conn %p(sync) handle resp", CONN_GET_INST_LABEL(pConn), pConn);
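Read as a whole, the new branch boils down to: bump retryCnt, pick a per-message retryLimit, advance the in-use endpoint, and reschedule only while retryCnt stays below the limit. A minimal standalone model of that gate is sketched below; DemoRetryCtx and the hard-coded numbers are illustrative stand-ins, not the transport structs or constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the retry bookkeeping carried in STransConnCtx. */
typedef struct {
  int8_t retryCnt;    /* attempts made so far */
  int8_t retryLimit;  /* per-message cap, e.g. derived from 3 * endpoint-set size */
  int    inUse;       /* index of the endpoint currently in use */
  int    numOfEps;    /* size of the endpoint set */
} DemoRetryCtx;

/* Returns true if the message should be rescheduled to the next endpoint, false if we give up. */
static bool demoShouldRetry(DemoRetryCtx* ctx) {
  ctx->retryCnt += 1;
  if (ctx->retryCnt >= ctx->retryLimit) {
    return false;                                /* out of attempts: hand the error back to the app */
  }
  ctx->inUse = (ctx->inUse + 1) % ctx->numOfEps; /* like EPSET_FORWARD_INUSE: round-robin to the next node */
  return true;                                   /* caller would now reschedule, as cliSchedMsgToNextNode does */
}

int main(void) {
  DemoRetryCtx ctx = {.retryCnt = 0, .retryLimit = 6, .inUse = 0, .numOfEps = 2};
  while (demoShouldRetry(&ctx)) {
    printf("retry %d on endpoint %d\n", ctx.retryCnt, ctx.inUse);
  }
  printf("gave up after %d attempts\n", ctx.retryCnt);
  return 0;
}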
@@ -1045,10 +1066,10 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
     pCtx->pRsp = NULL;
   } else {
     tGTrace("%s conn %p handle resp", CONN_GET_INST_LABEL(pConn), pConn);
-    if (pResp->code != 0 || pCtx->retryCount == 0 || transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) {
+    if (!cliIsEpsetUpdated(code, pCtx)) {
       pTransInst->cfp(pTransInst->parent, pResp, NULL);
     } else {
-      pTransInst->cfp(pTransInst->parent, pResp, pEpSet);
+      pTransInst->cfp(pTransInst->parent, pResp, &pCtx->epSet);
     }
   }
   return 0;