Commit 5162ed40 (unverified)

Merge pull request #9904 from taosdata/feature/rpc

add client

Authored on Jan 19, 2022 by dengyihao; committed via GitHub on Jan 19, 2022.
Parents: 61fd0565, a5253f25

Showing 5 changed files with 156 additions and 53 deletions (+156, -53):

    source/libs/transport/inc/transComm.h    +19   -0
    source/libs/transport/src/trans.c         +1   -0
    source/libs/transport/src/transCli.c    +119  -34
    source/libs/transport/src/transSrv.c     +14  -16
    source/libs/transport/test/rclient.c      +3   -3
source/libs/transport/inc/transComm.h  (+19, -0)

@@ -68,6 +68,25 @@ typedef void* queue[2];
     QUEUE_PREV_NEXT(e) = QUEUE_NEXT(e); \
     QUEUE_NEXT_PREV(e) = QUEUE_PREV(e); \
   }

+#define QUEUE_SPLIT(h, q, n)         \
+  do {                               \
+    QUEUE_PREV(n) = QUEUE_PREV(h);   \
+    QUEUE_PREV_NEXT(n) = (n);        \
+    QUEUE_NEXT(n) = (q);             \
+    QUEUE_PREV(h) = QUEUE_PREV(q);   \
+    QUEUE_PREV_NEXT(h) = (h);        \
+    QUEUE_PREV(q) = (n);             \
+  } while (0)
+
+#define QUEUE_MOVE(h, n)             \
+  do {                               \
+    if (QUEUE_IS_EMPTY(h)) {         \
+      QUEUE_INIT(n);                 \
+    } else {                         \
+      queue* q = QUEUE_HEAD(h);      \
+      QUEUE_SPLIT(h, q, n);          \
+    }                                \
+  } while (0)
+
 /* Return the element at the front of the queue. */
 #define QUEUE_HEAD(q) (QUEUE_NEXT(q))
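QUEUE_SPLIT(h, q, n) detaches the sublist starting at element q from list h onto the fresh list n; QUEUE_MOVE(h, n) applies it with q = QUEUE_HEAD(h), moving every element onto n in O(1) and leaving h empty. A minimal standalone sketch of the semantics: the companion macros below mirror the libuv-style definitions already in transComm.h (the insert-tail macro is written out locally and is not taken verbatim from the header).

/* queue_move_demo.c: what QUEUE_MOVE does, in isolation. */
#include <stddef.h>
#include <stdio.h>

typedef void* queue[2];

#define QUEUE_NEXT(q) (*(queue**)&((*(q))[0]))
#define QUEUE_PREV(q) (*(queue**)&((*(q))[1]))
#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
#define QUEUE_HEAD(q) (QUEUE_NEXT(q))
#define QUEUE_INIT(q)    \
  do {                   \
    QUEUE_NEXT(q) = (q); \
    QUEUE_PREV(q) = (q); \
  } while (0)
#define QUEUE_IS_EMPTY(q) ((const queue*)(q) == (const queue*)QUEUE_NEXT(q))
#define QUEUE_INSERT_TAIL(h, e)    \
  do {                             \
    QUEUE_NEXT(e) = (h);           \
    QUEUE_PREV(e) = QUEUE_PREV(h); \
    QUEUE_PREV_NEXT(e) = (e);      \
    QUEUE_PREV(h) = (e);           \
  } while (0)
#define QUEUE_REMOVE(e)                 \
  do {                                  \
    QUEUE_PREV_NEXT(e) = QUEUE_NEXT(e); \
    QUEUE_NEXT_PREV(e) = QUEUE_PREV(e); \
  } while (0)
#define QUEUE_DATA(e, type, field) ((type*)((char*)(e)-offsetof(type, field)))

/* The two macros added by this commit, as in the hunk above. */
#define QUEUE_SPLIT(h, q, n)       \
  do {                             \
    QUEUE_PREV(n) = QUEUE_PREV(h); \
    QUEUE_PREV_NEXT(n) = (n);      \
    QUEUE_NEXT(n) = (q);           \
    QUEUE_PREV(h) = QUEUE_PREV(q); \
    QUEUE_PREV_NEXT(h) = (h);      \
    QUEUE_PREV(q) = (n);           \
  } while (0)
#define QUEUE_MOVE(h, n)           \
  do {                             \
    if (QUEUE_IS_EMPTY(h)) {       \
      QUEUE_INIT(n);               \
    } else {                       \
      queue* q = QUEUE_HEAD(h);    \
      QUEUE_SPLIT(h, q, n);        \
    }                              \
  } while (0)

typedef struct Item {
  int   v;
  queue node;
} Item;

int main() {
  queue pending;
  QUEUE_INIT(&pending);

  Item a = {1}, b = {2};
  QUEUE_INSERT_TAIL(&pending, &a.node);
  QUEUE_INSERT_TAIL(&pending, &b.node);

  queue wq;
  QUEUE_MOVE(&pending, &wq); /* pending is empty now; wq owns a and b */

  while (!QUEUE_IS_EMPTY(&wq)) {
    queue* h = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(h);
    printf("%d\n", QUEUE_DATA(h, Item, node)->v); /* prints 1 then 2 */
  }
  return 0;
}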
source/libs/transport/src/trans.c  (+1, -0)

@@ -35,6 +35,7 @@ void* rpcOpen(const SRpcInit* pInit) {
   if (pInit->label) {
     tstrncpy(pRpc->label, pInit->label, strlen(pInit->label));
   }
   pRpc->cfp = pInit->cfp;
   pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads;
+  pRpc->connType = pInit->connType;
   pRpc->tcphandle = (*taosHandle[pRpc->connType])(0, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
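For context, rpcOpen() consumes an SRpcInit and dispatches to the client or server transport through the taosHandle table, indexed by the connType copied above. A minimal caller sketch, assuming the transport headers are included; only label, cfp, numOfThreads, connType, and localPort appear in the hunk, and the TAOS_CONN_CLIENT constant is an assumption about which taosHandle[] entry selects taosInitClient:

/* Hypothetical caller; mirrors how the test clients configure the transport. */
#include <string.h>

static void respCb(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
  /* invoked through pRpc->cfp when a response (or connect failure) is reported */
}

void* openClientHandle(void) {
  SRpcInit rpcInit;
  memset(&rpcInit, 0, sizeof(rpcInit));
  rpcInit.label        = "APP";            /* copied into pRpc->label */
  rpcInit.numOfThreads = 1;                /* clamped to TSDB_MAX_RPC_THREADS */
  rpcInit.localPort    = 0;                /* client side: no listening port */
  rpcInit.connType     = TAOS_CONN_CLIENT; /* assumption: selects the client entry */
  rpcInit.cfp          = respCb;
  return rpcOpen(&rpcInit);
}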
source/libs/transport/src/transCli.c  (+119, -34)

The hunks below are shown as merged. Besides the additions, the diff removes the old clientConnCb(struct uv_connect_s*, int) declaration, the clientFailedCb stub, and the old single-message clientAsyncCb, as noted per hunk.

@@ -20,12 +20,16 @@
typedef struct SCliConn {
  uv_connect_t connReq;
  uv_stream_t* stream;
  uv_write_t*  writeReq;
  void*        data;
  queue        conn;
  char         spi;
  char         secured;
} SCliConn;

typedef struct SCliMsg {
  SRpcReqContext* context;
  queue           q;
  uint64_t        st;
} SCliMsg;

typedef struct SCliThrdObj {
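SCliMsg carries its own queue node (q), the intrusive-list idiom used throughout this transport: a message is linked into a thread's pending list through the embedded node, and QUEUE_DATA() recovers the containing struct from the node via offsetof. A short sketch of the recovery step, using QUEUE_DATA as defined in transComm.h:

/* Given a queue* h popped from pThrd->msg, recover the owning SCliMsg. */
queue* h = QUEUE_HEAD(&pThrd->msg);
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); /* (SCliMsg*)((char*)h - offsetof(SCliMsg, q)) */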
@@ -45,86 +49,169 @@ typedef struct SClientObj {
  SCliThrdObj** pThreadObj;
} SClientObj;

// conn pool
static SCliConn* getConnFromCache(void* cache, char* ip, uint32_t port);
static void      addConnToCache(void* cache, char* ip, uint32_t port, SCliConn* conn);

static void clientAllocrReadBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
static void clientReadCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
static void clientWriteCb(uv_write_t* req, int status);
static void clientConnCb(uv_connect_t* req, int status);
static void clientAsyncCb(uv_async_t* handle);
static void clientDestroy(uv_handle_t* handle);
static void clientConnDestroy(SCliConn* pConn);
static void clientHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd);

static void* clientThread(void* arg);

static void clientAllocrReadBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
  // impl later
}

static void clientReadCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
  // impl later
  SCliConn* conn = handle->data;
  if (nread > 0) {
    return;
  }
  //
  uv_close((uv_handle_t*)handle, clientDestroy);
}

static void clientConnDestroy(SCliConn* conn) {
  // impl later
  //
}

static void clientDestroy(uv_handle_t* handle) {
  SCliConn* conn = handle->data;
  clientConnDestroy(conn);
}

static void clientWriteCb(uv_write_t* req, int status) {
  SCliConn* pConn = req->data;
  if (status == 0) {
    tDebug("data already was written on stream");
  } else {
    uv_close((uv_handle_t*)pConn->stream, clientDestroy);
    return;
  }
  uv_read_start((uv_stream_t*)pConn->stream, clientAllocrReadBufferCb, clientReadCb);
  // impl later
}

(Removed here: the clientFailedCb stub, which only logged "close handle".)
static void clientWrite(SCliConn* pConn) {
  SCliMsg*  pMsg = pConn->data;
  SEpSet*   pEpSet = &pMsg->context->epSet;
  SRpcHead* pHead = rpcHeadFromCont(pMsg->context->pCont);
  int       msgLen = rpcMsgLenFromCont(pMsg->context->contLen);
  char*     msg = (char*)(pHead);

  uv_buf_t wb = uv_buf_init(msg, msgLen);
  uv_write(pConn->writeReq, (uv_stream_t*)pConn->stream, &wb, 1, clientWriteCb);
}

static void clientConnCb(uv_connect_t* req, int status) {
  // impl later
  SCliConn* pConn = req->data;
  if (status != 0) {
    tError("failed to connect %s", uv_err_name(status));
    clientConnDestroy(pConn);
    return;
  }
  SCliMsg* pMsg = pConn->data;
  SEpSet*  pEpSet = &pMsg->context->epSet;

  SRpcMsg rpcMsg;
  // rpcMsg.ahandle = pMsg->context->ahandle;
  // rpcMsg.pCont = NULL;
  char*    fqdn = pEpSet->fqdn[pEpSet->inUse];
  uint32_t port = pEpSet->port[pEpSet->inUse];
  if (status != 0) {
    // call user fp later
    tError("failed to connect server(%s, %d), errmsg: %s", fqdn, port, uv_strerror(status));
    SRpcInfo* pRpc = pMsg->context->pRpc;
    (pRpc->cfp)(NULL, &rpcMsg, pEpSet);
    uv_close((uv_handle_t*)req->handle, clientDestroy);
    return;
  }
  assert(pConn->stream == req->handle);
  // impl later
}

(The old connect-failure path closed the handle via the removed clientFailedCb; it now reports through pRpc->cfp and closes via clientDestroy.)
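clientWrite hands uv_write a uv_buf_t built with uv_buf_init; libuv does not copy the bytes, it only records the pointer and length, so the RPC header and payload behind pHead must stay alive until clientWriteCb fires. A generic sketch of the ownership pattern (hypothetical helper names, not from the commit); req->data carries ownership the same way clientWrite threads pConn through its callbacks:

/* Sketch: heap-owned write whose buffer is released in the completion callback. */
#include <stdlib.h>
#include <string.h>
#include <uv.h>

static void onWriteDone(uv_write_t* req, int status) {
  free(req->data); /* safe to free only once libuv is done with the buffer */
  free(req);
}

static int writeOwned(uv_stream_t* stream, const char* src, size_t len) {
  char* copy = malloc(len);
  memcpy(copy, src, len);
  uv_write_t* req = malloc(sizeof(*req));
  req->data = copy;
  uv_buf_t wb = uv_buf_init(copy, (unsigned int)len);
  return uv_write(req, stream, &wb, 1, onWriteDone);
}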
// conn pool
static SCliConn* getConnFromCache(void* cache, char* ip, uint32_t port) {
  // impl later
  return NULL;
}
static void addConnToCache(void* cache, char* ip, uint32_t port, SCliConn* conn) {
  // impl later
}

(Removed here: the old clientAsyncCb, which locked msgMtx, popped a single SCliMsg off pThrd->msg, and unlocked; its replacement below drains the whole list at once.)
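Both cache functions are still stubs ("impl later"). The signatures suggest the intent: key pooled connections by destination ip:port so clientHandleReq can reuse a warm connection instead of dialing a new one. A purely hypothetical sketch of such a pool (none of these names exist in the commit; it assumes the SCliConn type from this file and uses a linear list for brevity):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical pool entry; a real implementation would likely hash on ip:port. */
typedef struct SPoolEntry {
  char               ip[64];
  uint32_t           port;
  SCliConn*          conn;
  struct SPoolEntry* next;
} SPoolEntry;

static SCliConn* poolGet(SPoolEntry** head, const char* ip, uint32_t port) {
  for (SPoolEntry** p = head; *p != NULL; p = &(*p)->next) {
    if ((*p)->port == port && strcmp((*p)->ip, ip) == 0) {
      SPoolEntry* e = *p;
      SCliConn*   c = e->conn;
      *p = e->next; /* unlink: a pooled conn is handed out exclusively */
      free(e);
      return c;
    }
  }
  return NULL;
}

static void poolPut(SPoolEntry** head, const char* ip, uint32_t port, SCliConn* conn) {
  SPoolEntry* e = calloc(1, sizeof(*e));
  snprintf(e->ip, sizeof(e->ip), "%s", ip);
  e->port = port;
  e->conn = conn;
  e->next = *head;
  *head = e;
}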
static void clientHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) {
  SEpSet* pEpSet = &pMsg->context->epSet;

  char*    fqdn = pEpSet->fqdn[pEpSet->inUse];
  uint32_t port = pEpSet->port[pEpSet->inUse];

  uint64_t el = taosGetTimestampUs() - pMsg->st;
  tDebug("msg tran time cost: %" PRIu64 "", el);

  SCliConn* conn = getConnFromCache(pThrd->cache, fqdn, port);
  if (conn != NULL) {
    // impl later
    conn->data = pMsg;
    conn->writeReq->data = conn;
    clientWrite(conn);
    // uv_buf_t wb;
    // uv_write(conn->writeReq, (uv_stream_t*)conn->stream, &wb, 1, clientWriteCb);
  } else {
    SCliConn* conn = malloc(sizeof(SCliConn));
    conn->stream = (uv_stream_t*)malloc(sizeof(uv_tcp_t));
    uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream));
    conn->writeReq = malloc(sizeof(uv_write_t));
    conn->connReq.data = conn;
    conn->data = pMsg;

    struct sockaddr_in addr;
    uv_ip4_addr(fqdn, port, &addr);
    // handle error in callback if fail to connect
    uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, clientConnCb);
  }
  // SRpcReqContext* pCxt = pMsg->context;
  // SRpcMsg rpcMsg;
  // SEpSet* pEpSet = &pMsg->context->epSet;
  // SRpcInfo* pRpc = pMsg->context->pRpc;
  //// rpcMsg.ahandle = pMsg->context->ahandle;
  // rpcMsg.pCont = NULL;
  // rpcMsg.ahandle = pMsg->context->ahandle;
  // uint64_t el1 = taosGetTimestampUs() - et;
  // tError("msg tran back first: time cost: %" PRIu64 "", el1);
  // et = taosGetTimestampUs();
  //(pRpc->cfp)(NULL, &rpcMsg, pEpSet);
  // uint64_t el2 = taosGetTimestampUs() - et;
  // tError("msg tran back second: time cost: %" PRIu64 "", el2);
}
static void clientAsyncCb(uv_async_t* handle) {
  SCliThrdObj* pThrd = handle->data;
  SCliMsg*     pMsg = NULL;
  queue        wq;

  // SRpcHead* pHead = rpcHeadFromCont(pCtx->pCont);
  // char* msg = (char*)pHead;
  // int len = rpcMsgLenFromCont(pCtx->contLen);
  // tmsg_t msgType = pCtx->msgType;

  // batch process to avoid to lock/unlock frequently
  pthread_mutex_lock(&pThrd->msgMtx);
  QUEUE_MOVE(&pThrd->msg, &wq);
  pthread_mutex_unlock(&pThrd->msgMtx);

  // impl later
  int count = 0;
  while (!QUEUE_IS_EMPTY(&wq)) {
    queue* h = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(h);

    pMsg = QUEUE_DATA(h, SCliMsg, q);
    clientHandleReq(pMsg, pThrd);
    count++;
    if (count >= 2) {
      tError("send batch size: %d", count);
    }
  }
}

static void* clientThread(void* arg) {
@@ -142,9 +229,6 @@ void* taosInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,
  SCliThrdObj* pThrd = (SCliThrdObj*)calloc(1, sizeof(SCliThrdObj));
  QUEUE_INIT(&pThrd->msg);
  pthread_mutex_init(&pThrd->msgMtx, NULL);

  // QUEUE_INIT(&pThrd->clientCache);

  pThrd->loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
  uv_loop_init(pThrd->loop);
@@ -186,6 +270,7 @@ void rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t*
  }

  SCliMsg* msg = malloc(sizeof(SCliMsg));
  msg->context = pContext;
  msg->st = taosGetTimestampUs();

  SCliThrdObj* thrd = ((SClientObj*)pRpc->tcphandle)->pThreadObj[index % pRpc->numOfThreads];
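rpcSendRequest stamps the message with an enqueue timestamp (msg->st, matching the SCliMsg.st field added above) and picks a worker thread round-robin. The tail of the function is collapsed in this view; the usual libuv handoff is: lock the thread's msgMtx, link msg->q onto pThrd->msg, unlock, then wake the worker's loop with uv_async_send so clientAsyncCb drains the list. A hedged sketch of that producer side, assuming the QUEUE_PUSH insert-tail macro from transComm.h and a cliAsync handle field on SCliThrdObj (the field name is an assumption; this view does not show it):

/* Sketch of the enqueue/wake pattern that pairs with clientAsyncCb above. */
static void sendToThread(SCliThrdObj* pThrd, SCliMsg* msg) {
  pthread_mutex_lock(&pThrd->msgMtx);
  QUEUE_PUSH(&pThrd->msg, &msg->q); /* intrusive link via SCliMsg.q */
  pthread_mutex_unlock(&pThrd->msgMtx);
  uv_async_send(pThrd->cliAsync);   /* assumption: the uv_async_t whose cb is clientAsyncCb */
}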
source/libs/transport/src/transSrv.c  (+14, -16)

Shown as merged; this first hunk drops the empty `if (terrno != 0) { /* handle err code */ }` block from uvOnReadCb.

@@ -277,10 +277,6 @@ void uvOnReadCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
    }
    return;
  }
  if (nread != UV_EOF) {
    tDebug("Read error %s\n", uv_err_name(nread));
  }
The server-side async callback gets the same treatment as the client: the old code popped a single SConn per wakeup ("// opt later"), while the new code drains the whole pending list with QUEUE_MOVE.

@@ -309,21 +305,23 @@ void uvOnWriteCb(uv_write_t* req, int status) {
void uvWorkerAsyncCb(uv_async_t* handle) {
  SWorkThrdObj* pThrd = container_of(handle, SWorkThrdObj, workerAsync);
  SConn*        conn = NULL;
  queue         wq;

  // batch process to avoid to lock/unlock frequently
  pthread_mutex_lock(&pThrd->connMtx);
  QUEUE_MOVE(&pThrd->conn, &wq);
  pthread_mutex_unlock(&pThrd->connMtx);

  while (!QUEUE_IS_EMPTY(&wq)) {
    queue* head = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(head);

    SConn* conn = QUEUE_DATA(head, SConn, queue);
    if (conn == NULL) {
      tError("except occurred, do nothing");
      return;
    }
    uv_buf_t wb = uv_buf_init(conn->writeBuf.buf, conn->writeBuf.len);
    uv_write(conn->pWriter, (uv_stream_t*)conn->pTcp, &wb, 1, uvOnWriteCb);
  }
}

void uvOnAcceptCb(uv_stream_t* stream, int status) {
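uvWorkerAsyncCb recovers its SWorkThrdObj from the raw uv_async_t* with container_of, the same trick QUEUE_DATA uses: since workerAsync is embedded in SWorkThrdObj, subtracting the member's offset from the handle address yields the owning struct. A minimal self-contained illustration with generic names (not from the commit):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
  ((type*)((char*)(ptr) - offsetof(type, member)))

typedef struct Worker {
  int id;
  int async; /* stand-in for the embedded uv_async_t */
} Worker;

int main() {
  Worker  w = {.id = 7};
  int*    handle = &w.async; /* what the callback receives */
  Worker* owner = container_of(handle, Worker, async);
  printf("%d\n", owner->id); /* prints 7 */
  return 0;
}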
source/libs/transport/test/rclient.c  (+3, -3)

Shown as merged; both changes comment out the per-message tDebug logging in the benchmark client.

@@ -34,8 +34,8 @@ typedef struct {
static void processResponse(void* pParent, SRpcMsg* pMsg, SEpSet* pEpSet) {
  SInfo* pInfo = (SInfo*)pMsg->ahandle;
  // tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen,
  // pMsg->code);
  if (pEpSet) pInfo->epSet = *pEpSet;

@@ -57,7 +57,7 @@ static void *sendRequest(void *param) {
    rpcMsg.contLen = pInfo->msgSize;
    rpcMsg.ahandle = pInfo;
    rpcMsg.msgType = 1;
    // tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
    rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL);
    if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
    tsem_wait(&pInfo->rspSem);