Commit 71539043 (unverified)
Authored on Apr 11, 2021 by sangshuduo; committed by GitHub on Apr 11, 2021
[TD-3317]<fix>: taosdemo interlace insertion. (#5774)

patch for master.

Co-authored-by: Shuduo Sang <sdsang@taosdata.com>

Parent commit: 5e43fc0d
Showing 2 changed files with 48 additions and 26 deletions:

    src/kit/taosdemo/taosdemo.c                  +46  -24
    tests/pytest/tools/taosdemoPerformance.py     +2   -2
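Taken together, the taosdemo.c changes move the literal "insert into " out of generateSQLHead and into the two writer threads (syncWriteInterlace and syncWriteProgressive): the keyword is now written once per request buffer, each per-table chunk appends only "db.tbl [using db.stb tags (...)] values ...", and the remaining buffer length is threaded through by pointer so a callee's consumption stays visible to its caller. A minimal sketch of that assembly pattern, illustrative only (the chunk strings below are made up; taosdemo builds them with generateSQLHead and generateDataTail):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        /* Sketch of the request-assembly pattern the patch converges on: the
           "insert into " keyword is written once per request buffer, and every
           later append advances pstr while shrinking remainderBufLen. */
        char buffer[1024];
        int remainderBufLen = sizeof(buffer);
        char *pstr = buffer;

        int len = snprintf(pstr, strlen("insert into ") + 1, "%s", "insert into ");
        pstr += len;
        remainderBufLen -= len;

        /* Each per-table chunk now carries only "db.tbl ... values ...", no keyword. */
        const char *chunks[] = {
            "db.t0 using db.stb tags (1) values (now, 10) ",
            "db.t1 using db.stb tags (2) values (now, 20) ",
        };
        for (int i = 0; i < 2; i++) {
            len = snprintf(pstr, remainderBufLen, "%s", chunks[i]);
            pstr += len;
            remainderBufLen -= len;
        }

        printf("%s\n", buffer);
        printf("bytes left in the request buffer: %d\n", remainderBufLen);
        return 0;
    }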
src/kit/taosdemo/taosdemo.c

@@ -18,6 +18,7 @@
     when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread.
 */
+#include <stdint.h>
 #define _GNU_SOURCE
 #define CURL_STATICLIB
@@ -3242,7 +3243,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
         g_args.num_of_RPR = numRecPerReq->valueint;
     } else if (!numRecPerReq) {
-        g_args.num_of_RPR = 0xffff;
+        g_args.num_of_RPR = INT32_MAX;
     } else {
         errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
                 __func__, __LINE__);
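When num_of_records_per_req is absent from the insert JSON, the fallback changes from 0xffff (65,535 rows per request) to INT32_MAX, which is also why <stdint.h> is now included at the top of the file; with the cap effectively removed, max_sql_len becomes the practical per-request limit. A throwaway comparison of the two constants:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Illustration only: magnitude of the old vs. new fallback. */
        printf("0xffff    = %d\n", 0xffff);     /* 65535 */
        printf("INT32_MAX = %d\n", INT32_MAX);  /* 2147483647 */
        return 0;
    }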
@@ -4647,7 +4648,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
         len = snprintf(
                 headBuf,
                 HEAD_BUFF_LEN,
-                "insert into %s.%s using %s.%s tags %s values",
+                "%s.%s using %s.%s tags %s values",
                 pThreadInfo->db_name,
                 tableName,
                 pThreadInfo->db_name,
@@ -4658,14 +4659,14 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
             len = snprintf(
                     headBuf,
                     HEAD_BUFF_LEN,
-                    "insert into %s.%s values",
+                    "%s.%s values",
                     pThreadInfo->db_name,
                     tableName);
         } else {
             len = snprintf(
                     headBuf,
                     HEAD_BUFF_LEN,
-                    "insert into %s.%s values",
+                    "%s.%s values",
                     pThreadInfo->db_name,
                     tableName);
         }
@@ -4673,7 +4674,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
         len = snprintf(
                 headBuf,
                 HEAD_BUFF_LEN,
-                "insert into %s.%s values",
+                "%s.%s values",
                 pThreadInfo->db_name,
                 tableName);
     }
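With the keyword stripped from all three head formats, generateSQLHead emits only the per-table portion of the statement, so one interlaced request now reads, illustratively, like "insert into db.t0 using db.stb tags (...) values (...) (...) db.t1 using db.stb tags (...) values (...) (...)": a single prefix followed by many table chunks, which matches the multi-table insert form TDengine accepts.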
@@ -4694,6 +4695,7 @@ static int generateInterlaceDataBuffer(
         int64_t startTime,
         int *pRemainderBufLen)
 {
     assert(buffer);
     char *pstr = buffer;
     SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4723,18 +4725,20 @@ static int generateInterlaceDataBuffer(
     } else {
         startTime = 1500000000000;
     }

     int k = generateDataTail(
             superTblInfo,
             batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
             startTime,
             &(pThreadInfo->samplePos), &dataLen);

     if (k > 0) {
         if (k == batchPerTbl) {
             pstr += dataLen;
             *pRemainderBufLen -= dataLen;
         } else {
             pstr -= headLen;
             pstr[0] = '\0';
             k = 0;
         }
     return k;
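The branch worth noting here is the rollback: when generateDataTail cannot produce a full batchPerTbl rows, the head that was already written is retracted (pstr -= headLen; pstr[0] = '\0') and 0 is returned, so a request never ends with a table head that has no values behind it. A minimal sketch of that append-or-roll-back pattern; append_chunk is a hypothetical stand-in, not a taosdemo function:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for generateInterlaceDataBuffer: append one table
       chunk ("tN values (0) (1) ...") or roll it back entirely when fewer than
       'batch' rows are available, mirroring the pstr -= headLen; pstr[0] = '\0'
       branch in the patch. Returns the number of bytes appended. */
    static int append_chunk(char *pstr, size_t remainder, int tbl, int batch, int avail) {
        int headLen = snprintf(pstr, remainder, " t%d values", tbl);
        int dataLen = 0;
        int rows = (avail < batch) ? avail : batch;

        for (int r = 0; r < rows; r++)
            dataLen += snprintf(pstr + headLen + dataLen,
                                remainder - (headLen + dataLen), " (%d)", r);

        if (rows != batch) {   /* partial batch: drop head and data, request stays valid */
            pstr[0] = '\0';
            return 0;
        }
        return headLen + dataLen;
    }

    int main(void) {
        char buffer[256];
        int len = snprintf(buffer, sizeof(buffer), "%s", "insert into");

        len += append_chunk(buffer + len, sizeof(buffer) - len, 0, 3, 10); /* fits        */
        len += append_chunk(buffer + len, sizeof(buffer) - len, 1, 3, 1);  /* rolled back */

        printf("%s\n", buffer);   /* insert into t0 values (0) (1) (2) */
        return 0;
    }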
@@ -4745,7 +4749,8 @@ static int generateProgressiveDataBuffer(
         int32_t tableSeq,
         threadInfo *pThreadInfo, char *buffer,
         int64_t insertRows,
-        int64_t startFrom, int64_t startTime, int *pSamplePos)
+        int64_t startFrom, int64_t startTime, int *pSamplePos,
+        int *pRemainderBufLen)
 {
     SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4760,27 +4765,24 @@ static int generateProgressiveDataBuffer(
     }

     assert(buffer != NULL);
     char *pstr = buffer;
     int k = 0;
-    int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
-    int remainderBufLen = maxSqlLen;
-    memset(buffer, 0, maxSqlLen);
+    memset(buffer, 0, *pRemainderBufLen);

     int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo,
-            buffer, remainderBufLen);
+            buffer, *pRemainderBufLen);

     if (headLen <= 0) {
         return 0;
     }
     pstr += headLen;
-    remainderBufLen -= headLen;
+    *pRemainderBufLen -= headLen;

     int dataLen;
     k = generateDataTail(superTblInfo,
-            g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom,
+            g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
             startTime,
             pSamplePos, &dataLen);
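generateProgressiveDataBuffer used to size and zero a private remainderBufLen from maxSqlLen; it now receives int *pRemainderBufLen and debits it in place, so the bytes consumed by the head and the data tail remain visible to syncWriteProgressive, which has already spent part of the same buffer on the "insert into " prefix. A small sketch of why the remaining length travels by pointer; append_part and the literals are illustrative, not taosdemo code:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative callee: appends text and debits the caller's remaining length. */
    static int append_part(char *pstr, const char *text, int *pRemainderBufLen) {
        int len = snprintf(pstr, *pRemainderBufLen, "%s", text);
        if (len < 0 || len >= *pRemainderBufLen)
            return 0;                     /* would not fit: report nothing written */
        *pRemainderBufLen -= len;         /* caller sees the consumption */
        return len;
    }

    int main(void) {
        char buffer[64];
        int remainderBufLen = sizeof(buffer);
        char *pstr = buffer;

        pstr += append_part(pstr, "insert into ", &remainderBufLen);  /* caller's own spend */
        pstr += append_part(pstr, "db.t0 values", &remainderBufLen);  /* head               */
        pstr += append_part(pstr, " (now, 1)",    &remainderBufLen);  /* data tail          */

        printf("%s | %d bytes left\n", buffer, remainderBufLen);
        return 0;
    }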
@@ -4842,18 +4844,17 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
     int64_t startTime = pThreadInfo->start_time;

-    int batchPerTblTimes;
-    int batchPerTbl;

     assert(pThreadInfo->ntables > 0);

     if (interlaceRows > g_args.num_of_RPR)
         interlaceRows = g_args.num_of_RPR;

-    batchPerTbl = interlaceRows;
+    int batchPerTbl = interlaceRows;

+    int batchPerTblTimes;
     if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
         batchPerTblTimes =
-            (g_args.num_of_RPR / (interlaceRows * pThreadInfo->ntables)) + 1;
+            g_args.num_of_RPR / interlaceRows;
     } else {
         batchPerTblTimes = 1;
     }
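This batching arithmetic is the core of the interlace fix. Reading the surrounding loop, each iteration appends one interlaceRows-sized chunk for one table, so batchPerTblTimes should be the number of chunks that fit into a num_of_RPR-row request, independent of how many tables the thread owns. With illustrative numbers, num_of_records_per_req = 1000, interlace_rows = 100 and 5 tables per thread, the old expression gave 1000 / (100 * 5) + 1 = 3 chunks, only 300 rows per request, while the new expression gives 1000 / 100 = 10 chunks and fills the request.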
@@ -4862,6 +4863,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
     bool flagSleep = true;
     int sleepTimeTotal = 0;

+    char *strInsertInto = "insert into ";
+    int nInsertBufLen = strlen(strInsertInto);
+
     while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
         if ((flagSleep) && (insert_interval)) {
             st = taosGetTimestampUs();
@@ -4872,6 +4876,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
         int remainderBufLen = maxSqlLen;
         char *pstr = buffer;

+        int len = snprintf(pstr, nInsertBufLen + 1, "%s", strInsertInto);
+        pstr += len;
+        remainderBufLen -= len;
+
         int recOfBatch = 0;

         for (int i = 0; i < batchPerTblTimes; i ++) {
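The size argument nInsertBufLen + 1 matters: snprintf reserves one byte of the given size for the terminating NUL, so passing exactly strlen("insert into ") would silently drop the trailing space and the first table name would fuse with the keyword. A standalone illustration, not part of the patch:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        const char *strInsertInto = "insert into ";
        int nInsertBufLen = strlen(strInsertInto);      /* 12 */
        char a[32] = {0}, b[32] = {0};

        snprintf(a, nInsertBufLen,     "%s", strInsertInto);  /* truncated: "insert into"  */
        snprintf(b, nInsertBufLen + 1, "%s", strInsertInto);  /* full:      "insert into " */

        printf("[%s]\n[%s]\n", a, b);
        return 0;
    }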
@@ -4883,6 +4892,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
                 return NULL;
             }

+            int oldRemainderLen = remainderBufLen;
             int generated = generateInterlaceDataBuffer(
                     tableName, batchPerTbl, i, batchPerTblTimes,
                     tableSeq,
@@ -4901,6 +4911,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
             tableSeq ++;
             recOfBatch += batchPerTbl;

+            pstr += (oldRemainderLen - remainderBufLen);
             // startTime += batchPerTbl * superTblInfo->timeStampStep;
             pThreadInfo->totalInsertRows += batchPerTbl;
             verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
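Because generateInterlaceDataBuffer now debits remainderBufLen through the pointer it receives, the caller can recover exactly how many bytes the chunk occupied as oldRemainderLen - remainderBufLen and advance pstr by that amount, keeping the write position and the length bookkeeping in step for the next table's chunk.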
@@ -5012,11 +5023,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
     debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);

     SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+    int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;

-    char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1);
+    char* buffer = calloc(maxSqlLen, 1);
     if (NULL == buffer) {
         errorPrint("Failed to alloc %d Bytes, reason:%s\n",
-                superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len,
+                maxSqlLen,
                 strerror(errno));
         return NULL;
     }
@@ -5059,10 +5071,20 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
                     __func__, __LINE__,
                     pThreadInfo->threadID, tableSeq, tableName);

+            int remainderBufLen = maxSqlLen;
+            char *pstr = buffer;
+            int nInsertBufLen = strlen("insert into ");
+
+            int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into ");
+            pstr += len;
+            remainderBufLen -= len;
+
             int generated = generateProgressiveDataBuffer(
-                    tableName, tableSeq, pThreadInfo, buffer, insertRows,
+                    tableName, tableSeq, pThreadInfo, pstr, insertRows,
                     i, start_time,
-                    &(pThreadInfo->samplePos));
+                    &(pThreadInfo->samplePos), &remainderBufLen);
             if (generated > 0)
                 i += generated;
             else
tests/pytest/tools/taosdemoPerformance.py

@@ -51,7 +51,7 @@ class taosdemoPerformace:
             "insert_rows": 100000,
             "multi_thread_write_one_tbl": "no",
             "number_of_tbl_in_one_sql": 0,
-            "rows_per_tbl": 100,
+            "interlace_rows": 100,
             "max_sql_len": 1024000,
             "disorder_ratio": 0,
             "disorder_range": 1000,
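The benchmark script's generated insert JSON renames its key from rows_per_tbl to interlace_rows, presumably the option name the patched taosdemo reads for interlaced insertion, so the performance test keeps exercising the code path this commit fixes.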
@@ -159,4 +159,4 @@ if __name__ == '__main__':
     perftest = taosdemoPerformace(args.commit_id, args.database_name)
     perftest.insertData()
-    perftest.createTablesAndStoreData()
\ No newline at end of file
+    perftest.createTablesAndStoreData()