taosdata / TDengine

Commit 663439bf, authored May 21, 2021 by Shengliang Guan

Merge branch 'develop' into feature/TD-3951

Parents: 9be55c32, 6708db1f
Showing 54 changed files with 1274 additions and 856 deletions (+1274 -856)
cmake/platform.inc  +6 -0
src/client/inc/tscUtil.h  +1 -1
src/client/inc/tsclient.h  +4 -2
src/client/src/tscSchemaUtil.c  +1 -0
src/client/src/tscServer.c  +13 -15
src/client/src/tscUtil.c  +10 -11
src/common/inc/tglobal.h  +3 -0
src/common/src/tglobal.c  +3 -0
src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java  +60 -1
src/connector/nodejs/nodetaos/cinterface.js  +129 -127
src/connector/nodejs/nodetaos/cursor.js  +59 -62
src/connector/nodejs/package-lock.json  +0 -285
src/connector/nodejs/package.json  +1 -1
src/dnode/src/dnodeMain.c  +12 -0
src/dnode/src/dnodeSystem.c  +2 -0
src/inc/mnode.h  +3 -0
src/inc/monitor.h  +2 -1
src/inc/taoserror.h  +3 -0
src/kit/taosdemo/subscribe.json  +1 -1
src/kit/taosdemo/taosdemo.c  +471 -320
src/mnode/inc/mnodeAcct.h  +2 -0
src/mnode/inc/mnodeCluster.h  +2 -0
src/mnode/inc/mnodeDb.h  +2 -0
src/mnode/inc/mnodeDnode.h  +1 -0
src/mnode/inc/mnodeMnode.h  +1 -0
src/mnode/inc/mnodeSdb.h  +2 -0
src/mnode/inc/mnodeTable.h  +1 -0
src/mnode/inc/mnodeUser.h  +2 -0
src/mnode/inc/mnodeVgroup.h  +1 -0
src/mnode/src/mnodeAcct.c  +26 -0
src/mnode/src/mnodeCluster.c  +24 -0
src/mnode/src/mnodeDb.c  +28 -1
src/mnode/src/mnodeDnode.c  +27 -0
src/mnode/src/mnodeMain.c  +17 -0
src/mnode/src/mnodeMnode.c  +27 -0
src/mnode/src/mnodeSdb.c  +56 -0
src/mnode/src/mnodeTable.c  +62 -0
src/mnode/src/mnodeUser.c  +27 -0
src/mnode/src/mnodeVgroup.c  +27 -0
src/os/inc/osMips64.h  +87 -0
src/os/src/detail/osSignal.c  +5 -5
src/plugins/monitor/src/monMain.c  +10 -0
src/query/inc/qHistogram.h  +1 -1
src/query/src/qHistogram.c  +1 -1
src/util/src/tcrc32c.c  +1 -1
src/vnode/src/vnodeMain.c  +10 -2
src/vnode/src/vnodeRead.c  +5 -2
src/vnode/src/vnodeStatus.c  +3 -0
src/vnode/src/vnodeSync.c  +11 -0
src/vnode/src/vnodeWrite.c  +7 -2
tests/perftest-scripts/perftest-query.sh  +1 -1
tests/pytest/tools/taosdemoPerformance.py  +1 -1
tests/pytest/tools/taosdemoTestWithJson.py  +11 -11
tests/script/general/parser/commit.sim  +1 -1
cmake/platform.inc

@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
    SET(TD_LINUX TRUE)
    SET(TD_LINUX_64 FALSE)
    SET(TD_ARM_64 TRUE)
+  ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
+    SET(CPUTYPE "mips64")
+    MESSAGE(STATUS "Set CPUTYPE to mips64")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_64 FALSE)
+    SET(TD_MIPS_64 TRUE)
  ENDIF ()
ELSE ()
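The new TD_MIPS_64 CMake switch pairs with the new src/os/inc/osMips64.h file listed above. As a rough, hedged sketch only (the _TD_MIPS_64_ macro name and the include site are assumptions, not shown in this diff), a MIPS64-specific OS header is typically selected with a compile-time guard like this:

/* Hypothetical sketch: selecting the MIPS64 OS abstraction header.
 * This commit only shows the CMake variable (TD_MIPS_64) and the new
 * osMips64.h file; the macro name below is an assumed convention. */
#ifdef _TD_MIPS_64_
  #include "osMips64.h"   /* MIPS64-specific type/syscall wrappers */
#endif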
src/client/inc/tscUtil.h

@@ -307,7 +307,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
src/client/inc/tsclient.h

@@ -68,14 +68,16 @@ typedef struct CChildTableMeta {
  int32_t       vgId;
  STableId      id;
  uint8_t       tableType;
-  char          sTableName[TSDB_TABLE_FNAME_LEN];  //super table name, not full name
+  char          sTableName[TSDB_TABLE_FNAME_LEN];  // TODO: refactor super table name, not full name
+  uint64_t      suid;                              // super table id
} CChildTableMeta;

typedef struct STableMeta {
  int32_t       vgId;
  STableId      id;
  uint8_t       tableType;
-  char          sTableName[TSDB_TABLE_FNAME_LEN];
+  char          sTableName[TSDB_TABLE_FNAME_LEN];  // super table name
+  uint64_t      suid;                              // super table id
  int16_t       sversion;
  int16_t       tversion;
  STableComInfo tableInfo;
src/client/src/tscSchemaUtil.c

@@ -94,6 +94,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
  pTableMeta->tableType = pTableMetaMsg->tableType;
  pTableMeta->vgId      = pTableMetaMsg->vgroup.vgId;
+  pTableMeta->suid      = pTableMetaMsg->suid;
  pTableMeta->tableInfo = (STableComInfo) {
    .numOfTags    = pTableMetaMsg->numOfTags,
src/client/src/tscServer.c

@@ -1828,13 +1828,13 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int tscProcessTableMetaRsp(SSqlObj *pSql) {
  STableMetaMsg *pMetaMsg = (STableMetaMsg *)pSql->res.pRsp;

  pMetaMsg->tid = htonl(pMetaMsg->tid);
  pMetaMsg->sversion = htons(pMetaMsg->sversion);
  pMetaMsg->tversion = htons(pMetaMsg->tversion);
  pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId);
  pMetaMsg->uid = htobe64(pMetaMsg->uid);
+  pMetaMsg->suid = pMetaMsg->suid;
  pMetaMsg->contLen = htons(pMetaMsg->contLen);
  pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);

  if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&

@@ -2448,19 +2448,16 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
    pTableMetaInfo->pTableMeta = calloc(1, size);
    pTableMetaInfo->tableMetaSize = size;
  } else if (pTableMetaInfo->tableMetaSize < size) {
    char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
    if (tmp == NULL) {
      return TSDB_CODE_TSC_OUT_OF_MEMORY;
    }
    pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
-    memset(pTableMetaInfo->pTableMeta, 0, size);
-    pTableMetaInfo->tableMetaSize = size;
-  } else {
-    //uint32_t s = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
-    memset(pTableMetaInfo->pTableMeta, 0, size);
-    pTableMetaInfo->tableMetaSize = size;
  }
+  memset(pTableMetaInfo->pTableMeta, 0, size);
+  pTableMetaInfo->tableMetaSize = size;

  pTableMetaInfo->pTableMeta->tableType = -1;
  pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;

@@ -2476,8 +2473,9 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
  STableMeta* pMeta = pTableMetaInfo->pTableMeta;
  if (pMeta->id.uid > 0) {
+    // in case of child table, here only get the
    if (pMeta->tableType == TSDB_CHILD_TABLE) {
-      int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
+      int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf);
      if (code != TSDB_CODE_SUCCESS) {
        return getTableMetaFromMnode(pSql, pTableMetaInfo);
      }
src/client/src/tscUtil.c

@@ -3370,22 +3370,25 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
  assert(pTableMeta != NULL);

  CChildTableMeta* cMeta = calloc(1, sizeof(CChildTableMeta));
  cMeta->tableType = TSDB_CHILD_TABLE;
  cMeta->vgId      = pTableMeta->vgId;
  cMeta->id        = pTableMeta->id;
+  cMeta->suid      = pTableMeta->suid;
  tstrncpy(cMeta->sTableName, pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
  return cMeta;
}

-int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf) {
  assert(pChild != NULL && buf != NULL);

-//  uint32_t size = tscGetTableMetaMaxSize();
-  STableMeta* p = buf;//calloc(1, size);
+  STableMeta* p = buf;

  taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
-  if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
+
+  // tableMeta exists, build child table meta according to the super table meta
+  // the uid need to be checked in addition to the general name of the super table.
+  if (p->id.uid > 0 && pChild->suid == p->id.uid) {
    pChild->sversion = p->sversion;
    pChild->tversion = p->tversion;

@@ -3393,13 +3396,9 @@ int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, v
    int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags;
    memcpy(pChild->schema, p->schema, sizeof(SSchema) * total);

-//    tfree(p);
    return TSDB_CODE_SUCCESS;
  } else { // super table has been removed, current tableMeta is also expired. remove it here
    taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
-
-//    tfree(p);
    return -1;
  }
}
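As the tscServer.c hunk earlier already shows, the renamed tscCreateTableMetaFromSTableMeta() acts as a fast path that rebuilds a child table's meta from the locally cached super-table meta and returns -1 when that cache is stale. A condensed sketch of that caller-side pattern (function and constant names taken from the diff, surrounding code omitted for brevity):

/* Sketch of the caller-side usage seen in tscGetTableMeta() above:
 * rebuild the child table meta from the cached super-table meta; if the
 * cache is stale (suid/uid mismatch, so the call returns -1), fall back
 * to fetching fresh meta from the mnode. Simplified, not the full code. */
if (pMeta->tableType == TSDB_CHILD_TABLE) {
  int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf);
  if (code != TSDB_CODE_SUCCESS) {
    return getTableMetaFromMnode(pSql, pTableMetaInfo);  // cache miss or stale: refresh from mnode
  }
}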
src/common/inc/tglobal.h

@@ -142,12 +142,15 @@ extern int32_t tsMonitorInterval;
extern int8_t tsEnableStream;

// internal
+extern int8_t tsCompactMnodeWal;
extern int8_t tsPrintAuth;
extern int8_t tscEmbedded;
extern char   configDir[];
extern char   tsVnodeDir[];
extern char   tsDnodeDir[];
extern char   tsMnodeDir[];
+extern char   tsMnodeBakDir[];
+extern char   tsMnodeTmpDir[];
extern char   tsDataDir[];
extern char   tsLogDir[];
extern char   tsScriptDir[];
src/common/src/tglobal.c

@@ -176,12 +176,15 @@ int32_t tsMonitorInterval = 30;  // seconds
int8_t tsEnableStream = 1;

// internal
+int8_t tsCompactMnodeWal = 0;
int8_t tsPrintAuth = 0;
int8_t tscEmbedded = 0;
char   configDir[TSDB_FILENAME_LEN] = {0};
char   tsVnodeDir[TSDB_FILENAME_LEN] = {0};
char   tsDnodeDir[TSDB_FILENAME_LEN] = {0};
char   tsMnodeDir[TSDB_FILENAME_LEN] = {0};
+char   tsMnodeTmpDir[TSDB_FILENAME_LEN] = {0};
+char   tsMnodeBakDir[TSDB_FILENAME_LEN] = {0};
char   tsDataDir[TSDB_FILENAME_LEN] = {0};
char   tsScriptDir[TSDB_FILENAME_LEN] = {0};
char   tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java

@@ -207,10 +207,69 @@ public class TSDBPreparedStatementTest {
            while (rs.next()) {
                rows++;
            }
            Assert.assertEquals(numOfRows, rows);
        }
    }

+    @Test
+    public void bindDataSelectColumnTest() throws SQLException {
+        Statement stmt = conn.createStatement();
+        int numOfRows = 1000;
+
+        for (int loop = 0; loop < 10; loop++) {
+            stmt.execute("drop table if exists weather_test");
+            stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))");
+
+            TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? (ts, f1, f7) values(?, ?, ?)");
+            Random r = new Random();
+            s.setTableName("weather_test");
+
+            ArrayList<Long> ts = new ArrayList<Long>();
+            for (int i = 0; i < numOfRows; i++) {
+                ts.add(System.currentTimeMillis() + i);
+            }
+            s.setTimestamp(0, ts);
+
+            int random = 10 + r.nextInt(5);
+            ArrayList<String> s2 = new ArrayList<String>();
+            for (int i = 0; i < numOfRows; i++) {
+                if (i % random == 0) {
+                    s2.add(null);
+                } else {
+                    s2.add("分支" + i % 4);
+                }
+            }
+            s.setNString(1, s2, 4);
+
+            random = 10 + r.nextInt(5);
+            ArrayList<String> s5 = new ArrayList<String>();
+            for (int i = 0; i < numOfRows; i++) {
+                if (i % random == 0) {
+                    s5.add(null);
+                } else {
+                    s5.add("test" + i % 10);
+                }
+            }
+            s.setString(2, s5, 10);
+
+            s.columnDataAddBatch();
+            s.columnDataExecuteBatch();
+            s.columnDataCloseBatch();
+
+            String sql = "select * from weather_test";
+            PreparedStatement statement = conn.prepareStatement(sql);
+            ResultSet rs = statement.executeQuery();
+            int rows = 0;
+            while (rs.next()) {
+                rows++;
+            }
+            Assert.assertEquals(numOfRows, rows);
+        }
+    }
+
    @Test
    public void setBoolean() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
src/connector/nodejs/nodetaos/cinterface.js
浏览文件 @
663439bf
...
@@ -9,7 +9,7 @@ const ffi = require('ffi-napi');
...
@@ -9,7 +9,7 @@ const ffi = require('ffi-napi');
const
ArrayType
=
require
(
'
ref-array-napi
'
);
const
ArrayType
=
require
(
'
ref-array-napi
'
);
const
Struct
=
require
(
'
ref-struct-napi
'
);
const
Struct
=
require
(
'
ref-struct-napi
'
);
const
FieldTypes
=
require
(
'
./constants
'
);
const
FieldTypes
=
require
(
'
./constants
'
);
const
errors
=
require
(
'
./error
'
);
const
errors
=
require
(
'
./error
'
);
const
TaosObjects
=
require
(
'
./taosobjects
'
);
const
TaosObjects
=
require
(
'
./taosobjects
'
);
const
{
NULL_POINTER
}
=
require
(
'
ref-napi
'
);
const
{
NULL_POINTER
}
=
require
(
'
ref-napi
'
);
...
@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) {
...
@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) {
return
new
TaosObjects
.
TaosTimestamp
(
time
*
0.001
,
true
);
return
new
TaosObjects
.
TaosTimestamp
(
time
*
0.001
,
true
);
}
}
function
convertTimestamp
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertTimestamp
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
timestampConverter
=
convertMillisecondsToDatetime
;
timestampConverter
=
convertMillisecondsToDatetime
;
if
(
micro
==
true
)
{
if
(
micro
==
true
)
{
timestampConverter
=
convertMicrosecondsToDatetime
;
timestampConverter
=
convertMicrosecondsToDatetime
;
...
@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false
...
@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false
}
}
return
res
;
return
res
;
}
}
function
convertBool
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertBool
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
new
Array
(
data
.
length
);
let
res
=
new
Array
(
data
.
length
);
for
(
let
i
=
0
;
i
<
data
.
length
;
i
++
)
{
for
(
let
i
=
0
;
i
<
data
.
length
;
i
++
)
{
if
(
data
[
i
]
==
0
)
{
if
(
data
[
i
]
==
0
)
{
res
[
i
]
=
false
;
res
[
i
]
=
false
;
}
}
else
if
(
data
[
i
]
==
1
){
else
if
(
data
[
i
]
==
1
)
{
res
[
i
]
=
true
;
res
[
i
]
=
true
;
}
}
else
if
(
data
[
i
]
==
FieldTypes
.
C_BOOL_NULL
)
{
else
if
(
data
[
i
]
==
FieldTypes
.
C_BOOL_NULL
)
{
...
@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
}
return
res
;
return
res
;
}
}
function
convertTinyint
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertTinyint
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
while
(
currOffset
<
data
.
length
)
{
while
(
currOffset
<
data
.
length
)
{
let
d
=
data
.
readIntLE
(
currOffset
,
1
);
let
d
=
data
.
readIntLE
(
currOffset
,
1
);
res
.
push
(
d
==
FieldTypes
.
C_TINYINT_NULL
?
null
:
d
);
res
.
push
(
d
==
FieldTypes
.
C_TINYINT_NULL
?
null
:
d
);
currOffset
+=
nbytes
;
currOffset
+=
nbytes
;
}
}
return
res
;
return
res
;
}
}
function
convertSmallint
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertSmallint
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
while
(
currOffset
<
data
.
length
)
{
while
(
currOffset
<
data
.
length
)
{
let
d
=
data
.
readIntLE
(
currOffset
,
2
);
let
d
=
data
.
readIntLE
(
currOffset
,
2
);
res
.
push
(
d
==
FieldTypes
.
C_SMALLINT_NULL
?
null
:
d
);
res
.
push
(
d
==
FieldTypes
.
C_SMALLINT_NULL
?
null
:
d
);
currOffset
+=
nbytes
;
currOffset
+=
nbytes
;
}
}
return
res
;
return
res
;
}
}
function
convertInt
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertInt
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
...
@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
}
return
res
;
return
res
;
}
}
function
convertBigint
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertBigint
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
...
@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
}
return
res
;
return
res
;
}
}
function
convertFloat
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertFloat
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
...
@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
}
return
res
;
return
res
;
}
}
function
convertDouble
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertDouble
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
...
@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
}
return
res
;
return
res
;
}
}
function
convertBinary
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertBinary
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
currOffset
=
0
;
let
currOffset
=
0
;
...
@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
}
}
return
res
;
return
res
;
}
}
function
convertNchar
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
function
convertNchar
(
data
,
num_of_rows
,
nbytes
=
0
,
offset
=
0
,
micro
=
false
)
{
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
data
=
ref
.
reinterpret
(
data
.
deref
(),
nbytes
*
num_of_rows
,
offset
);
let
res
=
[];
let
res
=
[];
let
dataEntry
=
data
.
slice
(
0
,
nbytes
);
//one entry in a row under a column;
let
dataEntry
=
data
.
slice
(
0
,
nbytes
);
//one entry in a row under a column;
...
@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
...
@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
// Object with all the relevant converters from pblock data to javascript readable data
// Object with all the relevant converters from pblock data to javascript readable data
let
convertFunctions
=
{
let
convertFunctions
=
{
[
FieldTypes
.
C_BOOL
]
:
convertBool
,
[
FieldTypes
.
C_BOOL
]
:
convertBool
,
[
FieldTypes
.
C_TINYINT
]
:
convertTinyint
,
[
FieldTypes
.
C_TINYINT
]
:
convertTinyint
,
[
FieldTypes
.
C_SMALLINT
]
:
convertSmallint
,
[
FieldTypes
.
C_SMALLINT
]
:
convertSmallint
,
[
FieldTypes
.
C_INT
]
:
convertInt
,
[
FieldTypes
.
C_INT
]
:
convertInt
,
[
FieldTypes
.
C_BIGINT
]
:
convertBigint
,
[
FieldTypes
.
C_BIGINT
]
:
convertBigint
,
[
FieldTypes
.
C_FLOAT
]
:
convertFloat
,
[
FieldTypes
.
C_FLOAT
]
:
convertFloat
,
[
FieldTypes
.
C_DOUBLE
]
:
convertDouble
,
[
FieldTypes
.
C_DOUBLE
]
:
convertDouble
,
[
FieldTypes
.
C_BINARY
]
:
convertBinary
,
[
FieldTypes
.
C_BINARY
]
:
convertBinary
,
[
FieldTypes
.
C_TIMESTAMP
]
:
convertTimestamp
,
[
FieldTypes
.
C_TIMESTAMP
]
:
convertTimestamp
,
[
FieldTypes
.
C_NCHAR
]
:
convertNchar
[
FieldTypes
.
C_NCHAR
]
:
convertNchar
}
}
// Define TaosField structure
// Define TaosField structure
var
char_arr
=
ArrayType
(
ref
.
types
.
char
);
var
char_arr
=
ArrayType
(
ref
.
types
.
char
);
var
TaosField
=
Struct
({
var
TaosField
=
Struct
({
'
name
'
:
char_arr
,
'
name
'
:
char_arr
,
});
});
TaosField
.
fields
.
name
.
type
.
size
=
65
;
TaosField
.
fields
.
name
.
type
.
size
=
65
;
TaosField
.
defineProperty
(
'
type
'
,
ref
.
types
.
char
);
TaosField
.
defineProperty
(
'
type
'
,
ref
.
types
.
char
);
TaosField
.
defineProperty
(
'
bytes
'
,
ref
.
types
.
short
);
TaosField
.
defineProperty
(
'
bytes
'
,
ref
.
types
.
short
);
...
@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short);
...
@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short);
* @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
* @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
* access this class directly and use it unless you understand what these functions do.
* access this class directly and use it unless you understand what these functions do.
*/
*/
function
CTaosInterface
(
config
=
null
,
pass
=
false
)
{
function
CTaosInterface
(
config
=
null
,
pass
=
false
)
{
ref
.
types
.
char_ptr
=
ref
.
refType
(
ref
.
types
.
char
);
ref
.
types
.
char_ptr
=
ref
.
refType
(
ref
.
types
.
char
);
ref
.
types
.
void_ptr
=
ref
.
refType
(
ref
.
types
.
void
);
ref
.
types
.
void_ptr
=
ref
.
refType
(
ref
.
types
.
void
);
ref
.
types
.
void_ptr2
=
ref
.
refType
(
ref
.
types
.
void_ptr
);
ref
.
types
.
void_ptr2
=
ref
.
refType
(
ref
.
types
.
void_ptr
);
...
@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) {
...
@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) {
taoslibname
=
'
libtaos
'
;
taoslibname
=
'
libtaos
'
;
}
}
this
.
libtaos
=
ffi
.
Library
(
taoslibname
,
{
this
.
libtaos
=
ffi
.
Library
(
taoslibname
,
{
'
taos_options
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
int
,
ref
.
types
.
void_ptr
]
],
'
taos_options
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
int
,
ref
.
types
.
void_ptr
]
],
'
taos_init
'
:
[
ref
.
types
.
void
,
[
]
],
'
taos_init
'
:
[
ref
.
types
.
void
,
[]
],
//TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
//TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
'
taos_connect
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
int
]
],
'
taos_connect
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
int
]
],
//void taos_close(TAOS *taos)
//void taos_close(TAOS *taos)
'
taos_close
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
'
taos_close
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
//int *taos_fetch_lengths(TAOS_RES *
tao
s);
//int *taos_fetch_lengths(TAOS_RES *
re
s);
'
taos_fetch_lengths
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
]
],
'
taos_fetch_lengths
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
]
],
//int taos_query(TAOS *taos, char *sqlstr)
//int taos_query(TAOS *taos, char *sqlstr)
'
taos_query
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
char_ptr
]
],
'
taos_query
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
char_ptr
]
],
//int taos_affected_rows(TAOS
*tao
s)
//int taos_affected_rows(TAOS
_RES *re
s)
'
taos_affected_rows
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
'
taos_affected_rows
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
//int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
//int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
'
taos_fetch_block
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]
],
'
taos_fetch_block
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]
],
//int taos_num_fields(TAOS_RES *res);
//int taos_num_fields(TAOS_RES *res);
'
taos_num_fields
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
'
taos_num_fields
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
//TAOS_ROW taos_fetch_row(TAOS_RES *res)
//TAOS_ROW taos_fetch_row(TAOS_RES *res)
//TAOS_ROW is void **, but we set the return type as a reference instead to get the row
//TAOS_ROW is void **, but we set the return type as a reference instead to get the row
'
taos_fetch_row
'
:
[
ref
.
refType
(
ref
.
types
.
void_ptr2
),
[
ref
.
types
.
void_ptr
]
],
'
taos_fetch_row
'
:
[
ref
.
refType
(
ref
.
types
.
void_ptr2
),
[
ref
.
types
.
void_ptr
]],
'
taos_print_row
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
]],
//int taos_result_precision(TAOS_RES *res)
//int taos_result_precision(TAOS_RES *res)
'
taos_result_precision
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
'
taos_result_precision
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
//void taos_free_result(TAOS_RES *res)
//void taos_free_result(TAOS_RES *res)
'
taos_free_result
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
'
taos_free_result
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
//int taos_field_count(TAOS *taos)
//int taos_field_count(TAOS *taos)
'
taos_field_count
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
'
taos_field_count
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
//TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
//TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
'
taos_fetch_fields
'
:
[
ref
.
refType
(
TaosField
),
[
ref
.
types
.
void_ptr
]
],
'
taos_fetch_fields
'
:
[
ref
.
refType
(
TaosField
),
[
ref
.
types
.
void_ptr
]
],
//int taos_errno(TAOS *taos)
//int taos_errno(TAOS *taos)
'
taos_errno
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
'
taos_errno
'
:
[
ref
.
types
.
int
,
[
ref
.
types
.
void_ptr
]
],
//char *taos_errstr(TAOS *taos)
//char *taos_errstr(TAOS *taos)
'
taos_errstr
'
:
[
ref
.
types
.
char_ptr
,
[
ref
.
types
.
void_ptr
]
],
'
taos_errstr
'
:
[
ref
.
types
.
char_ptr
,
[
ref
.
types
.
void_ptr
]
],
//void taos_stop_query(TAOS_RES *res);
//void taos_stop_query(TAOS_RES *res);
'
taos_stop_query
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
'
taos_stop_query
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
//char *taos_get_server_info(TAOS *taos);
//char *taos_get_server_info(TAOS *taos);
'
taos_get_server_info
'
:
[
ref
.
types
.
char_ptr
,
[
ref
.
types
.
void_ptr
]
],
'
taos_get_server_info
'
:
[
ref
.
types
.
char_ptr
,
[
ref
.
types
.
void_ptr
]
],
//char *taos_get_client_info();
//char *taos_get_client_info();
'
taos_get_client_info
'
:
[
ref
.
types
.
char_ptr
,
[
]
],
'
taos_get_client_info
'
:
[
ref
.
types
.
char_ptr
,
[]
],
// ASYNC
// ASYNC
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
'
taos_query_a
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]
],
'
taos_query_a
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]
],
// void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
// void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
'
taos_fetch_rows_a
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]],
'
taos_fetch_rows_a
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]],
// Subscription
// Subscription
//TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
//TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
'
taos_subscribe
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
int
,
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
]
],
'
taos_subscribe
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
int
,
ref
.
types
.
char_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
]
],
// TAOS_RES *taos_consume(TAOS_SUB *tsub)
// TAOS_RES *taos_consume(TAOS_SUB *tsub)
'
taos_consume
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
]
],
'
taos_consume
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
]
],
//void taos_unsubscribe(TAOS_SUB *tsub);
//void taos_unsubscribe(TAOS_SUB *tsub);
'
taos_unsubscribe
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
'
taos_unsubscribe
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
],
// Continuous Query
// Continuous Query
//TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
//TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
// int64_t stime, void *param, void (*callback)(void *));
// int64_t stime, void *param, void (*callback)(void *));
'
taos_open_stream
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int64
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]
],
'
taos_open_stream
'
:
[
ref
.
types
.
void_ptr
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
char_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int64
,
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
]
],
//void taos_close_stream(TAOS_STREAM *tstr);
//void taos_close_stream(TAOS_STREAM *tstr);
'
taos_close_stream
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
]
'
taos_close_stream
'
:
[
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
]
]
});
});
if
(
pass
==
false
)
{
if
(
pass
==
false
)
{
...
@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) {
...
@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) {
try
{
try
{
this
.
_config
=
ref
.
allocCString
(
config
);
this
.
_config
=
ref
.
allocCString
(
config
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
"
Attribute Error: config is expected as a str
"
;
throw
"
Attribute Error: config is expected as a str
"
;
}
}
}
}
...
@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) {
...
@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) {
return
this
;
return
this
;
}
}
CTaosInterface
.
prototype
.
config
=
function
config
()
{
CTaosInterface
.
prototype
.
config
=
function
config
()
{
return
this
.
_config
;
return
this
.
_config
;
}
}
CTaosInterface
.
prototype
.
connect
=
function
connect
(
host
=
null
,
user
=
"
root
"
,
password
=
"
taosdata
"
,
db
=
null
,
port
=
0
)
{
CTaosInterface
.
prototype
.
connect
=
function
connect
(
host
=
null
,
user
=
"
root
"
,
password
=
"
taosdata
"
,
db
=
null
,
port
=
0
)
{
let
_host
,
_user
,
_password
,
_db
,
_port
;
let
_host
,
_user
,
_password
,
_db
,
_port
;
try
{
try
{
_host
=
host
!=
null
?
ref
.
allocCString
(
host
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
_host
=
host
!=
null
?
ref
.
allocCString
(
host
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
"
Attribute Error: host is expected as a str
"
;
throw
"
Attribute Error: host is expected as a str
"
;
}
}
try
{
try
{
_user
=
ref
.
allocCString
(
user
)
_user
=
ref
.
allocCString
(
user
)
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
"
Attribute Error: user is expected as a str
"
;
throw
"
Attribute Error: user is expected as a str
"
;
}
}
try
{
try
{
_password
=
ref
.
allocCString
(
password
);
_password
=
ref
.
allocCString
(
password
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
"
Attribute Error: password is expected as a str
"
;
throw
"
Attribute Error: password is expected as a str
"
;
}
}
try
{
try
{
_db
=
db
!=
null
?
ref
.
allocCString
(
db
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
_db
=
db
!=
null
?
ref
.
allocCString
(
db
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
"
Attribute Error: db is expected as a str
"
;
throw
"
Attribute Error: db is expected as a str
"
;
}
}
try
{
try
{
_port
=
ref
.
alloc
(
ref
.
types
.
int
,
port
);
_port
=
ref
.
alloc
(
ref
.
types
.
int
,
port
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
TypeError
(
"
port is expected as an int
"
)
throw
TypeError
(
"
port is expected as an int
"
)
}
}
let
connection
=
this
.
libtaos
.
taos_connect
(
_host
,
_user
,
_password
,
_db
,
_port
);
let
connection
=
this
.
libtaos
.
taos_connect
(
_host
,
_user
,
_password
,
_db
,
_port
);
...
@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) {
...
@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) {
console
.
log
(
"
Connection is closed
"
);
console
.
log
(
"
Connection is closed
"
);
}
}
CTaosInterface
.
prototype
.
query
=
function
query
(
connection
,
sql
)
{
CTaosInterface
.
prototype
.
query
=
function
query
(
connection
,
sql
)
{
return
this
.
libtaos
.
taos_query
(
connection
,
ref
.
allocCString
(
sql
));
return
this
.
libtaos
.
taos_query
(
connection
,
ref
.
allocCString
(
sql
));
}
}
CTaosInterface
.
prototype
.
affectedRows
=
function
affectedRows
(
connection
)
{
CTaosInterface
.
prototype
.
affectedRows
=
function
affectedRows
(
result
)
{
return
this
.
libtaos
.
taos_affected_rows
(
connection
);
return
this
.
libtaos
.
taos_affected_rows
(
result
);
}
}
CTaosInterface
.
prototype
.
useResult
=
function
useResult
(
result
)
{
CTaosInterface
.
prototype
.
useResult
=
function
useResult
(
result
)
{
...
@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) {
...
@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) {
pfields
=
ref
.
reinterpret
(
pfields
,
this
.
fieldsCount
(
result
)
*
68
,
0
);
pfields
=
ref
.
reinterpret
(
pfields
,
this
.
fieldsCount
(
result
)
*
68
,
0
);
for
(
let
i
=
0
;
i
<
pfields
.
length
;
i
+=
68
)
{
for
(
let
i
=
0
;
i
<
pfields
.
length
;
i
+=
68
)
{
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
fields
.
push
(
{
fields
.
push
({
name
:
ref
.
readCString
(
ref
.
reinterpret
(
pfields
,
65
,
i
)),
name
:
ref
.
readCString
(
ref
.
reinterpret
(
pfields
,
65
,
i
)),
type
:
pfields
[
i
+
65
],
type
:
pfields
[
i
+
65
],
bytes
:
pfields
[
i
+
66
]
bytes
:
pfields
[
i
+
66
]
})
})
...
@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
...
@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
return
fields
;
return
fields
;
}
}
CTaosInterface
.
prototype
.
fetchBlock
=
function
fetchBlock
(
result
,
fields
)
{
CTaosInterface
.
prototype
.
fetchBlock
=
function
fetchBlock
(
result
,
fields
)
{
//let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
let
pblock
=
ref
.
NULL_POINTER
;
let
pblock
=
this
.
libtaos
.
taos_fetch_row
(
result
);
let
num_of_rows
=
this
.
libtaos
.
taos_fetch_block
(
result
,
pblock
);
let
num_of_rows
=
1
;
if
(
ref
.
isNull
(
pblock
.
deref
())
==
true
)
{
if
(
ref
.
isNull
(
pblock
)
==
true
)
{
return
{
block
:
null
,
num_of_rows
:
0
};
return
{
block
:
null
,
num_of_rows
:
0
};
}
}
var
fieldL
=
this
.
libtaos
.
taos_fetch_lengths
(
result
);
var
fieldL
=
this
.
libtaos
.
taos_fetch_lengths
(
result
);
...
@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
...
@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let
isMicro
=
(
this
.
libtaos
.
taos_result_precision
(
result
)
==
FieldTypes
.
C_TIMESTAMP_MICRO
);
let
isMicro
=
(
this
.
libtaos
.
taos_result_precision
(
result
)
==
FieldTypes
.
C_TIMESTAMP_MICRO
);
var
fieldlens
=
[];
var
fieldlens
=
[];
if
(
ref
.
isNull
(
fieldL
)
==
false
)
{
if
(
ref
.
isNull
(
fieldL
)
==
false
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
let
plen
=
ref
.
reinterpret
(
fieldL
,
4
,
i
*
4
);
let
plen
=
ref
.
reinterpret
(
fieldL
,
4
,
i
*
4
);
let
len
=
plen
.
readInt32LE
(
0
);
let
len
=
plen
.
readInt32LE
(
0
);
fieldlens
.
push
(
len
);
fieldlens
.
push
(
len
);
}
}
...
@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
...
@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let
blocks
=
new
Array
(
fields
.
length
);
let
blocks
=
new
Array
(
fields
.
length
);
blocks
.
fill
(
null
);
blocks
.
fill
(
null
);
//
num_of_rows = Math.abs(num_of_rows);
num_of_rows
=
Math
.
abs
(
num_of_rows
);
let
offset
=
0
;
let
offset
=
0
;
let
ptr
=
pblock
.
deref
();
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
pdata
=
ref
.
reinterpret
(
p
block
,
8
,
i
*
8
);
pdata
=
ref
.
reinterpret
(
p
tr
,
8
,
i
*
8
);
if
(
ref
.
isNull
(
pdata
.
readPointer
()))
{
if
(
ref
.
isNull
(
pdata
.
readPointer
()))
{
blocks
[
i
]
=
new
Array
();
blocks
[
i
]
=
new
Array
();
}
else
{
}
else
{
pdata
=
ref
.
ref
(
pdata
.
readPointer
());
pdata
=
ref
.
ref
(
pdata
.
readPointer
());
if
(
!
convertFunctions
[
fields
[
i
][
'
type
'
]]
)
{
if
(
!
convertFunctions
[
fields
[
i
][
'
type
'
]]
)
{
throw
new
errors
.
DatabaseError
(
"
Invalid data type returned from database
"
);
throw
new
errors
.
DatabaseError
(
"
Invalid data type returned from database
"
);
}
}
blocks
[
i
]
=
convertFunctions
[
fields
[
i
][
'
type
'
]](
pdata
,
1
,
fieldlens
[
i
],
offset
,
isMicro
);
blocks
[
i
]
=
convertFunctions
[
fields
[
i
][
'
type
'
]](
pdata
,
num_of_rows
,
fieldlens
[
i
],
offset
,
isMicro
);
}
}
}
}
return
{
blocks
:
blocks
,
num_of_rows
:
Math
.
abs
(
num_of_rows
)
}
return
{
blocks
:
blocks
,
num_of_rows
}
}
}
CTaosInterface
.
prototype
.
fetchRow
=
function
fetchRow
(
result
,
fields
)
{
CTaosInterface
.
prototype
.
fetchRow
=
function
fetchRow
(
result
,
fields
)
{
let
row
=
this
.
libtaos
.
taos_fetch_row
(
result
);
let
row
=
this
.
libtaos
.
taos_fetch_row
(
result
);
...
@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) {
...
@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) {
// Async
// Async
CTaosInterface
.
prototype
.
query_a
=
function
query_a
(
connection
,
sql
,
callback
,
param
=
ref
.
ref
(
ref
.
NULL
))
{
CTaosInterface
.
prototype
.
query_a
=
function
query_a
(
connection
,
sql
,
callback
,
param
=
ref
.
ref
(
ref
.
NULL
))
{
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
callback
=
ffi
.
Callback
(
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
],
callback
);
callback
=
ffi
.
Callback
(
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
],
callback
);
this
.
libtaos
.
taos_query_a
(
connection
,
ref
.
allocCString
(
sql
),
callback
,
param
);
this
.
libtaos
.
taos_query_a
(
connection
,
ref
.
allocCString
(
sql
),
callback
,
param
);
return
param
;
return
param
;
}
}
...
@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
...
@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
var
fieldL
=
cti
.
libtaos
.
taos_fetch_lengths
(
result
);
var
fieldL
=
cti
.
libtaos
.
taos_fetch_lengths
(
result
);
var
fieldlens
=
[];
var
fieldlens
=
[];
if
(
ref
.
isNull
(
fieldL
)
==
false
)
{
if
(
ref
.
isNull
(
fieldL
)
==
false
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
let
plen
=
ref
.
reinterpret
(
fieldL
,
8
,
i
*
8
);
let
plen
=
ref
.
reinterpret
(
fieldL
,
8
,
i
*
8
);
let
len
=
ref
.
get
(
plen
,
0
,
ref
.
types
.
int32
);
let
len
=
ref
.
get
(
plen
,
0
,
ref
.
types
.
int32
);
fieldlens
.
push
(
len
);
fieldlens
.
push
(
len
);
}
}
}
}
if
(
numOfRows2
>
0
){
if
(
numOfRows2
>
0
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
for
(
let
i
=
0
;
i
<
fields
.
length
;
i
++
)
{
if
(
ref
.
isNull
(
pdata
.
readPointer
()))
{
if
(
ref
.
isNull
(
pdata
.
readPointer
()))
{
blocks
[
i
]
=
new
Array
();
blocks
[
i
]
=
new
Array
();
}
else
{
}
else
{
if
(
!
convertFunctions
[
fields
[
i
][
'
type
'
]]
)
{
if
(
!
convertFunctions
[
fields
[
i
][
'
type
'
]])
{
throw
new
errors
.
DatabaseError
(
"
Invalid data type returned from database
"
);
throw
new
errors
.
DatabaseError
(
"
Invalid data type returned from database
"
);
}
}
let
prow
=
ref
.
reinterpret
(
row
,
8
,
i
*
8
);
let
prow
=
ref
.
reinterpret
(
row
,
8
,
i
*
8
);
prow
=
prow
.
readPointer
();
prow
=
prow
.
readPointer
();
prow
=
ref
.
ref
(
prow
);
prow
=
ref
.
ref
(
prow
);
blocks
[
i
]
=
convertFunctions
[
fields
[
i
][
'
type
'
]](
prow
,
1
,
fieldlens
[
i
],
offset
,
isMicro
);
blocks
[
i
]
=
convertFunctions
[
fields
[
i
][
'
type
'
]](
prow
,
1
,
fieldlens
[
i
],
offset
,
isMicro
);
//offset += fields[i]['bytes'] * numOfRows2;
//offset += fields[i]['bytes'] * numOfRows2;
}
}
}
}
}
}
callback
(
param2
,
result2
,
numOfRows2
,
blocks
);
callback
(
param2
,
result2
,
numOfRows2
,
blocks
);
}
}
asyncCallbackWrapper
=
ffi
.
Callback
(
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
],
asyncCallbackWrapper
);
asyncCallbackWrapper
=
ffi
.
Callback
(
ref
.
types
.
void
,
[
ref
.
types
.
void_ptr
,
ref
.
types
.
void_ptr
,
ref
.
types
.
int
],
asyncCallbackWrapper
);
this
.
libtaos
.
taos_fetch_rows_a
(
result
,
asyncCallbackWrapper
,
param
);
this
.
libtaos
.
taos_fetch_rows_a
(
result
,
asyncCallbackWrapper
,
param
);
return
param
;
return
param
;
}
}
// Fetch field meta data by result handle
// Fetch field meta data by result handle
CTaosInterface
.
prototype
.
fetchFields_a
=
function
fetchFields_a
(
result
)
{
CTaosInterface
.
prototype
.
fetchFields_a
=
function
fetchFields_a
(
result
)
{
let
pfields
=
this
.
fetchFields
(
result
);
let
pfields
=
this
.
fetchFields
(
result
);
let
pfieldscount
=
this
.
numFields
(
result
);
let
pfieldscount
=
this
.
numFields
(
result
);
let
fields
=
[];
let
fields
=
[];
if
(
ref
.
isNull
(
pfields
)
==
false
)
{
if
(
ref
.
isNull
(
pfields
)
==
false
)
{
pfields
=
ref
.
reinterpret
(
pfields
,
68
*
pfieldscount
,
0
);
pfields
=
ref
.
reinterpret
(
pfields
,
68
*
pfieldscount
,
0
);
for
(
let
i
=
0
;
i
<
pfields
.
length
;
i
+=
68
)
{
for
(
let
i
=
0
;
i
<
pfields
.
length
;
i
+=
68
)
{
//0 - 64 = name //65 = type, 66 - 67 = bytes
//0 - 64 = name //65 = type, 66 - 67 = bytes
fields
.
push
(
{
fields
.
push
({
name
:
ref
.
readCString
(
ref
.
reinterpret
(
pfields
,
65
,
i
)),
name
:
ref
.
readCString
(
ref
.
reinterpret
(
pfields
,
65
,
i
)),
type
:
pfields
[
i
+
65
],
type
:
pfields
[
i
+
65
],
bytes
:
pfields
[
i
+
66
]
bytes
:
pfields
[
i
+
66
]
})
})
...
@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
...
@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
}
}
// Stop a query by result handle
// Stop a query by result handle
CTaosInterface
.
prototype
.
stopQuery
=
function
stopQuery
(
result
)
{
CTaosInterface
.
prototype
.
stopQuery
=
function
stopQuery
(
result
)
{
if
(
result
!=
null
){
if
(
result
!=
null
)
{
this
.
libtaos
.
taos_stop_query
(
result
);
this
.
libtaos
.
taos_stop_query
(
result
);
}
}
else
{
else
{
...
@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top
...
@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top
try
{
try
{
sql
=
sql
!=
null
?
ref
.
allocCString
(
sql
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
sql
=
sql
!=
null
?
ref
.
allocCString
(
sql
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
"
Attribute Error: sql is expected as a str
"
;
throw
"
Attribute Error: sql is expected as a str
"
;
}
}
try
{
try
{
topic
=
topic
!=
null
?
ref
.
allocCString
(
topic
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
topic
=
topic
!=
null
?
ref
.
allocCString
(
topic
)
:
ref
.
alloc
(
ref
.
types
.
char_ptr
,
ref
.
NULL
);
}
}
catch
(
err
)
{
catch
(
err
)
{
throw
TypeError
(
"
topic is expected as a str
"
);
throw
TypeError
(
"
topic is expected as a str
"
);
}
}
...
@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) {
...
@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) {
pfields
=
ref
.
reinterpret
(
pfields
,
this
.
numFields
(
result
)
*
68
,
0
);
pfields
=
ref
.
reinterpret
(
pfields
,
this
.
numFields
(
result
)
*
68
,
0
);
for
(
let
i
=
0
;
i
<
pfields
.
length
;
i
+=
68
)
{
for
(
let
i
=
0
;
i
<
pfields
.
length
;
i
+=
68
)
{
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
fields
.
push
(
{
fields
.
push
({
name
:
ref
.
readCString
(
ref
.
reinterpret
(
pfields
,
64
,
i
)),
name
:
ref
.
readCString
(
ref
.
reinterpret
(
pfields
,
64
,
i
)),
bytes
:
pfields
[
i
+
64
],
bytes
:
pfields
[
i
+
64
],
type
:
      type: pfields[i + 66]
    })
  })
...
@@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
    }
  }

  let data = [];
  while (true) {
    let { blocks, num_of_rows } = this.fetchBlock(result, fields);
    if (num_of_rows == 0) {
      break;
...
@@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
      for (let j = 0; j < fields.length; j++) {
        rowBlock[j] = blocks[j][i];
      }
      data[data.length - 1] = (rowBlock);
    }
  }
  return { data: data, fields: fields, result: result };
...
@@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
}

// Continuous Query
CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
  try {
    sql = ref.allocCString(sql);
  }
  catch (err) {
    throw "Attribute Error: sql string is expected as a str";
  }
  var cti = this;
...
@@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
    let offset = 0;
    if (numOfRows2 > 0) {
      for (let i = 0; i < fields.length; i++) {
        if (!convertFunctions[fields[i]['type']]) {
          throw new errors.DatabaseError("Invalid data type returned from database");
        }
        blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
...
@@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
    }
    callback(param2, result2, blocks, fields);
  }
  asyncCallbackWrapper = ffi.Callback(ref.types.void,
      [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
  asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
  let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
  if (ref.isNull(streamHandle)) {
    throw new errors.TDError('Failed to open a stream with TDengine');
...
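For orientation, a minimal usage sketch of the subscription path that consume() above serves. Everything specific in it is assumed rather than taken from the diff: the connection options, the database and topic names, and the exact fields of the subscribe() config are illustrative, and error handling and unsubscribe are omitted.

// Hypothetical driver for the consume() path above (assumed host/db/topic names).
const taos = require('td2.0-connector');

const conn = taos.connect({ host: '127.0.0.1', user: 'root', password: 'taosdata' });
const cursor = conn.cursor();
cursor.execute('use test');

// subscribe() returns a subscription handle; consumeData() then calls
// this._chandle.consume(subscription) in a loop, as shown in cursor.js below.
const sub = cursor.subscribe({ topic: 'demo_topic', sql: 'select * from meters;', restart: true, interval: 1000 });
cursor.consumeData(sub, (data, fields, result) => {
  console.log(fields.length, 'fields,', data.length, 'rows in this batch');
});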
src/connector/nodejs/nodetaos/cursor.js
const ref = require('ref-napi');
require('./globalfunc.js')
const CTaosInterface = require('./cinterface')
const errors = require('./error')
const TaosQuery = require('./taosquery')
const { PerformanceObserver, performance } = require('perf_hooks');
module.exports = TDengineCursor;
...
@@ -22,7 +22,7 @@ module.exports = TDengineCursor;
 * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
 * @since 1.0.0
 */
function TDengineCursor(connection = null) {
  //All parameters are store for sync queries only.
  this._rowcount = -1;
  this._connection = null;
...
@@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
    return null;
  }
  if (typeof options == 'function') {
    callback = options;
  }
  if (typeof options != 'object') options = {}
...
@@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
}
TDengineCursor.prototype._createAffectedResponse = function (num, time) {
  return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
}
TDengineCursor.prototype._createSetResponse = function (num, time) {
  return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
}
TDengineCursor.prototype.executemany = function executemany() {
...
@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
    throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
  }
-  let data = [];
+  let num_of_rows = this._chandle.affectedRows(this._result);
+  let data = new Array(num_of_rows);
  this._rowcount = 0;
-  //let nodetime = 0;
  let time = 0;
  const obs = new PerformanceObserver((items) => {
    time += items.getEntries()[0].duration;
    performance.clearMarks();
  });
-  /*
-  const obs2 = new PerformanceObserver((items) => {
-    nodetime += items.getEntries()[0].duration;
-    performance.clearMarks();
-  });
-  obs2.observe({ entryTypes: ['measure'] });
-  performance.mark('nodea');
-  */
  obs.observe({ entryTypes: ['measure'] });
  performance.mark('A');
  while (true) {
    let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
-    // console.log(blockAndRows);
-    // break;
    let block = blockAndRows.blocks;
    let num_of_rows = blockAndRows.num_of_rows;
    if (num_of_rows == 0) {
...
@@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
    this._rowcount += num_of_rows;
    let numoffields = this._fields.length;
    for (let i = 0; i < num_of_rows; i++) {
-      data.push([]);
+      // data.push([]);
      let rowBlock = new Array(numoffields);
      for (let j = 0; j < numoffields; j++) {
        rowBlock[j] = block[j][i];
      }
-      data[data.length - 1] = (rowBlock);
+      data[this._rowcount - num_of_rows + i] = (rowBlock);
+      // data.push(rowBlock);
    }
  }
  performance.mark('B');
  performance.measure('query', 'A', 'B');
  let response = this._createSetResponse(this._rowcount, time)
  console.log(response);
  // this._connection._clearResultSet();
  let fields = this.fields;
  this._reset_result();
  this.data = data;
...
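The fetchall() hunks above replace repeated data.push([]) with an array preallocated from affectedRows() and positional writes. A small self-contained sketch of that pattern follows; collectRows and fetchNextBlock are illustrative names only and are not part of the connector.

// Illustrative only: preallocate the result array and write rows by absolute index,
// mirroring data[this._rowcount - num_of_rows + i] = rowBlock in the diff above.
function collectRows(totalRows, fetchNextBlock) {
  const data = new Array(totalRows);      // sized once, like affectedRows(this._result)
  let rowcount = 0;
  while (true) {
    const { block, num_of_rows } = fetchNextBlock();
    if (num_of_rows === 0) break;         // no more blocks to read
    rowcount += num_of_rows;
    for (let i = 0; i < num_of_rows; i++) {
      data[rowcount - num_of_rows + i] = block[i];   // absolute row position
    }
  }
  return data;
}

Writing by index into a preallocated array avoids regrowing the array on every row, which is the point of this change.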
@@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
 * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
 * @since 1.0.0
 */
TDengineCursor.prototype.execute_a = function execute_a (operation, options, callback, param) {
  if (operation == undefined) {
    throw new errors.ProgrammingError('No operation passed as argument');
    return null;
  }
  if (typeof options == 'function') {
    //we expect the parameter after callback to be param
    param = callback;
    callback = options;
...
@@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
  }
  if (resCode >= 0) {
    // let fieldCount = cr._chandle.numFields(res2);
    // if (fieldCount == 0) {
    //   //cr._chandle.freeResult(res2);
    //   return res2;
    // }
    // else {
    //   return res2;
    // }
    return res2;
  }
...
@@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
 * })
 */
TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
  if (typeof options == 'function') {
    //we expect the parameter after callback to be param
    param = callback;
    callback = options;
...
@@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
        for (let k = 0; k < fields.length; k++) {
          rowBlock[k] = block[k][j];
        }
        data[data.length - 1] = rowBlock;
      }
    }
    cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
    callback(param2, result2, numOfRows2, { data: data, fields: fields });
  }
  ref.writeObject(buf, 0, param);
  param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
  return { param: param, result: result };
}
/**
 * Stop a query given the result handle.
...
@@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) {
 */
TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
  while (true) {
    let { data, fields, result } = this._chandle.consume(subscription);
    callback(data, fields, result);
  }
}
...
@@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
 * @return {Buffer} A buffer pointing to the stream handle
 * @since 1.3.0
 */
TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
  let buf = ref.alloc('Object');
  ref.writeObject(buf, 0, param);

  let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
    let data = [];
    let num_of_rows = blocks[0].length;
    for (let j = 0; j < num_of_rows; j++) {
      data.push([]);
      let rowBlock = new Array(fields.length);
      for (let k = 0; k < fields.length; k++) {
        rowBlock[k] = blocks[k][j];
      }
      data[data.length - 1] = rowBlock;
    }
    callback(param2, result2, blocks, fields);
  }
  return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
}
/**
 * Close a stream
 * @param {Buffer} - A buffer pointing to the handle of the stream to be closed
 * @since 1.3.0
 */
TDengineCursor.prototype.closeStream = function closeStream(stream) {
  this._chandle.closeStream(stream);
}
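A hedged sketch of how openStream()/closeStream() above are typically driven. The SQL text is made up for illustration, and cursor is assumed to come from an existing connection as in the subscription sketch earlier; neither comes from the diff.

// Hypothetical continuous query over the stream API defined above.
const stream = cursor.openStream(
  'select count(*) from test.meters interval(10s)',    // assumed SQL, not from the diff
  function (param, result, blocks, fields) {            // invoked for each new result window
    console.log('stream window:', blocks);
  },
  0,                                                    // stime: 0 = start from "now"
  function () { console.log('stream stopped'); }        // stoppingCallback
);

// ... later, release the stream handle:
cursor.closeStream(stream);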
src/connector/nodejs/package-lock.json
deleted 100644 → 0
{
  "name": "td2.0-connector",
  "version": "2.0.6",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {
    "array-index": {
      "version": "1.0.0", "resolved": "https://registry.npmjs.org/array-index/-/array-index-1.0.0.tgz", "integrity": "sha1-7FanSe4QPk4Ix5C5w1PfFgVbl/k=",
      "requires": { "debug": "^2.2.0", "es6-symbol": "^3.0.2" },
      "dependencies": {
        "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } },
        "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }
      }
    },
    "d": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", "requires": { "es5-ext": "^0.10.50", "type": "^1.0.1" } },
    "debug": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } },
    "es5-ext": { "version": "0.10.53", "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", "requires": { "es6-iterator": "~2.0.3", "es6-symbol": "~3.1.3", "next-tick": "~1.0.0" } },
    "es6-iterator": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", "requires": { "d": "1", "es5-ext": "^0.10.35", "es6-symbol": "^3.1.1" } },
    "es6-symbol": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", "requires": { "d": "^1.0.1", "ext": "^1.1.2" } },
    "ext": {
      "version": "1.4.0", "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==",
      "requires": { "type": "^2.0.0" },
      "dependencies": {
        "type": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz", "integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA==" }
      }
    },
    "ffi-napi": {
      "version": "3.1.0", "resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-3.1.0.tgz", "integrity": "sha512-EsHO+sP2p/nUC/3l/l8m9niee1BLm4asUFDzkkBGR4kYVgp2KqdAYUomZhkKtzim4Fq7mcYHjpUaIHsMqs+E1g==",
      "requires": { "debug": "^4.1.1", "get-uv-event-loop-napi-h": "^1.0.5", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1", "ref-napi": "^2.0.1", "ref-struct-di": "^1.1.0" },
      "dependencies": {
        "ref-napi": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-2.1.2.tgz", "integrity": "sha512-aFl+vrIuLWUXMUTQGAwGAuSNLX3Ub5W3iVP8b7KyFFZUdn4+i4U1TXXTop0kCTUfGNu8glBGVz4lowkwMcPVVA==", "requires": { "debug": "^4.1.1", "get-symbol-from-current-process-h": "^1.0.2", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" } }
      }
    },
    "get-symbol-from-current-process-h": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz", "integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw==" },
    "get-uv-event-loop-napi-h": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz", "integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==", "requires": { "get-symbol-from-current-process-h": "^1.0.1" } },
    "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" },
    "next-tick": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" },
    "node-addon-api": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz", "integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==" },
    "node-gyp-build": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==" },
    "ref-array-napi": {
      "version": "1.2.1", "resolved": "https://registry.npmjs.org/ref-array-napi/-/ref-array-napi-1.2.1.tgz", "integrity": "sha512-jQp2WWSucmxkqVfoNfm7yDlDeGu3liAbzqfwjNybL80ooLOCnCZpAK2woDInY+lxNOK/VlIVSqeDEYb4gVPuNQ==",
      "requires": { "array-index": "1", "debug": "2", "ref-napi": "^1.4.2" },
      "dependencies": {
        "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } },
        "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" },
        "ref-napi": {
          "version": "1.5.2", "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz", "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
          "requires": { "debug": "^3.1.0", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" },
          "dependencies": {
            "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } },
            "ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }
          }
        }
      }
    },
    "ref-napi": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.1.tgz", "integrity": "sha512-W3rcb0E+tlO9u9ySFnX5vifInwwPGToOfFgTZUHJBNiOBsW0NNvgHz2zJN7ctABo/2yIlgdPQUvuqqfORIF4LA==", "requires": { "debug": "^4.1.1", "get-symbol-from-current-process-h": "^1.0.2", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" } },
    "ref-struct-di": {
      "version": "1.1.1", "resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz", "integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==",
      "requires": { "debug": "^3.1.0" },
      "dependencies": {
        "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } }
      }
    },
    "ref-struct-napi": {
      "version": "1.1.1", "resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz", "integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==",
      "requires": { "debug": "2", "ref-napi": "^1.4.2" },
      "dependencies": {
        "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } },
        "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" },
        "ref-napi": {
          "version": "1.5.2", "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz", "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
          "requires": { "debug": "^3.1.0", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" },
          "dependencies": {
            "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } },
            "ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }
          }
        }
      }
    },
    "type": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" }
  }
}
src/connector/nodejs/package.json
{
   "name": "td2.0-connector",
-  "version": "2.0.6",
+  "version": "2.0.7",
   "description": "A Node.js connector for TDengine.",
   "main": "tdengine.js",
   "directories": {
...
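The only change here is the version bump from 2.0.6 to 2.0.7 (together with the removal of the checked-in package-lock.json above). A quick, assumed sanity check from a consuming project would be:

// Hypothetical check in a dependent project: confirm the installed connector version.
const pkg = require('td2.0-connector/package.json');
console.log(pkg.name, pkg.version);   // expected: td2.0-connector 2.0.7 after this commit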
src/dnode/src/dnodeMain.c
...
@@ -40,6 +40,7 @@
 #include "dnodeShell.h"
 #include "dnodeTelemetry.h"
 #include "module.h"
+#include "mnode.h"

 #if !defined(_MODULE) || !defined(_TD_LINUX)
 int32_t moduleStart() { return 0; }
...
@@ -216,6 +217,17 @@ static int32_t dnodeInitStorage() {
   sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
   // sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir);

+  if (tsCompactMnodeWal == 1) {
+    sprintf(tsMnodeTmpDir, "%s/mnode_tmp", tsDataDir);
+    tfsRmdir(tsMnodeTmpDir);
+    if (dnodeCreateDir(tsMnodeTmpDir) < 0) {
+      dError("failed to create dir: %s, reason: %s", tsMnodeTmpDir, strerror(errno));
+      return -1;
+    }
+
+    sprintf(tsMnodeBakDir, "%s/mnode_bak", tsDataDir);
+    //tfsRmdir(tsMnodeBakDir);
+  }
   //TODO(dengyihao): no need to init here
   if (dnodeCreateDir(tsMnodeDir) < 0) {
     dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno));
...
src/dnode/src/dnodeSystem.c
...
@@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) {
       }
     } else if (strcmp(argv[i], "-C") == 0) {
       dump_config = 1;
+    } else if (strcmp(argv[i], "--compact-mnode-wal") == 0) {
+      tsCompactMnodeWal = 1;
     } else if (strcmp(argv[i], "-V") == 0) {
 #ifdef _ACCT
       char *versionStr = "enterprise";
...
src/inc/mnode.h
...
@@ -73,6 +73,9 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg);
 void    mnodeProcessPeerRsp(SRpcMsg *pMsg);
 int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey);

+int32_t mnodeCompactWal();
+int32_t mnodeCompactComponents();
+
 #ifdef __cplusplus
 }
 #endif
...
src/inc/monitor.h
...
@@ -54,7 +54,8 @@ void monCleanupSystem();
 void monSaveAcctLog(SAcctMonitorObj *pMonObj);
 void monSaveLog(int32_t level, const char *const format, ...);
 void monExecuteSQL(char *sql);
+typedef void (*MonExecuteSQLCbFP)(void *param, TAOS_RES *, int code);
+void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void *param);

 #ifdef __cplusplus
 }
 #endif
...
src/inc/taoserror.h
...
@@ -427,6 +427,9 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_FS_INVLD_LEVEL        TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level")
 #define TSDB_CODE_FS_NO_VALID_DISK      TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk")

+// monitor
+#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection")
+
 #ifdef __cplusplus
 }
 #endif
...
src/kit/taosdemo/subscribe.json
{
  "filetype": "subscribe",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
...
src/kit/taosdemo/taosdemo.c
...
@@ -61,6 +61,8 @@ extern char configDir[];
...
@@ -61,6 +61,8 @@ extern char configDir[];
#define QUERY_JSON_NAME "query.json"
#define QUERY_JSON_NAME "query.json"
#define SUBSCRIBE_JSON_NAME "subscribe.json"
#define SUBSCRIBE_JSON_NAME "subscribe.json"
#define STR_INSERT_INTO "INSERT INTO "
enum
TEST_MODE
{
enum
TEST_MODE
{
INSERT_TEST
,
// 0
INSERT_TEST
,
// 0
QUERY_TEST
,
// 1
QUERY_TEST
,
// 1
...
@@ -70,6 +72,8 @@ enum TEST_MODE {
...
@@ -70,6 +72,8 @@ enum TEST_MODE {
#define MAX_RECORDS_PER_REQ 32766
#define MAX_RECORDS_PER_REQ 32766
#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into ..
#define MAX_SQL_SIZE 65536
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
#define BUFFER_SIZE (65536*2)
#define COND_BUF_LEN BUFFER_SIZE - 30
#define COND_BUF_LEN BUFFER_SIZE - 30
...
@@ -196,6 +200,8 @@ typedef struct {
...
@@ -196,6 +200,8 @@ typedef struct {
}
SColDes
;
}
SColDes
;
/* Used by main to communicate with parse_opt. */
/* Used by main to communicate with parse_opt. */
static
char
*
g_dupstr
=
NULL
;
typedef
struct
SArguments_S
{
typedef
struct
SArguments_S
{
char
*
metaFile
;
char
*
metaFile
;
uint32_t
test_mode
;
uint32_t
test_mode
;
...
@@ -241,7 +247,7 @@ typedef struct SColumn_S {
...
@@ -241,7 +247,7 @@ typedef struct SColumn_S {
char
field
[
TSDB_COL_NAME_LEN
+
1
];
char
field
[
TSDB_COL_NAME_LEN
+
1
];
char
dataType
[
MAX_TB_NAME_SIZE
];
char
dataType
[
MAX_TB_NAME_SIZE
];
uint32_t
dataLen
;
uint32_t
dataLen
;
char
note
[
128
];
char
note
[
128
];
}
StrColumn
;
}
StrColumn
;
typedef
struct
SSuperTable_S
{
typedef
struct
SSuperTable_S
{
...
@@ -374,7 +380,7 @@ typedef struct SpecifiedQueryInfo_S {
...
@@ -374,7 +380,7 @@ typedef struct SpecifiedQueryInfo_S {
uint32_t
asyncMode
;
// 0: sync, 1: async
uint32_t
asyncMode
;
// 0: sync, 1: async
uint64_t
subscribeInterval
;
// ms
uint64_t
subscribeInterval
;
// ms
uint64_t
queryTimes
;
uint64_t
queryTimes
;
int
subscribeRestart
;
bool
subscribeRestart
;
int
subscribeKeepProgress
;
int
subscribeKeepProgress
;
char
sql
[
MAX_QUERY_SQL_COUNT
][
MAX_QUERY_SQL_LENGTH
+
1
];
char
sql
[
MAX_QUERY_SQL_COUNT
][
MAX_QUERY_SQL_LENGTH
+
1
];
char
result
[
MAX_QUERY_SQL_COUNT
][
MAX_FILE_NAME_LEN
+
1
];
char
result
[
MAX_QUERY_SQL_COUNT
][
MAX_FILE_NAME_LEN
+
1
];
...
@@ -389,7 +395,7 @@ typedef struct SuperQueryInfo_S {
...
@@ -389,7 +395,7 @@ typedef struct SuperQueryInfo_S {
uint32_t
threadCnt
;
uint32_t
threadCnt
;
uint32_t
asyncMode
;
// 0: sync, 1: async
uint32_t
asyncMode
;
// 0: sync, 1: async
uint64_t
subscribeInterval
;
// ms
uint64_t
subscribeInterval
;
// ms
int
subscribeRestart
;
bool
subscribeRestart
;
int
subscribeKeepProgress
;
int
subscribeKeepProgress
;
uint64_t
queryTimes
;
uint64_t
queryTimes
;
int64_t
childTblCount
;
int64_t
childTblCount
;
...
@@ -535,11 +541,12 @@ static int taosRandom()
...
@@ -535,11 +541,12 @@ static int taosRandom()
#endif // ifdef Windows
#endif // ifdef Windows
static
void
prompt
();
static
int
createDatabasesAndStables
();
static
int
createDatabasesAndStables
();
static
void
createChildTables
();
static
void
createChildTables
();
static
int
queryDbExec
(
TAOS
*
taos
,
char
*
command
,
QUERY_TYPE
type
,
bool
quiet
);
static
int
queryDbExec
(
TAOS
*
taos
,
char
*
command
,
QUERY_TYPE
type
,
bool
quiet
);
static
int
postProceSql
(
char
*
host
,
struct
sockaddr_in
*
pServAddr
,
uint16_t
port
,
static
int
postProceSql
(
char
*
host
,
struct
sockaddr_in
*
pServAddr
,
char
*
sqlstr
,
char
*
resultFile
);
uint16_t
port
,
char
*
sqlstr
,
char
*
resultFile
);
/* ************ Global variables ************ */
/* ************ Global variables ************ */
...
@@ -690,8 +697,9 @@ static void printHelp() {
...
@@ -690,8 +697,9 @@ static void printHelp() {
"The data_type of columns, default: INT,INT,INT,INT."
);
"The data_type of columns, default: INT,INT,INT,INT."
);
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-w"
,
indent
,
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-w"
,
indent
,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16"
);
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16"
);
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-l"
,
indent
,
printf
(
"%s%s%s%s%d
\n
"
,
indent
,
"-l"
,
indent
,
"The number of columns per record. Default is 4."
);
"The number of columns per record. Default is 4. Max values is "
,
MAX_NUM_DATATYPE
);
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-T"
,
indent
,
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-T"
,
indent
,
"The number of threads. Default is 10."
);
"The number of threads. Default is 10."
);
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-i"
,
indent
,
printf
(
"%s%s%s%s
\n
"
,
indent
,
"-i"
,
indent
,
...
@@ -735,7 +743,6 @@ static bool isStringNumber(char *input)
...
@@ -735,7 +743,6 @@ static bool isStringNumber(char *input)
}
}
static
void
parse_args
(
int
argc
,
char
*
argv
[],
SArguments
*
arguments
)
{
static
void
parse_args
(
int
argc
,
char
*
argv
[],
SArguments
*
arguments
)
{
char
**
sptr
;
for
(
int
i
=
1
;
i
<
argc
;
i
++
)
{
for
(
int
i
=
1
;
i
<
argc
;
i
++
)
{
if
(
strcmp
(
argv
[
i
],
"-f"
)
==
0
)
{
if
(
strcmp
(
argv
[
i
],
"-f"
)
==
0
)
{
...
@@ -879,20 +886,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
...
@@ -879,20 +886,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
}
arguments
->
database
=
argv
[
++
i
];
arguments
->
database
=
argv
[
++
i
];
}
else
if
(
strcmp
(
argv
[
i
],
"-l"
)
==
0
)
{
}
else
if
(
strcmp
(
argv
[
i
],
"-l"
)
==
0
)
{
if
((
argc
==
i
+
1
)
||
if
(
argc
==
i
+
1
)
{
(
!
isStringNumber
(
argv
[
i
+
1
])))
{
if
(
!
isStringNumber
(
argv
[
i
+
1
]))
{
printHelp
();
printHelp
();
errorPrint
(
"%s"
,
"
\n\t
-l need a number following!
\n
"
);
errorPrint
(
"%s"
,
"
\n\t
-l need a number following!
\n
"
);
exit
(
EXIT_FAILURE
);
exit
(
EXIT_FAILURE
);
}
}
}
arguments
->
num_of_CPR
=
atoi
(
argv
[
++
i
]);
arguments
->
num_of_CPR
=
atoi
(
argv
[
++
i
]);
if
(
arguments
->
num_of_CPR
>
MAX_NUM_DATATYPE
)
{
printf
(
"WARNING: max acceptible columns count is %d
\n
"
,
MAX_NUM_DATATYPE
);
prompt
();
arguments
->
num_of_CPR
=
MAX_NUM_DATATYPE
;
}
for
(
int
col
=
arguments
->
num_of_CPR
;
col
<
MAX_NUM_DATATYPE
;
col
++
)
{
arguments
->
datatype
[
col
]
=
NULL
;
}
}
else
if
(
strcmp
(
argv
[
i
],
"-b"
)
==
0
)
{
}
else
if
(
strcmp
(
argv
[
i
],
"-b"
)
==
0
)
{
if
(
argc
==
i
+
1
)
{
if
(
argc
==
i
+
1
)
{
printHelp
();
printHelp
();
errorPrint
(
"%s"
,
"
\n\t
-b need valid string following!
\n
"
);
errorPrint
(
"%s"
,
"
\n\t
-b need valid string following!
\n
"
);
exit
(
EXIT_FAILURE
);
exit
(
EXIT_FAILURE
);
}
}
sptr
=
arguments
->
datatype
;
++
i
;
++
i
;
if
(
strstr
(
argv
[
i
],
","
)
==
NULL
)
{
if
(
strstr
(
argv
[
i
],
","
)
==
NULL
)
{
// only one col
// only one col
...
@@ -909,12 +927,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
...
@@ -909,12 +927,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint
(
"%s"
,
"-b: Invalid data_type!
\n
"
);
errorPrint
(
"%s"
,
"-b: Invalid data_type!
\n
"
);
exit
(
EXIT_FAILURE
);
exit
(
EXIT_FAILURE
);
}
}
sptr
[
0
]
=
argv
[
i
];
arguments
->
datatype
[
0
]
=
argv
[
i
];
}
else
{
}
else
{
// more than one col
// more than one col
int
index
=
0
;
int
index
=
0
;
char
*
dupstr
=
strdup
(
argv
[
i
]);
g_
dupstr
=
strdup
(
argv
[
i
]);
char
*
running
=
dupstr
;
char
*
running
=
g_
dupstr
;
char
*
token
=
strsep
(
&
running
,
","
);
char
*
token
=
strsep
(
&
running
,
","
);
while
(
token
!=
NULL
)
{
while
(
token
!=
NULL
)
{
if
(
strcasecmp
(
token
,
"INT"
)
if
(
strcasecmp
(
token
,
"INT"
)
...
@@ -927,16 +945,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
...
@@ -927,16 +945,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
&&
strcasecmp
(
token
,
"BINARY"
)
&&
strcasecmp
(
token
,
"BINARY"
)
&&
strcasecmp
(
token
,
"NCHAR"
))
{
&&
strcasecmp
(
token
,
"NCHAR"
))
{
printHelp
();
printHelp
();
free
(
dupstr
);
free
(
g_
dupstr
);
errorPrint
(
"%s"
,
"-b: Invalid data_type!
\n
"
);
errorPrint
(
"%s"
,
"-b: Invalid data_type!
\n
"
);
exit
(
EXIT_FAILURE
);
exit
(
EXIT_FAILURE
);
}
}
sptr
[
index
++
]
=
token
;
arguments
->
datatype
[
index
++
]
=
token
;
token
=
strsep
(
&
running
,
","
);
token
=
strsep
(
&
running
,
","
);
if
(
index
>=
MAX_NUM_DATATYPE
)
break
;
if
(
index
>=
MAX_NUM_DATATYPE
)
break
;
}
}
free
(
dupstr
);
arguments
->
datatype
[
index
]
=
NULL
;
sptr
[
index
]
=
NULL
;
}
}
}
else
if
(
strcmp
(
argv
[
i
],
"-w"
)
==
0
)
{
}
else
if
(
strcmp
(
argv
[
i
],
"-w"
)
==
0
)
{
if
((
argc
==
i
+
1
)
||
if
((
argc
==
i
+
1
)
||
...
@@ -1071,16 +1088,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
...
@@ -1071,16 +1088,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
printf
(
"# Print debug info: %d
\n
"
,
arguments
->
debug_print
);
printf
(
"# Print debug info: %d
\n
"
,
arguments
->
debug_print
);
printf
(
"# Print verbose info: %d
\n
"
,
arguments
->
verbose_print
);
printf
(
"# Print verbose info: %d
\n
"
,
arguments
->
verbose_print
);
printf
(
"###################################################################
\n
"
);
printf
(
"###################################################################
\n
"
);
if
(
!
arguments
->
answer_yes
)
{
printf
(
"Press enter key to continue
\n\n
"
);
prompt
();
(
void
)
getchar
();
}
}
}
}
}
static
bool
getInfoFromJsonFile
(
char
*
file
);
static
bool
getInfoFromJsonFile
(
char
*
file
);
//static int generateOneRowDataForStb(SSuperTable* stbInfo);
//static int getDataIntoMemForStb(SSuperTable* stbInfo);
static
void
init_rand_data
();
static
void
init_rand_data
();
static
void
tmfclose
(
FILE
*
fp
)
{
static
void
tmfclose
(
FILE
*
fp
)
{
if
(
NULL
!=
fp
)
{
if
(
NULL
!=
fp
)
{
...
@@ -1180,7 +1193,8 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
...
@@ -1180,7 +1193,8 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
totalLen
+=
len
;
totalLen
+=
len
;
}
}
verbosePrint
(
"%s() LN%d, databuf=%s resultFile=%s
\n
"
,
__func__
,
__LINE__
,
databuf
,
resultFile
);
verbosePrint
(
"%s() LN%d, databuf=%s resultFile=%s
\n
"
,
__func__
,
__LINE__
,
databuf
,
resultFile
);
appendResultBufToFile
(
databuf
,
resultFile
);
appendResultBufToFile
(
databuf
,
resultFile
);
free
(
databuf
);
free
(
databuf
);
}
}
...
@@ -1333,13 +1347,15 @@ static int printfInsertMeta() {
...
@@ -1333,13 +1347,15 @@ static int printfInsertMeta() {
printf
(
"interface:
\033
[33m%s
\033
[0m
\n
"
,
printf
(
"interface:
\033
[33m%s
\033
[0m
\n
"
,
(
g_args
.
iface
==
TAOSC_IFACE
)
?
"taosc"
:
(
g_args
.
iface
==
REST_IFACE
)
?
"rest"
:
"stmt"
);
(
g_args
.
iface
==
TAOSC_IFACE
)
?
"taosc"
:
(
g_args
.
iface
==
REST_IFACE
)
?
"rest"
:
"stmt"
);
printf
(
"host:
\033
[33m%s:%u
\033
[0m
\n
"
,
g_Dbs
.
host
,
g_Dbs
.
port
);
printf
(
"host:
\033
[33m%s:%u
\033
[0m
\n
"
,
g_Dbs
.
host
,
g_Dbs
.
port
);
printf
(
"user:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
user
);
printf
(
"user:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
user
);
printf
(
"password:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
password
);
printf
(
"password:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
password
);
printf
(
"configDir:
\033
[33m%s
\033
[0m
\n
"
,
configDir
);
printf
(
"configDir:
\033
[33m%s
\033
[0m
\n
"
,
configDir
);
printf
(
"resultFile:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
resultFile
);
printf
(
"resultFile:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
resultFile
);
printf
(
"thread num of insert data:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
threadCount
);
printf
(
"thread num of insert data:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
threadCount
);
printf
(
"thread num of create table:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
threadCountByCreateTbl
);
printf
(
"thread num of create table:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
threadCountByCreateTbl
);
printf
(
"top insert interval:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
printf
(
"top insert interval:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
g_args
.
insert_interval
);
g_args
.
insert_interval
);
printf
(
"number of records per req:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
printf
(
"number of records per req:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
...
@@ -1351,7 +1367,8 @@ static int printfInsertMeta() {
...
@@ -1351,7 +1367,8 @@ static int printfInsertMeta() {
for
(
int
i
=
0
;
i
<
g_Dbs
.
dbCount
;
i
++
)
{
for
(
int
i
=
0
;
i
<
g_Dbs
.
dbCount
;
i
++
)
{
printf
(
"database[
\033
[33m%d
\033
[0m]:
\n
"
,
i
);
printf
(
"database[
\033
[33m%d
\033
[0m]:
\n
"
,
i
);
printf
(
" database[%d] name:
\033
[33m%s
\033
[0m
\n
"
,
i
,
g_Dbs
.
db
[
i
].
dbName
);
printf
(
" database[%d] name:
\033
[33m%s
\033
[0m
\n
"
,
i
,
g_Dbs
.
db
[
i
].
dbName
);
if
(
0
==
g_Dbs
.
db
[
i
].
drop
)
{
if
(
0
==
g_Dbs
.
db
[
i
].
drop
)
{
printf
(
" drop:
\033
[33mno
\033
[0m
\n
"
);
printf
(
" drop:
\033
[33mno
\033
[0m
\n
"
);
}
else
{
}
else
{
...
@@ -1359,40 +1376,51 @@ static int printfInsertMeta() {
...
@@ -1359,40 +1376,51 @@ static int printfInsertMeta() {
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
blocks
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
blocks
>
0
)
{
printf
(
" blocks:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
blocks
);
printf
(
" blocks:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
blocks
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
cache
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
cache
>
0
)
{
printf
(
" cache:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
cache
);
printf
(
" cache:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
cache
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
days
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
days
>
0
)
{
printf
(
" days:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
days
);
printf
(
" days:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
days
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
keep
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
keep
>
0
)
{
printf
(
" keep:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
keep
);
printf
(
" keep:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
keep
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
replica
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
replica
>
0
)
{
printf
(
" replica:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
replica
);
printf
(
" replica:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
replica
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
update
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
update
>
0
)
{
printf
(
" update:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
update
);
printf
(
" update:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
update
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
minRows
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
minRows
>
0
)
{
printf
(
" minRows:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
minRows
);
printf
(
" minRows:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
minRows
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
maxRows
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
maxRows
>
0
)
{
printf
(
" maxRows:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
maxRows
);
printf
(
" maxRows:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
maxRows
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
comp
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
comp
>
0
)
{
printf
(
" comp:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
comp
);
printf
(
" comp:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
comp
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
walLevel
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
walLevel
>
0
)
{
printf
(
" walLevel:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
walLevel
);
printf
(
" walLevel:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
walLevel
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
fsync
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
fsync
>
0
)
{
printf
(
" fsync:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
fsync
);
printf
(
" fsync:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
fsync
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
quorum
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
quorum
>
0
)
{
printf
(
" quorum:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
quorum
);
printf
(
" quorum:
\033
[33m%d
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
quorum
);
}
}
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
[
0
]
!=
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
[
0
]
!=
0
)
{
if
((
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
,
"ms"
,
2
))
if
((
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
,
"ms"
,
2
))
...
@@ -1586,21 +1614,26 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1586,21 +1614,26 @@ static void printfInsertMetaToFile(FILE* fp) {
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
[
0
]
!=
0
)
{
if
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
[
0
]
!=
0
)
{
if
((
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
,
"ms"
,
2
))
if
((
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
,
"ms"
,
2
))
||
(
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
,
"us"
,
2
)))
{
||
(
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
dbCfg
.
precision
,
"us"
,
2
)))
{
fprintf
(
fp
,
" precision: %s
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
fprintf
(
fp
,
" precision: %s
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
}
else
{
}
else
{
fprintf
(
fp
,
" precision error: %s
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
fprintf
(
fp
,
" precision error: %s
\n
"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
}
}
}
}
fprintf
(
fp
,
" super table count: %"
PRIu64
"
\n
"
,
g_Dbs
.
db
[
i
].
superTblCount
);
fprintf
(
fp
,
" super table count: %"
PRIu64
"
\n
"
,
for
(
uint64_t
j
=
0
;
j
<
g_Dbs
.
db
[
i
].
superTblCount
;
j
++
)
{
g_Dbs
.
db
[
i
].
superTblCount
);
fprintf
(
fp
,
" super table[%"
PRIu64
"]:
\n
"
,
j
);
for
(
int
j
=
0
;
j
<
g_Dbs
.
db
[
i
].
superTblCount
;
j
++
)
{
fprintf
(
fp
,
" super table[%d]:
\n
"
,
j
);
fprintf
(
fp
,
" stbName: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sTblName
);
fprintf
(
fp
,
" stbName: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sTblName
);
if
(
PRE_CREATE_SUBTBL
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
autoCreateTable
)
{
if
(
PRE_CREATE_SUBTBL
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
autoCreateTable
)
{
fprintf
(
fp
,
" autoCreateTable: %s
\n
"
,
"no"
);
fprintf
(
fp
,
" autoCreateTable: %s
\n
"
,
"no"
);
}
else
if
(
AUTO_CREATE_SUBTBL
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
autoCreateTable
)
{
}
else
if
(
AUTO_CREATE_SUBTBL
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
autoCreateTable
)
{
fprintf
(
fp
,
" autoCreateTable: %s
\n
"
,
"yes"
);
fprintf
(
fp
,
" autoCreateTable: %s
\n
"
,
"yes"
);
}
else
{
}
else
{
fprintf
(
fp
,
" autoCreateTable: %s
\n
"
,
"error"
);
fprintf
(
fp
,
" autoCreateTable: %s
\n
"
,
"error"
);
...
@@ -1608,7 +1641,8 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1608,7 +1641,8 @@ static void printfInsertMetaToFile(FILE* fp) {
if
(
TBL_NO_EXISTS
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblExists
)
{
if
(
TBL_NO_EXISTS
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblExists
)
{
fprintf
(
fp
,
" childTblExists: %s
\n
"
,
"no"
);
fprintf
(
fp
,
" childTblExists: %s
\n
"
,
"no"
);
}
else
if
(
TBL_ALREADY_EXISTS
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblExists
)
{
}
else
if
(
TBL_ALREADY_EXISTS
==
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblExists
)
{
fprintf
(
fp
,
" childTblExists: %s
\n
"
,
"yes"
);
fprintf
(
fp
,
" childTblExists: %s
\n
"
,
"yes"
);
}
else
{
}
else
{
fprintf
(
fp
,
" childTblExists: %s
\n
"
,
"error"
);
fprintf
(
fp
,
" childTblExists: %s
\n
"
,
"error"
);
...
@@ -1640,8 +1674,10 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1640,8 +1674,10 @@ static void printfInsertMetaToFile(FILE* fp) {
*/
*/
fprintf
(
fp
,
" interlaceRows: %"
PRIu64
"
\n
"
,
fprintf
(
fp
,
" interlaceRows: %"
PRIu64
"
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
fprintf
(
fp
,
" disorderRange: %d
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
disorderRange
);
fprintf
(
fp
,
" disorderRange: %d
\n
"
,
fprintf
(
fp
,
" disorderRatio: %d
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
disorderRatio
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
disorderRange
);
fprintf
(
fp
,
" disorderRatio: %d
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
disorderRatio
);
fprintf
(
fp
,
" maxSqlLen: %"
PRIu64
"
\n
"
,
fprintf
(
fp
,
" maxSqlLen: %"
PRIu64
"
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
maxSqlLen
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
maxSqlLen
);
...
@@ -1649,23 +1685,29 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1649,23 +1685,29 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs
.
db
[
i
].
superTbls
[
j
].
timeStampStep
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
timeStampStep
);
fprintf
(
fp
,
" startTimestamp: %s
\n
"
,
fprintf
(
fp
,
" startTimestamp: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
startTimestamp
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
startTimestamp
);
fprintf
(
fp
,
" sampleFormat: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sampleFormat
);
fprintf
(
fp
,
" sampleFormat: %s
\n
"
,
fprintf
(
fp
,
" sampleFile: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sampleFile
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sampleFormat
);
fprintf
(
fp
,
" tagsFile: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tagsFile
);
fprintf
(
fp
,
" sampleFile: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sampleFile
);
fprintf
(
fp
,
" tagsFile: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tagsFile
);
fprintf
(
fp
,
" columnCount: %d
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columnCount
);
fprintf
(
fp
,
" columnCount: %d
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columnCount
);
for
(
int
k
=
0
;
k
<
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columnCount
;
k
++
)
{
for
(
int
k
=
0
;
k
<
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columnCount
;
k
++
)
{
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
//printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
if
((
0
==
strncasecmp
(
if
((
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
,
"binary"
,
strlen
(
"binary"
)))
"binary"
,
strlen
(
"binary"
)))
||
(
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
,
||
(
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
,
"nchar"
,
strlen
(
"nchar"
))))
{
"nchar"
,
strlen
(
"nchar"
))))
{
fprintf
(
fp
,
"column[%d]:%s(%d) "
,
k
,
fprintf
(
fp
,
"column[%d]:%s(%d) "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataLen
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataLen
);
}
else
{
}
else
{
fprintf
(
fp
,
"column[%d]:%s "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
);
fprintf
(
fp
,
"column[%d]:%s "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
columns
[
k
].
dataType
);
}
}
}
}
fprintf
(
fp
,
"
\n
"
);
fprintf
(
fp
,
"
\n
"
);
...
@@ -1678,7 +1720,8 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1678,7 +1720,8 @@ static void printfInsertMetaToFile(FILE* fp) {
"binary"
,
strlen
(
"binary"
)))
"binary"
,
strlen
(
"binary"
)))
||
(
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataType
,
||
(
0
==
strncasecmp
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataType
,
"nchar"
,
strlen
(
"nchar"
))))
{
"nchar"
,
strlen
(
"nchar"
))))
{
fprintf
(
fp
,
"tag[%d]:%s(%d) "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataType
,
fprintf
(
fp
,
"tag[%d]:%s(%d) "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataType
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataLen
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataLen
);
}
else
{
}
else
{
fprintf
(
fp
,
"tag[%d]:%s "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataType
);
fprintf
(
fp
,
"tag[%d]:%s "
,
k
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
tags
[
k
].
dataType
);
...
@@ -3456,10 +3499,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -3456,10 +3499,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_args
.
interlace_rows
,
g_args
.
num_of_RPR
);
g_args
.
interlace_rows
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
num_of_RPR
);
g_args
.
num_of_RPR
);
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
" press Enter key to continue or Ctrl-C to stop."
);
(
void
)
getchar
();
}
g_args
.
interlace_rows
=
g_args
.
num_of_RPR
;
g_args
.
interlace_rows
=
g_args
.
num_of_RPR
;
}
}
}
else
if
(
!
interlaceRows
)
{
}
else
if
(
!
interlaceRows
)
{
...
@@ -3493,6 +3533,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -3493,6 +3533,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
__func__
,
__LINE__
);
__func__
,
__LINE__
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
else
if
(
numRecPerReq
->
valueint
>
MAX_RECORDS_PER_REQ
)
{
}
else
if
(
numRecPerReq
->
valueint
>
MAX_RECORDS_PER_REQ
)
{
printf
(
"NOTICE: number of records per request value %"
PRIu64
" > %d
\n\n
"
,
numRecPerReq
->
valueint
,
MAX_RECORDS_PER_REQ
);
printf
(
" number of records per request value will be set to %d
\n\n
"
,
MAX_RECORDS_PER_REQ
);
prompt
();
numRecPerReq
->
valueint
=
MAX_RECORDS_PER_REQ
;
numRecPerReq
->
valueint
=
MAX_RECORDS_PER_REQ
;
}
}
g_args
.
num_of_RPR
=
numRecPerReq
->
valueint
;
g_args
.
num_of_RPR
=
numRecPerReq
->
valueint
;
...
@@ -3516,9 +3561,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -3516,9 +3561,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_args
.
answer_yes
=
false
;
g_args
.
answer_yes
=
false
;
}
}
}
else
if
(
!
answerPrompt
)
{
}
else
if
(
!
answerPrompt
)
{
g_args
.
answer_yes
=
false
;
g_args
.
answer_yes
=
true
;
// default is no, mean answer_yes.
}
else
{
}
else
{
printf
(
"ERROR: failed to read json, confirm_parameter_prompt not found
\n
"
);
errorPrint
(
"%s"
,
"failed to read json, confirm_parameter_prompt input mistake
\n
"
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
}
...
@@ -3987,10 +4032,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -3987,10 +4032,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
i
,
j
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
,
g_args
.
num_of_RPR
);
i
,
j
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
num_of_RPR
);
g_args
.
num_of_RPR
);
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
" press Enter key to continue or Ctrl-C to stop."
);
(
void
)
getchar
();
}
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
g_args
.
num_of_RPR
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
g_args
.
num_of_RPR
;
}
}
}
else
if
(
!
stbInterlaceRows
)
{
}
else
if
(
!
stbInterlaceRows
)
{
...
@@ -4186,7 +4228,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
            "query_times");
    if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
      if (specifiedQueryTimes->valueint <= 0) {
        errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
          __func__, __LINE__, specifiedQueryTimes->valueint);
        goto PARSE_OVER;
...
@@ -4203,7 +4246,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
    cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
    if (concurrent && concurrent->type == cJSON_Number) {
      if (concurrent->valueint <= 0) {
        errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
          __func__, __LINE__,
          g_queryInfo.specifiedQueryInfo.sqlCount,
          g_queryInfo.specifiedQueryInfo.concurrent);
...
@@ -4242,15 +4286,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
    cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart");
    if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
      if (0 == strcmp("yes", restart->valuestring)) {
-       g_queryInfo.specifiedQueryInfo.subscribeRestart = 1;
+       g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
      } else if (0 == strcmp("no", restart->valuestring)) {
-       g_queryInfo.specifiedQueryInfo.subscribeRestart = 0;
+       g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
      } else {
        printf("ERROR: failed to read json, subscribe restart error\n");
        goto PARSE_OVER;
      }
    } else {
-     g_queryInfo.specifiedQueryInfo.subscribeRestart = 1;
+     g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
    }

    cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress");
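The keys parsed above ("query_times", "concurrent", "restart", "keepProgress") belong to the specified-query section of the query/subscribe JSON. A minimal sketch of that section is shown below; the section name and the "sqls" wrapper are assumptions (they are not visible in this hunk), while the individual key names come from the cJSON_GetObjectItem calls above, and all values are illustrative only:

    "specified_table_query": {
      "query_times": 2,
      "concurrent": 1,
      "restart": "yes",
      "keepProgress": "yes",
      "resubAfterConsume": 10,
      "sqls": [{"sql": "select count(*) from stb", "result": "./query_res0.txt"}]
    }

Note that with this change subscribeRestart is now stored as a bool (true/false) rather than as 1/0.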
...
@@ -4295,7 +4339,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
          printf("ERROR: failed to read json, sql not found\n");
          goto PARSE_OVER;
        }
        tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);

        cJSON* resubAfterConsume =
            cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
...
@@ -4310,10 +4355,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
        }

        cJSON *result = cJSON_GetObjectItem(sql, "result");
-       if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
-         tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
+       if ((NULL != result) && (result->type == cJSON_String)
+               && (result->valuestring != NULL)) {
+         tstrncpy(g_queryInfo.specifiedQueryInfo.result[j],
+                 result->valuestring, MAX_FILE_NAME_LEN);
        } else if (NULL == result) {
          memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
        } else {
          printf("ERROR: failed to read json, super query result file not found\n");
          goto PARSE_OVER;
...
@@ -4420,27 +4468,27 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
      if (subrestart && subrestart->type == cJSON_String
          && subrestart->valuestring != NULL) {
        if (0 == strcmp("yes", subrestart->valuestring)) {
-         g_queryInfo.superQueryInfo.subscribeRestart = 1;
+         g_queryInfo.superQueryInfo.subscribeRestart = true;
        } else if (0 == strcmp("no", subrestart->valuestring)) {
-         g_queryInfo.superQueryInfo.subscribeRestart = 0;
+         g_queryInfo.superQueryInfo.subscribeRestart = false;
        } else {
          printf("ERROR: failed to read json, subscribe restart error\n");
          goto PARSE_OVER;
        }
      } else {
-       g_queryInfo.superQueryInfo.subscribeRestart = 1;
+       g_queryInfo.superQueryInfo.subscribeRestart = true;
      }

-     cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
-     if (subkeepProgress &&
-         subkeepProgress->type == cJSON_String
-         && subkeepProgress->valuestring != NULL) {
-       if (0 == strcmp("yes", subkeepProgress->valuestring)) {
+     cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
+     if (superkeepProgress &&
+         superkeepProgress->type == cJSON_String
+         && superkeepProgress->valuestring != NULL) {
+       if (0 == strcmp("yes", superkeepProgress->valuestring)) {
          g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
-       } else if (0 == strcmp("no", subkeepProgress->valuestring)) {
+       } else if (0 == strcmp("no", superkeepProgress->valuestring)) {
          g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
        } else {
-         printf("ERROR: failed to read json, subscribe keepProgress error\n");
+         printf("ERROR: failed to read json, subscribe super table keepProgress error\n");
          goto PARSE_OVER;
        }
      } else {
...
@@ -4637,16 +4685,22 @@ static int getRowDataFromSample(
  return dataLen;
}

-static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) {
+static int64_t generateStbRowData(SSuperTable* stbInfo, char* recBuf, int64_t timestamp) {
  int64_t dataLen = 0;
  char *pstr = recBuf;
  int64_t maxLen = MAX_DATA_SIZE;

  dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp);

  for (int i = 0; i < stbInfo->columnCount; i++) {
-   if ((0 == strncasecmp(stbInfo->columns[i].dataType, "BINARY", strlen("BINARY")))
-       || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", strlen("NCHAR")))) {
+   if ((0 == strncasecmp(stbInfo->columns[i].dataType,
+               "BINARY", strlen("BINARY")))
+       || (0 == strncasecmp(stbInfo->columns[i].dataType,
+               "NCHAR", strlen("NCHAR")))) {
      if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
        errorPrint("binary or nchar length overflow, max size:%u\n",
                (uint32_t)TSDB_MAX_BINARY_LEN);
...
@@ -4708,7 +4762,7 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb
}

static int64_t generateData(char *recBuf, char **data_type,
-       int num_of_cols, int timestamp, int lenOfBinary) {
+       int num_of_cols, int64_t timestamp, int lenOfBinary) {
  memset(recBuf, 0, MAX_DATA_SIZE);
  char *pstr = recBuf;
  pstr += sprintf(pstr, "(%" PRId64, timestamp);
...
@@ -4859,100 +4913,119 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t table
  }
}

-static int64_t generateDataTail(
-       SSuperTable* superTblInfo,
+static int64_t generateDataTailWithoutStb(
        uint64_t batch, char* buffer,
        int64_t remainderBufLen, int64_t insertRows,
-       uint64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) {
- uint64_t len = 0;
+       uint64_t startFrom, int64_t startTime,
+       /* int64_t *pSamplePos, */int64_t *dataLen) {
+ uint32_t ncols_per_record = 1; // count first col ts
+ uint64_t len = 0;
  char *pstr = buffer;

- if (superTblInfo == NULL) {
    uint32_t datatypeSeq = 0;
    while(g_args.datatype[datatypeSeq]) {
      datatypeSeq ++;
      ncols_per_record ++;
    }
- }

  verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);

- uint64_t k = 0;
+ int64_t k = 0;
  for (k = 0; k < batch;) {
    char data[MAX_DATA_SIZE];
    memset(data, 0, MAX_DATA_SIZE);

    int64_t retLen = 0;

-   if (superTblInfo) {
-     if (0 == strncasecmp(superTblInfo->dataSource,
-                 "sample", strlen("sample"))) {
-       retLen = getRowDataFromSample(data, remainderBufLen,
-               startTime + superTblInfo->timeStampStep * k,
-               superTblInfo, pSamplePos);
-     } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) {
-       int64_t randTail = superTblInfo->timeStampStep * k;
-       if (superTblInfo->disorderRatio > 0) {
-         int rand_num = taosRandom() % 100;
-         if (rand_num < superTblInfo->disorderRatio) {
-           randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
-           debugPrint("rand data generated, back %"PRId64"\n", randTail);
-         }
-       }
-       int64_t d = startTime
-               + randTail;
-       retLen = generateRowData(
-               data, d,
-               superTblInfo);
-     }
-     if (retLen > remainderBufLen) {
-       break;
-     }
-     pstr += snprintf(pstr , retLen + 1, "%s", data);
-     k++;
-     len += retLen;
-     remainderBufLen -= retLen;
-   } else {
-     char **data_type = g_args.datatype;
-     int lenOfBinary = g_args.len_of_binary;
-     int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
-     if (g_args.disorderRatio != 0) {
-       int rand_num = taosRandom() % 100;
-       if (rand_num < g_args.disorderRatio) {
-         randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1);
-         debugPrint("rand data generated, back %"PRId64"\n", randTail);
-       }
-     } else {
-       randTail = DEFAULT_TIMESTAMP_STEP * k;
-     }
+   char **data_type = g_args.datatype;
+   int lenOfBinary = g_args.len_of_binary;
+
+   int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
+
+   if (g_args.disorderRatio != 0) {
+     int rand_num = taosRandom() % 100;
+     if (rand_num < g_args.disorderRatio) {
+       randTail = (randTail +
+               (taosRandom() % g_args.disorderRange + 1)) * (-1);
+       debugPrint("rand data generated, back %"PRId64"\n", randTail);
+     }
+   } else {
+     randTail = DEFAULT_TIMESTAMP_STEP * k;
+   }
+
+   retLen = generateData(data, data_type,
+           startTime + randTail,
+           lenOfBinary);
+
+   if (len > remainderBufLen)
+     break;
+
+   pstr += sprintf(pstr, "%s", data);
+   k++;
+   len += retLen;
+   remainderBufLen -= retLen;

    verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
            __func__, __LINE__, len, k, buffer);

    startFrom ++;

    if (startFrom >= insertRows) {
      break;
    }
  }

  *dataLen = len;
  return k;
}

+static int64_t generateStbDataTail(
+       SSuperTable* superTblInfo,
+       uint64_t batch, char* buffer,
+       int64_t remainderBufLen, int64_t insertRows,
+       uint64_t startFrom, int64_t startTime,
+       int64_t *pSamplePos, int64_t *dataLen) {
+ uint64_t len = 0;
+
+ char *pstr = buffer;
+
+ verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
+
+ int64_t k = 0;
+ for (k = 0; k < batch;) {
+   char data[MAX_DATA_SIZE];
+   memset(data, 0, MAX_DATA_SIZE);
+
+   int64_t retLen = 0;
+
+   if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) {
+     retLen = getRowDataFromSample(data, remainderBufLen,
+             startTime + superTblInfo->timeStampStep * k,
+             superTblInfo, pSamplePos);
+   } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) {
+     int64_t randTail = superTblInfo->timeStampStep * k;
+     if (superTblInfo->disorderRatio > 0) {
+       int rand_num = taosRandom() % 100;
+       if (rand_num < superTblInfo->disorderRatio) {
+         randTail = (randTail +
+                 (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
+         debugPrint("rand data generated, back %"PRId64"\n", randTail);
+       }
+     }
+     int64_t d = startTime + randTail;
+     retLen = generateStbRowData(superTblInfo, data, d);
+   }
+
+   if (retLen > remainderBufLen) {
+     break;
+   }
+
+   pstr += snprintf(pstr , retLen + 1, "%s", data);
+   k++;
+   len += retLen;
+   remainderBufLen -= retLen;
+
+   verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
+           __func__, __LINE__, len, k, buffer);
...
@@ -4967,17 +5040,41 @@ static int64_t generateDataTail(
  return k;
}

-static int generateSQLHead(char *tableName, int32_t tableSeq, threadInfo* pThreadInfo, SSuperTable* superTblInfo,
-       char *buffer, int remainderBufLen)
+static int generateSQLHeadWithoutStb(char *tableName, char *dbName,
+       char *buffer, int remainderBufLen)
{
  int len;

#define HEAD_BUFF_LEN    1024*24  // 16*1024 + (192+32)*2 + insert into ..
  char headBuf[HEAD_BUFF_LEN];

- if (superTblInfo) {
-   if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
+ len = snprintf(
+         headBuf, HEAD_BUFF_LEN, "%s.%s values", dbName, tableName);
+
+ if (len > remainderBufLen)
+   return -1;
+
+ tstrncpy(buffer, headBuf, len + 1);
+
+ return len;
+}
+
+static int generateStbSQLHead(
+       SSuperTable* superTblInfo,
+       char *tableName, int32_t tableSeq,
+       char *dbName,
+       char *buffer, int remainderBufLen)
+{
+ int len;
+
+ char headBuf[HEAD_BUFF_LEN];
+
+ if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
    char* tagsValBuf = NULL;
    if (0 == superTblInfo->tagSource) {
      tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq);
...
@@ -4996,9 +5093,9 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
            headBuf, HEAD_BUFF_LEN,
            "%s.%s using %s.%s tags %s values",
-           pThreadInfo->db_name,
+           dbName,
            tableName,
-           pThreadInfo->db_name,
+           dbName,
            superTblInfo->sTblName,
            tagsValBuf);
    tmfree(tagsValBuf);
...
@@ -5007,22 +5104,14 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
            headBuf, HEAD_BUFF_LEN,
            "%s.%s values",
-           pThreadInfo->db_name,
+           dbName,
            tableName);
  } else {
    len = snprintf(
        headBuf,
        HEAD_BUFF_LEN,
        "%s.%s values",
-       pThreadInfo->db_name,
+       dbName,
        tableName);
  }
- } else {
-   len = snprintf(
-       headBuf,
-       HEAD_BUFF_LEN,
-       "%s.%s values",
-       pThreadInfo->db_name,
-       tableName);
- }
...
@@ -5034,7 +5123,8 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
  return len;
}

-static int64_t generateInterlaceDataBuffer(
-       char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
+static int64_t generateStbInterlaceData(
+       SSuperTable *superTblInfo,
+       char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
        uint64_t tableSeq,
        threadInfo *pThreadInfo, char *buffer,
...
@@ -5044,10 +5134,11 @@ static int64_t generateInterlaceDataBuffer(
{
  assert(buffer);
  char *pstr = buffer;

- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo,
-         superTblInfo, pstr, *pRemainderBufLen);
+ int headLen = generateStbSQLHead(
+         superTblInfo,
+         tableName, tableSeq, pThreadInfo->db_name,
+         pstr, *pRemainderBufLen);

  if (headLen <= 0) {
    return 0;
...
@@ -5065,19 +5156,58 @@ static int64_t generateInterlaceDataBuffer(
          pThreadInfo->threadID, __func__, __LINE__,
          i, batchPerTblTimes, batchPerTbl);

- if (superTblInfo) {
    if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
      startTime = taosGetTimestamp(pThreadInfo->time_precision);
    } else {
-     startTime = 1500000000000;
    }

-   int64_t k = generateDataTail(
-           superTblInfo,
-           batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime,
-           &(pThreadInfo->samplePos), &dataLen);
+ int64_t k = generateStbDataTail(
+         superTblInfo,
+         batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+         startTime,
+         &(pThreadInfo->samplePos), &dataLen);
+
+ if (k == batchPerTbl) {
+   pstr += dataLen;
+   *pRemainderBufLen -= dataLen;
+ } else {
+   debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n",
+           __func__, __LINE__, k, batchPerTbl);
+   pstr -= headLen;
+   pstr[0] = '\0';
+   k = 0;
+ }
+
+ return k;
+}
+
+static int64_t generateInterlaceDataWithoutStb(
+       char *tableName, uint64_t batchPerTbl,
+       uint64_t tableSeq,
+       char *dbName, char *buffer,
+       int64_t insertRows,
+       uint64_t *pRemainderBufLen)
+{
+ assert(buffer);
+ char *pstr = buffer;
+
+ int headLen = generateSQLHeadWithoutStb(
+         tableName, dbName,
+         pstr, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+   return 0;
+ }
+
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen = 0;
+
+ int64_t startTime = 1500000000000;
+
+ int64_t k = generateDataTailWithoutStb(
+         batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+         startTime,
+         &dataLen);
+
  if (k == batchPerTbl) {
    pstr += dataLen;
...
@@ -5093,34 +5223,55 @@ static int64_t generateInterlaceDataBuffer(
  return k;
}

-static int64_t generateProgressiveDataBuffer(
+static int64_t generateStbProgressiveData(
+       SSuperTable *superTblInfo,
        char *tableName,
        int64_t tableSeq,
-       threadInfo *pThreadInfo, char *buffer,
+       char *dbName, char *buffer,
        int64_t insertRows,
        uint64_t startFrom, int64_t startTime, int64_t *pSamplePos,
        int64_t *pRemainderBufLen)
{
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ assert(buffer != NULL);
+ char *pstr = buffer;

- int ncols_per_record = 1; // count first col ts
+ memset(buffer, 0, *pRemainderBufLen);

- if (superTblInfo == NULL) {
-   int datatypeSeq = 0;
-   while(g_args.datatype[datatypeSeq]) {
-     datatypeSeq ++;
-     ncols_per_record ++;
-   }
- }
+ int64_t headLen = generateStbSQLHead(
+             superTblInfo,
+             tableName, tableSeq, dbName,
+             buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+   return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateStbDataTail(superTblInfo,
+         g_args.num_of_RPR, pstr, *pRemainderBufLen,
+         insertRows, startFrom,
+         startTime,
+         pSamplePos, &dataLen);
+}
+
+static int64_t generateProgressiveDataWithoutStb(
+       char *tableName,
+       int64_t tableSeq,
+       threadInfo *pThreadInfo, char *buffer,
+       int64_t insertRows,
+       uint64_t startFrom, int64_t startTime, int64_t *pSamplePos,
+       int64_t *pRemainderBufLen)
+{
  assert(buffer != NULL);
  char *pstr = buffer;

- int64_t k = 0;
  memset(buffer, 0, *pRemainderBufLen);

- int64_t headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo,
+ int64_t headLen = generateSQLHeadWithoutStb(tableName, pThreadInfo->db_name,
          buffer, *pRemainderBufLen);

  if (headLen <= 0) {
...
@@ -5130,12 +5281,11 @@ static int64_t generateProgressiveDataBuffer(
  *pRemainderBufLen -= headLen;

  int64_t dataLen;
- k = generateDataTail(superTblInfo,
-       g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
-       startTime,
-       pSamplePos, &dataLen);
-
- return k;
+ return generateDataTailWithoutStb(
+       g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
+       startTime,
+       /*pSamplePos, */&dataLen);
}

static void printStatPerThread(threadInfo *pThreadInfo)
...
@@ -5147,12 +5297,16 @@ static void printStatPerThread(threadInfo *pThreadInfo)
          (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay / 1000.0)));
}

// sync write interlace data
static void* syncWriteInterlace(threadInfo *pThreadInfo) {
  debugPrint("[%d] %s() LN%d: ### interlace write\n",
          pThreadInfo->threadID, __func__, __LINE__);

  int64_t insertRows;
  uint64_t interlaceRows;
+ uint64_t maxSqlLen;
+ int64_t nTimeStampStep;
+ uint64_t insert_interval;

  SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
...
@@ -5165,26 +5319,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
    } else {
      interlaceRows = superTblInfo->interlaceRows;
    }
+   maxSqlLen = superTblInfo->maxSqlLen;
+   nTimeStampStep = superTblInfo->timeStampStep;
+   insert_interval = superTblInfo->insertInterval;
  } else {
    insertRows = g_args.num_of_DPT;
    interlaceRows = g_args.interlace_rows;
+   maxSqlLen = g_args.max_sql_len;
+   nTimeStampStep = DEFAULT_TIMESTAMP_STEP;
+   insert_interval = g_args.insert_interval;
  }

+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+         pThreadInfo->threadID, __func__, __LINE__,
+         pThreadInfo->start_table_from,
+         pThreadInfo->ntables, insertRows);

  if (interlaceRows > insertRows)
    interlaceRows = insertRows;

  if (interlaceRows > g_args.num_of_RPR)
    interlaceRows = g_args.num_of_RPR;

- int progOrInterlace;
+ uint64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTblTimes;
- if (interlaceRows > 0) {
-   progOrInterlace = INTERLACE_INSERT_MODE;
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
    batchPerTblTimes = g_args.num_of_RPR / interlaceRows;
  } else {
-   progOrInterlace = PROGRESSIVE_INSERT_MODE;
    batchPerTblTimes = 1;
  }
- uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;

  pThreadInfo->buffer = calloc(maxSqlLen, 1);
  if (NULL == pThreadInfo->buffer) {
    errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
@@ -5192,15 +5358,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5192,15 +5358,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
return
NULL
;
return
NULL
;
}
}
char
tableName
[
TSDB_TABLE_NAME_LEN
];
pThreadInfo
->
totalInsertRows
=
0
;
pThreadInfo
->
totalInsertRows
=
0
;
pThreadInfo
->
totalAffectedRows
=
0
;
pThreadInfo
->
totalAffectedRows
=
0
;
int64_t
nTimeStampStep
=
superTblInfo
?
superTblInfo
->
timeStampStep
:
DEFAULT_TIMESTAMP_STEP
;
uint64_t
insert_interval
=
superTblInfo
?
superTblInfo
->
insertInterval
:
g_args
.
insert_interval
;
uint64_t
st
=
0
;
uint64_t
st
=
0
;
uint64_t
et
=
UINT64_MAX
;
uint64_t
et
=
UINT64_MAX
;
...
@@ -5209,30 +5369,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5209,30 +5369,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint64_t
endTs
;
uint64_t
endTs
;
uint64_t
tableSeq
=
pThreadInfo
->
start_table_from
;
uint64_t
tableSeq
=
pThreadInfo
->
start_table_from
;
debugPrint
(
"[%d] %s() LN%d: start_table_from=%"
PRIu64
" ntables=%"
PRId64
" insertRows=%"
PRIu64
"
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
start_table_from
,
pThreadInfo
->
ntables
,
insertRows
);
int64_t
startTime
=
pThreadInfo
->
start_time
;
int64_t
startTime
=
pThreadInfo
->
start_time
;
uint64_t
batchPerTbl
=
interlaceRows
;
uint64_t
batchPerTblTimes
;
if
((
interlaceRows
>
0
)
&&
(
pThreadInfo
->
ntables
>
1
))
{
batchPerTblTimes
=
g_args
.
num_of_RPR
/
interlaceRows
;
}
else
{
batchPerTblTimes
=
1
;
}
uint64_t
generatedRecPerTbl
=
0
;
uint64_t
generatedRecPerTbl
=
0
;
bool
flagSleep
=
true
;
bool
flagSleep
=
true
;
uint64_t
sleepTimeTotal
=
0
;
uint64_t
sleepTimeTotal
=
0
;
char
*
strInsertInto
=
"insert into "
;
int
nInsertBufLen
=
strlen
(
strInsertInto
);
while
(
pThreadInfo
->
totalInsertRows
<
pThreadInfo
->
ntables
*
insertRows
)
{
while
(
pThreadInfo
->
totalInsertRows
<
pThreadInfo
->
ntables
*
insertRows
)
{
if
((
flagSleep
)
&&
(
insert_interval
))
{
if
((
flagSleep
)
&&
(
insert_interval
))
{
st
=
taosGetTimestampMs
();
st
=
taosGetTimestampMs
();
...
@@ -5244,13 +5386,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5244,13 +5386,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
char
*
pstr
=
pThreadInfo
->
buffer
;
char
*
pstr
=
pThreadInfo
->
buffer
;
int
len
=
snprintf
(
pstr
,
nInsertBufLen
+
1
,
"%s"
,
strInsertInto
);
int
len
=
snprintf
(
pstr
,
strlen
(
STR_INSERT_INTO
)
+
1
,
"%s"
,
STR_INSERT_INTO
);
pstr
+=
len
;
pstr
+=
len
;
remainderBufLen
-=
len
;
remainderBufLen
-=
len
;
uint64_t
recOfBatch
=
0
;
uint64_t
recOfBatch
=
0
;
for
(
uint64_t
i
=
0
;
i
<
batchPerTblTimes
;
i
++
)
{
for
(
uint64_t
i
=
0
;
i
<
batchPerTblTimes
;
i
++
)
{
char
tableName
[
TSDB_TABLE_NAME_LEN
];
getTableName
(
tableName
,
pThreadInfo
,
tableSeq
);
getTableName
(
tableName
,
pThreadInfo
,
tableSeq
);
if
(
0
==
strlen
(
tableName
))
{
if
(
0
==
strlen
(
tableName
))
{
errorPrint
(
"[%d] %s() LN%d, getTableName return null
\n
"
,
errorPrint
(
"[%d] %s() LN%d, getTableName return null
\n
"
,
...
@@ -5260,13 +5405,25 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5260,13 +5405,25 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
}
uint64_t
oldRemainderLen
=
remainderBufLen
;
uint64_t
oldRemainderLen
=
remainderBufLen
;
int64_t
generated
=
generateInterlaceDataBuffer
(
tableName
,
batchPerTbl
,
i
,
batchPerTblTimes
,
int64_t
generated
;
tableSeq
,
if
(
superTblInfo
)
{
pThreadInfo
,
pstr
,
generated
=
generateStbInterlaceData
(
insertRows
,
superTblInfo
,
startTime
,
tableName
,
batchPerTbl
,
i
,
batchPerTblTimes
,
&
remainderBufLen
);
tableSeq
,
pThreadInfo
,
pstr
,
insertRows
,
startTime
,
&
remainderBufLen
);
}
else
{
generated
=
generateInterlaceDataWithoutStb
(
tableName
,
batchPerTbl
,
tableSeq
,
pThreadInfo
->
db_name
,
pstr
,
insertRows
,
&
remainderBufLen
);
}
debugPrint
(
"[%d] %s() LN%d, generated records is %"
PRId64
"
\n
"
,
debugPrint
(
"[%d] %s() LN%d, generated records is %"
PRId64
"
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
generated
);
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
generated
);
...
@@ -5287,8 +5444,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5287,8 +5444,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
batchPerTbl
,
recOfBatch
);
batchPerTbl
,
recOfBatch
);
if
(
progOrInterlace
==
INTERLACE_INSERT_MODE
)
{
if
(
tableSeq
==
pThreadInfo
->
start_table_from
+
pThreadInfo
->
ntables
)
{
if
(
tableSeq
==
pThreadInfo
->
start_table_from
+
pThreadInfo
->
ntables
)
{
// turn to first table
// turn to first table
tableSeq
=
pThreadInfo
->
start_table_from
;
tableSeq
=
pThreadInfo
->
start_table_from
;
generatedRecPerTbl
+=
batchPerTbl
;
generatedRecPerTbl
+=
batchPerTbl
;
...
@@ -5306,7 +5462,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5306,7 +5462,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if
(
pThreadInfo
->
ntables
*
batchPerTbl
<
g_args
.
num_of_RPR
)
if
(
pThreadInfo
->
ntables
*
batchPerTbl
<
g_args
.
num_of_RPR
)
break
;
break
;
}
}
}
verbosePrint
(
"[%d] %s() LN%d generatedRecPerTbl=%"
PRId64
" insertRows=%"
PRId64
"
\n
"
,
verbosePrint
(
"[%d] %s() LN%d generatedRecPerTbl=%"
PRId64
" insertRows=%"
PRId64
"
\n
"
,
...
@@ -5384,19 +5539,18 @@ free_of_interlace:
  return NULL;
}

-// sync insertion
+// sync insertion progressive data
/*
   1 thread: 100 tables * 2000  rows/s
   1 thread: 10  tables * 20000 rows/s
   6 thread: 300 tables * 2000  rows/s

   2 taosinsertdata , 1 thread:  10  tables * 20000 rows/s
*/
static void* syncWriteProgressive(threadInfo *pThreadInfo) {
  debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);

  SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
  uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ int64_t timeStampStep =
+     superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
+ int64_t insertRows =
+     (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
+ verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
+         __func__, __LINE__, insertRows);

  pThreadInfo->buffer = calloc(maxSqlLen, 1);
  if (NULL == pThreadInfo->buffer) {
...
@@ -5410,8 +5564,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
  uint64_t startTs = taosGetTimestampMs();
  uint64_t endTs;

- int64_t timeStampStep =
-     superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
/* int insert_interval =
     superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
   uint64_t st = 0;
...
@@ -5423,21 +5575,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
  pThreadInfo->samplePos = 0;

  for (uint64_t tableSeq =
          pThreadInfo->start_table_from;
          tableSeq <= pThreadInfo->end_table_to;
          tableSeq ++) {
    int64_t start_time = pThreadInfo->start_time;

-   int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
-   verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows);
-
    for (uint64_t i = 0; i < insertRows;) {
-     /*
-       if (insert_interval) {
-         st = taosGetTimestampMs();
-       }
-     */
      char tableName[TSDB_TABLE_NAME_LEN];
      getTableName(tableName, pThreadInfo, tableSeq);
      verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
...
@@ -5446,18 +5589,28 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
      int64_t remainderBufLen = maxSqlLen;
      char *pstr = pThreadInfo->buffer;

-     int nInsertBufLen = strlen("insert into ");
-     int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into ");
+     int len = snprintf(pstr,
+             strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO);

      pstr += len;
      remainderBufLen -= len;

-     int64_t generated = generateProgressiveDataBuffer(
-             tableName, tableSeq, pThreadInfo, pstr, insertRows,
-             i, start_time,
-             &(pThreadInfo->samplePos),
-             &remainderBufLen);
+     int64_t generated;
+     if (superTblInfo) {
+       generated = generateStbProgressiveData(
+               superTblInfo,
+               tableName, tableSeq, pThreadInfo->db_name, pstr, insertRows,
+               i, start_time,
+               &(pThreadInfo->samplePos),
+               &remainderBufLen);
+     } else {
+       generated = generateProgressiveDataWithoutStb(
+               tableName, tableSeq, pThreadInfo, pstr, insertRows,
+               i, start_time,
+               &(pThreadInfo->samplePos),
+               &remainderBufLen);
+     }
      if (generated > 0)
        i += generated;
      else
...
@@ -5502,27 +5655,14 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
      if (i >= insertRows)
        break;
-     /*
-       if (insert_interval) {
-         et = taosGetTimestampMs();
-         if (insert_interval > ((et - st)) ) {
-           int sleep_time = insert_interval - (et -st);
-           performancePrint("%s() LN%d sleep: %d ms for insert interval\n",
-                   __func__, __LINE__, sleep_time);
-           taosMsleep(sleep_time); // ms
-         }
-       }
-     */
    }   // num_of_DPT

-   if (g_args.verbose_print) {
-     if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo &&
+   if ((g_args.verbose_print) &&
+     (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) &&
        (0 == strncasecmp(
            superTblInfo->dataSource, "sample", strlen("sample")))) {
      verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
          __func__, __LINE__, pThreadInfo->samplePos);
    }
  }     // tableSeq
...
@@ -5557,7 +5697,6 @@ static void* syncWrite(void *sarg) {
    // progressive mode
    return syncWriteProgressive(pThreadInfo);
  }
}

static void callBack(void *param, TAOS_RES *res, int code) {
...
@@ -5595,10 +5734,12 @@ static void callBack(void *param, TAOS_RES *res, int code) {
    int rand_num = taosRandom() % 100;
    if (0 != pThreadInfo->superTblInfo->disorderRatio
            && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
-     int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
-     generateRowData(data, d, pThreadInfo->superTblInfo);
+     int64_t d = pThreadInfo->lastTs
+             - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
+     generateStbRowData(pThreadInfo->superTblInfo, data, d);
    } else {
-     generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo);
+     generateStbRowData(pThreadInfo->superTblInfo, data, pThreadInfo->lastTs += 1000);
    }
    pstr += sprintf(pstr, "%s", data);
    pThreadInfo->counter++;
@@ -5769,19 +5910,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
...
@@ -5769,19 +5910,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
&&
((
superTblInfo
->
childTblOffset
+
superTblInfo
->
childTblLimit
)
&&
((
superTblInfo
->
childTblOffset
+
superTblInfo
->
childTblLimit
)
>
superTblInfo
->
childTblCount
))
{
>
superTblInfo
->
childTblCount
))
{
printf
(
"WARNING: specified offset + limit > child table count!
\n
"
);
printf
(
"WARNING: specified offset + limit > child table count!
\n
"
);
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
}
}
if
((
superTblInfo
->
childTblExists
!=
TBL_NO_EXISTS
)
if
((
superTblInfo
->
childTblExists
!=
TBL_NO_EXISTS
)
&&
(
0
==
superTblInfo
->
childTblLimit
))
{
&&
(
0
==
superTblInfo
->
childTblLimit
))
{
printf
(
"WARNING: specified limit = 0, which cannot find table name to insert or query!
\n
"
);
printf
(
"WARNING: specified limit = 0, which cannot find table name to insert or query!
\n
"
);
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
}
}
superTblInfo
->
childTblName
=
(
char
*
)
calloc
(
1
,
superTblInfo
->
childTblName
=
(
char
*
)
calloc
(
1
,
...
@@ -6129,6 +6264,13 @@ static void *readMetric(void *sarg) {
...
@@ -6129,6 +6264,13 @@ static void *readMetric(void *sarg) {
return
NULL
;
return
NULL
;
}
}
static
void
prompt
()
{
if
(
!
g_args
.
answer_yes
)
{
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
}
static
int
insertTestProcess
()
{
static
int
insertTestProcess
()
{
...
@@ -6149,10 +6291,7 @@ static int insertTestProcess() {
...
@@ -6149,10 +6291,7 @@ static int insertTestProcess() {
if
(
g_fpOfInsertResult
)
if
(
g_fpOfInsertResult
)
printfInsertMetaToFile
(
g_fpOfInsertResult
);
printfInsertMetaToFile
(
g_fpOfInsertResult
);
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
"Press enter key to continue
\n\n
"
);
(
void
)
getchar
();
}
init_rand_data
();
init_rand_data
();
...
@@ -6422,10 +6561,7 @@ static int queryTestProcess() {
...
@@ -6422,10 +6561,7 @@ static int queryTestProcess() {
&
g_queryInfo
.
superQueryInfo
.
childTblCount
);
&
g_queryInfo
.
superQueryInfo
.
childTblCount
);
}
}
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
"Press enter key to continue
\n\n
"
);
(
void
)
getchar
();
}
if
(
g_args
.
debug_print
||
g_args
.
verbose_print
)
{
if
(
g_args
.
debug_print
||
g_args
.
verbose_print
)
{
printfQuerySystemInfo
(
taos
);
printfQuerySystemInfo
(
taos
);
...
@@ -6712,13 +6848,18 @@ static void *superSubscribe(void *sarg) {
...
@@ -6712,13 +6848,18 @@ static void *superSubscribe(void *sarg) {
continue
;
continue
;
}
}
taosMsleep
(
g_queryInfo
.
superQueryInfo
.
subscribeInterval
);
// ms
res
=
taos_consume
(
tsub
[
i
]);
res
=
taos_consume
(
tsub
[
i
]);
if
(
res
)
{
if
(
res
)
{
if
(
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
if
(
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
pThreadInfo
->
threadID
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
}
if
(
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
}
}
consumed
[
i
]
++
;
consumed
[
i
]
++
;
...
@@ -6813,9 +6954,15 @@ static void *specifiedSubscribe(void *sarg) {
...
@@ -6813,9 +6954,15 @@ static void *specifiedSubscribe(void *sarg) {
continue
;
continue
;
}
}
taosMsleep
(
g_queryInfo
.
specifiedQueryInfo
.
subscribeInterval
);
// ms
res
=
taos_consume
(
tsub
);
res
=
taos_consume
(
tsub
);
if
(
res
)
{
if
(
res
)
{
if
(
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
}
consumed
++
;
consumed
++
;
if
((
g_queryInfo
.
specifiedQueryInfo
.
subscribeKeepProgress
)
if
((
g_queryInfo
.
specifiedQueryInfo
.
subscribeKeepProgress
)
&&
(
consumed
>=
&&
(
consumed
>=
...
@@ -6852,10 +6999,7 @@ static int subscribeTestProcess() {
...
@@ -6852,10 +6999,7 @@ static int subscribeTestProcess() {
printfQueryMeta
();
printfQueryMeta
();
resetAfterAnsiEscape
();
resetAfterAnsiEscape
();
if
(
!
g_args
.
answer_yes
)
{
prompt
();
printf
(
"Press enter key to continue
\n\n
"
);
(
void
)
getchar
();
}
TAOS
*
taos
=
NULL
;
TAOS
*
taos
=
NULL
;
taos
=
taos_connect
(
g_queryInfo
.
host
,
taos
=
taos_connect
(
g_queryInfo
.
host
,
...
@@ -7113,17 +7257,21 @@ static void setParaFromArg(){
...
@@ -7113,17 +7257,21 @@ static void setParaFromArg(){
if
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
>
g_args
.
num_of_CPR
)
{
if
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
>
g_args
.
num_of_CPR
)
{
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
=
g_args
.
num_of_CPR
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
=
g_args
.
num_of_CPR
;
}
else
{
}
else
{
for
(
int
i
=
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
;
i
<
g_args
.
num_of_CPR
;
i
++
)
{
for
(
int
i
=
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
;
tstrncpy
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columns
[
i
].
dataType
,
"INT"
,
MAX_TB_NAME_SIZE
);
i
<
g_args
.
num_of_CPR
;
i
++
)
{
tstrncpy
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columns
[
i
].
dataType
,
"INT"
,
MAX_TB_NAME_SIZE
);
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columns
[
i
].
dataLen
=
0
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columns
[
i
].
dataLen
=
0
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
++
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
columnCount
++
;
}
}
}
}
tstrncpy
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
0
].
dataType
,
"INT"
,
MAX_TB_NAME_SIZE
);
tstrncpy
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
0
].
dataType
,
"INT"
,
MAX_TB_NAME_SIZE
);
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
0
].
dataLen
=
0
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
0
].
dataLen
=
0
;
tstrncpy
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
1
].
dataType
,
"BINARY"
,
MAX_TB_NAME_SIZE
);
tstrncpy
(
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
1
].
dataType
,
"BINARY"
,
MAX_TB_NAME_SIZE
);
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
1
].
dataLen
=
g_args
.
len_of_binary
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tags
[
1
].
dataLen
=
g_args
.
len_of_binary
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tagCount
=
2
;
g_Dbs
.
db
[
0
].
superTbls
[
0
].
tagCount
=
2
;
}
else
{
}
else
{
...
@@ -7343,6 +7491,9 @@ int main(int argc, char *argv[]) {
...
@@ -7343,6 +7491,9 @@ int main(int argc, char *argv[]) {
}
else
{
}
else
{
testCmdLine
();
testCmdLine
();
}
}
if
(
g_dupstr
)
free
(
g_dupstr
);
}
}
return
0
;
return
0
;
...
...
src/mnode/inc/mnodeAcct.h
浏览文件 @
663439bf
...
@@ -35,6 +35,8 @@ void mnodeDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb);
...
@@ -35,6 +35,8 @@ void mnodeDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb);
void
mnodeAddUserToAcct
(
SAcctObj
*
pAcct
,
SUserObj
*
pUser
);
void
mnodeAddUserToAcct
(
SAcctObj
*
pAcct
,
SUserObj
*
pUser
);
void
mnodeDropUserFromAcct
(
SAcctObj
*
pAcct
,
SUserObj
*
pUser
);
void
mnodeDropUserFromAcct
(
SAcctObj
*
pAcct
,
SUserObj
*
pUser
);
int32_t
mnodeCompactAccts
();
#ifdef __cplusplus
#ifdef __cplusplus
}
}
#endif
#endif
...
...
src/mnode/inc/mnodeCluster.h
浏览文件 @
663439bf
...
@@ -25,6 +25,8 @@ void mnodeCleanupCluster();
...
@@ -25,6 +25,8 @@ void mnodeCleanupCluster();
void
mnodeUpdateClusterId
();
void
mnodeUpdateClusterId
();
const
char
*
mnodeGetClusterId
();
const
char
*
mnodeGetClusterId
();
int32_t
mnodeCompactCluster
();
#ifdef __cplusplus
#ifdef __cplusplus
}
}
#endif
#endif
...
...
src/mnode/inc/mnodeDb.h
浏览文件 @
663439bf
...
@@ -41,6 +41,8 @@ void mnodeDecDbRef(SDbObj *pDb);
...
@@ -41,6 +41,8 @@ void mnodeDecDbRef(SDbObj *pDb);
bool
mnodeCheckIsMonitorDB
(
char
*
db
,
char
*
monitordb
);
bool
mnodeCheckIsMonitorDB
(
char
*
db
,
char
*
monitordb
);
void
mnodeDropAllDbs
(
SAcctObj
*
pAcct
);
void
mnodeDropAllDbs
(
SAcctObj
*
pAcct
);
int32_t
mnodeCompactDbs
();
// util func
// util func
void
mnodeAddSuperTableIntoDb
(
SDbObj
*
pDb
);
void
mnodeAddSuperTableIntoDb
(
SDbObj
*
pDb
);
void
mnodeRemoveSuperTableFromDb
(
SDbObj
*
pDb
);
void
mnodeRemoveSuperTableFromDb
(
SDbObj
*
pDb
);
...
...
src/mnode/inc/mnodeDnode.h
浏览文件 @
663439bf
...
@@ -77,6 +77,7 @@ void * mnodeGetDnodeByEp(char *ep);
...
@@ -77,6 +77,7 @@ void * mnodeGetDnodeByEp(char *ep);
void
mnodeUpdateDnode
(
SDnodeObj
*
pDnode
);
void
mnodeUpdateDnode
(
SDnodeObj
*
pDnode
);
int32_t
mnodeDropDnode
(
SDnodeObj
*
pDnode
,
void
*
pMsg
);
int32_t
mnodeDropDnode
(
SDnodeObj
*
pDnode
,
void
*
pMsg
);
int32_t
mnodeCompactDnodes
();
extern
int32_t
tsAccessSquence
;
extern
int32_t
tsAccessSquence
;
#ifdef __cplusplus
#ifdef __cplusplus
...
...
src/mnode/inc/mnodeMnode.h
浏览文件 @
663439bf
...
@@ -50,6 +50,7 @@ char* mnodeGetMnodeMasterEp();
...
@@ -50,6 +50,7 @@ char* mnodeGetMnodeMasterEp();
void
mnodeGetMnodeInfos
(
void
*
mnodes
);
void
mnodeGetMnodeInfos
(
void
*
mnodes
);
void
mnodeUpdateMnodeEpSet
(
SMInfos
*
pMnodes
);
void
mnodeUpdateMnodeEpSet
(
SMInfos
*
pMnodes
);
int32_t
mnodeCompactMnodes
();
#ifdef __cplusplus
#ifdef __cplusplus
}
}
#endif
#endif
...
...
src/mnode/inc/mnodeSdb.h
浏览文件 @
663439bf
...
@@ -92,6 +92,7 @@ void sdbUpdateMnodeRoles();
...
@@ -92,6 +92,7 @@ void sdbUpdateMnodeRoles();
int32_t
sdbGetReplicaNum
();
int32_t
sdbGetReplicaNum
();
int32_t
sdbInsertRow
(
SSdbRow
*
pRow
);
int32_t
sdbInsertRow
(
SSdbRow
*
pRow
);
int32_t
sdbInsertCompactRow
(
SSdbRow
*
pRow
);
int32_t
sdbDeleteRow
(
SSdbRow
*
pRow
);
int32_t
sdbDeleteRow
(
SSdbRow
*
pRow
);
int32_t
sdbUpdateRow
(
SSdbRow
*
pRow
);
int32_t
sdbUpdateRow
(
SSdbRow
*
pRow
);
int32_t
sdbInsertRowToQueue
(
SSdbRow
*
pRow
);
int32_t
sdbInsertRowToQueue
(
SSdbRow
*
pRow
);
...
@@ -106,6 +107,7 @@ int32_t sdbGetId(void *pTable);
...
@@ -106,6 +107,7 @@ int32_t sdbGetId(void *pTable);
uint64_t
sdbGetVersion
();
uint64_t
sdbGetVersion
();
bool
sdbCheckRowDeleted
(
void
*
pTable
,
void
*
pRow
);
bool
sdbCheckRowDeleted
(
void
*
pTable
,
void
*
pRow
);
int32_t
mnodeCompactWal
();
#ifdef __cplusplus
#ifdef __cplusplus
}
}
#endif
#endif
...
...
src/mnode/inc/mnodeTable.h
浏览文件 @
663439bf
...
@@ -36,6 +36,7 @@ void mnodeCancelGetNextSuperTable(void *pIter);
...
@@ -36,6 +36,7 @@ void mnodeCancelGetNextSuperTable(void *pIter);
void
mnodeDropAllChildTables
(
SDbObj
*
pDropDb
);
void
mnodeDropAllChildTables
(
SDbObj
*
pDropDb
);
void
mnodeDropAllSuperTables
(
SDbObj
*
pDropDb
);
void
mnodeDropAllSuperTables
(
SDbObj
*
pDropDb
);
void
mnodeDropAllChildTablesInVgroups
(
SVgObj
*
pVgroup
);
void
mnodeDropAllChildTablesInVgroups
(
SVgObj
*
pVgroup
);
int32_t
mnodeCompactTables
();
#ifdef __cplusplus
#ifdef __cplusplus
}
}
...
...
src/mnode/inc/mnodeUser.h
浏览文件 @
663439bf
...
@@ -33,6 +33,8 @@ char * mnodeGetUserFromMsg(void *pMnodeMsg);
...
@@ -33,6 +33,8 @@ char * mnodeGetUserFromMsg(void *pMnodeMsg);
int32_t
mnodeCreateUser
(
SAcctObj
*
pAcct
,
char
*
name
,
char
*
pass
,
void
*
pMsg
);
int32_t
mnodeCreateUser
(
SAcctObj
*
pAcct
,
char
*
name
,
char
*
pass
,
void
*
pMsg
);
void
mnodeDropAllUsers
(
SAcctObj
*
pAcct
);
void
mnodeDropAllUsers
(
SAcctObj
*
pAcct
);
int32_t
mnodeCompactUsers
();
#ifdef __cplusplus
#ifdef __cplusplus
}
}
#endif
#endif
...
...
src/mnode/inc/mnodeVgroup.h
浏览文件 @
663439bf
...
@@ -32,6 +32,7 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb);
...
@@ -32,6 +32,7 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb);
void
mnodeSendDropAllDbVgroupsMsg
(
SDbObj
*
pDropDb
);
void
mnodeSendDropAllDbVgroupsMsg
(
SDbObj
*
pDropDb
);
void
mnodeDropAllDnodeVgroups
(
SDnodeObj
*
pDropDnode
);
void
mnodeDropAllDnodeVgroups
(
SDnodeObj
*
pDropDnode
);
//void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
//void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
int32_t
mnodeCompactVgroups
();
void
*
mnodeGetNextVgroup
(
void
*
pIter
,
SVgObj
**
pVgroup
);
void
*
mnodeGetNextVgroup
(
void
*
pIter
,
SVgObj
**
pVgroup
);
void
mnodeCancelGetNextVgroup
(
void
*
pIter
);
void
mnodeCancelGetNextVgroup
(
void
*
pIter
);
...
...
src/mnode/src/mnodeAcct.c
浏览文件 @
663439bf
...
@@ -238,6 +238,32 @@ static int32_t mnodeCreateRootAcct() {
...
@@ -238,6 +238,32 @@ static int32_t mnodeCreateRootAcct() {
return
sdbInsertRow
(
&
row
);
return
sdbInsertRow
(
&
row
);
}
}
int32_t
mnodeCompactAccts
()
{
void
*
pIter
=
NULL
;
SAcctObj
*
pAcct
=
NULL
;
mInfo
(
"start to compact accts table..."
);
while
(
1
)
{
pIter
=
mnodeGetNextAcct
(
pIter
,
&
pAcct
);
if
(
pAcct
==
NULL
)
break
;
SSdbRow
row
=
{
.
type
=
SDB_OPER_GLOBAL
,
.
pTable
=
tsAcctSdb
,
.
pObj
=
pAcct
,
};
mInfo
(
"compact accts %s"
,
pAcct
->
user
);
sdbInsertCompactRow
(
&
row
);
}
mInfo
(
"end to compact accts table..."
);
return
0
;
}
#ifndef _ACCT
#ifndef _ACCT
int32_t
acctInit
()
{
return
TSDB_CODE_SUCCESS
;
}
int32_t
acctInit
()
{
return
TSDB_CODE_SUCCESS
;
}
...
...
src/mnode/src/mnodeCluster.c
浏览文件 @
663439bf
...
@@ -237,3 +237,27 @@ static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows,
...
@@ -237,3 +237,27 @@ static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows,
pShow
->
numOfReads
+=
numOfRows
;
pShow
->
numOfReads
+=
numOfRows
;
return
numOfRows
;
return
numOfRows
;
}
}
int32_t
mnodeCompactCluster
()
{
SClusterObj
*
pCluster
=
NULL
;
void
*
pIter
;
mInfo
(
"start to compact cluster table..."
);
pIter
=
mnodeGetNextCluster
(
NULL
,
&
pCluster
);
while
(
pCluster
)
{
SSdbRow
row
=
{
.
type
=
SDB_OPER_GLOBAL
,
.
pTable
=
tsClusterSdb
,
.
pObj
=
pCluster
,
};
sdbInsertCompactRow
(
&
row
);
pIter
=
mnodeGetNextCluster
(
pIter
,
&
pCluster
);
}
mInfo
(
"end to compact cluster table..."
);
return
0
;
}
\ No newline at end of file
src/mnode/src/mnodeDb.c
浏览文件 @
663439bf
...
@@ -389,7 +389,7 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) {
...
@@ -389,7 +389,7 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) {
if
(
pCfg
->
compression
<
0
)
pCfg
->
compression
=
tsCompression
;
if
(
pCfg
->
compression
<
0
)
pCfg
->
compression
=
tsCompression
;
if
(
pCfg
->
walLevel
<
0
)
pCfg
->
walLevel
=
tsWAL
;
if
(
pCfg
->
walLevel
<
0
)
pCfg
->
walLevel
=
tsWAL
;
if
(
pCfg
->
replications
<
0
)
pCfg
->
replications
=
tsReplications
;
if
(
pCfg
->
replications
<
0
)
pCfg
->
replications
=
tsReplications
;
if
(
pCfg
->
quorum
<
0
)
pCfg
->
quorum
=
tsQuorum
;
if
(
pCfg
->
quorum
<
0
)
pCfg
->
quorum
=
MIN
(
tsQuorum
,
pCfg
->
replications
)
;
if
(
pCfg
->
update
<
0
)
pCfg
->
update
=
tsUpdate
;
if
(
pCfg
->
update
<
0
)
pCfg
->
update
=
tsUpdate
;
if
(
pCfg
->
cacheLastRow
<
0
)
pCfg
->
cacheLastRow
=
tsCacheLastRow
;
if
(
pCfg
->
cacheLastRow
<
0
)
pCfg
->
cacheLastRow
=
tsCacheLastRow
;
if
(
pCfg
->
dbType
<
0
)
pCfg
->
dbType
=
0
;
if
(
pCfg
->
dbType
<
0
)
pCfg
->
dbType
=
0
;
...
@@ -1271,3 +1271,30 @@ void mnodeDropAllDbs(SAcctObj *pAcct) {
...
@@ -1271,3 +1271,30 @@ void mnodeDropAllDbs(SAcctObj *pAcct) {
mInfo
(
"acct:%s, all dbs:%d is dropped from sdb"
,
pAcct
->
user
,
numOfDbs
);
mInfo
(
"acct:%s, all dbs:%d is dropped from sdb"
,
pAcct
->
user
,
numOfDbs
);
}
}
int32_t
mnodeCompactDbs
()
{
void
*
pIter
=
NULL
;
SDbObj
*
pDb
=
NULL
;
mInfo
(
"start to compact dbs table..."
);
while
(
1
)
{
pIter
=
mnodeGetNextDb
(
pIter
,
&
pDb
);
if
(
pDb
==
NULL
)
break
;
SSdbRow
row
=
{
.
type
=
SDB_OPER_GLOBAL
,
.
pTable
=
tsDbSdb
,
.
pObj
=
pDb
,
.
rowSize
=
sizeof
(
SDbObj
),
};
mInfo
(
"compact dbs %s"
,
pDb
->
name
);
sdbInsertCompactRow
(
&
row
);
}
mInfo
(
"end to compact dbs table..."
);
return
0
;
}
\ No newline at end of file
src/mnode/src/mnodeDnode.c
浏览文件 @
663439bf
...
@@ -1270,3 +1270,30 @@ char* dnodeRoles[] = {
...
@@ -1270,3 +1270,30 @@ char* dnodeRoles[] = {
"vnode"
,
"vnode"
,
"any"
"any"
};
};
int32_t
mnodeCompactDnodes
()
{
SDnodeObj
*
pDnode
=
NULL
;
void
*
pIter
=
NULL
;
mInfo
(
"start to compact dnodes table..."
);
while
(
1
)
{
pIter
=
mnodeGetNextDnode
(
pIter
,
&
pDnode
);
if
(
pDnode
==
NULL
)
break
;
SSdbRow
row
=
{
.
type
=
SDB_OPER_GLOBAL
,
.
pTable
=
tsDnodeSdb
,
.
pObj
=
pDnode
,
.
rowSize
=
sizeof
(
SDnodeObj
),
};
mInfo
(
"compact dnode %d"
,
pDnode
->
dnodeId
);
sdbInsertCompactRow
(
&
row
);
}
mInfo
(
"end to compact dnodes table..."
);
return
0
;
}
\ No newline at end of file
src/mnode/src/mnodeMain.c
浏览文件 @
663439bf
...
@@ -57,6 +57,18 @@ static SStep tsMnodeSteps[] = {
...
@@ -57,6 +57,18 @@ static SStep tsMnodeSteps[] = {
{
"show"
,
mnodeInitShow
,
mnodeCleanUpShow
}
{
"show"
,
mnodeInitShow
,
mnodeCleanUpShow
}
};
};
static
SStep
tsMnodeCompactSteps
[]
=
{
{
"cluster"
,
mnodeCompactCluster
,
NULL
},
{
"dnodes"
,
mnodeCompactDnodes
,
NULL
},
{
"mnodes"
,
mnodeCompactMnodes
,
NULL
},
{
"accts"
,
mnodeCompactAccts
,
NULL
},
{
"users"
,
mnodeCompactUsers
,
NULL
},
{
"dbs"
,
mnodeCompactDbs
,
NULL
},
{
"vgroups"
,
mnodeCompactVgroups
,
NULL
},
{
"tables"
,
mnodeCompactTables
,
NULL
},
};
static
void
mnodeInitTimer
();
static
void
mnodeInitTimer
();
static
void
mnodeCleanupTimer
();
static
void
mnodeCleanupTimer
();
static
bool
mnodeNeedStart
()
;
static
bool
mnodeNeedStart
()
;
...
@@ -71,6 +83,11 @@ static int32_t mnodeInitComponents() {
...
@@ -71,6 +83,11 @@ static int32_t mnodeInitComponents() {
return
dnodeStepInit
(
tsMnodeSteps
,
stepSize
);
return
dnodeStepInit
(
tsMnodeSteps
,
stepSize
);
}
}
int32_t
mnodeCompactComponents
()
{
int32_t
stepSize
=
sizeof
(
tsMnodeCompactSteps
)
/
sizeof
(
SStep
);
return
dnodeStepInit
(
tsMnodeCompactSteps
,
stepSize
);
}
int32_t
mnodeStartSystem
()
{
int32_t
mnodeStartSystem
()
{
if
(
tsMgmtIsRunning
)
{
if
(
tsMgmtIsRunning
)
{
mInfo
(
"mnode module already started..."
);
mInfo
(
"mnode module already started..."
);
...
...
src/mnode/src/mnodeMnode.c
浏览文件 @
663439bf
...
@@ -566,3 +566,30 @@ static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, vo
...
@@ -566,3 +566,30 @@ static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, vo
return
numOfRows
;
return
numOfRows
;
}
}
int32_t
mnodeCompactMnodes
()
{
void
*
pIter
=
NULL
;
SMnodeObj
*
pMnode
=
NULL
;
mInfo
(
"start to compact mnodes table..."
);
while
(
1
)
{
pIter
=
mnodeGetNextMnode
(
pIter
,
&
pMnode
);
if
(
pMnode
==
NULL
)
break
;
SSdbRow
row
=
{
.
type
=
SDB_OPER_GLOBAL
,
.
pTable
=
tsMnodeSdb
,
.
pObj
=
pMnode
,
.
rowSize
=
sizeof
(
SMnodeObj
),
};
mInfo
(
"compact mnode %d"
,
pMnode
->
mnodeId
);
sdbInsertCompactRow
(
&
row
);
}
mInfo
(
"end to compact mnodes table..."
);
return
0
;
}
\ No newline at end of file
src/mnode/src/mnodeSdb.c
浏览文件 @
663439bf
@@ -20,6 +20,7 @@
 #include "tutil.h"
 #include "tref.h"
 #include "tbn.h"
+#include "tfs.h"
 #include "tqueue.h"
 #include "twal.h"
 #include "tsync.h"
...
@@ -450,6 +451,12 @@ int32_t sdbInit() {
   }
 
   tsSdbMgmt.status = SDB_STATUS_SERVING;
+
+  if (tsCompactMnodeWal) {
+    mnodeCompactWal();
+    exit(EXIT_SUCCESS);
+  }
+
   return TSDB_CODE_SUCCESS;
 }
...
@@ -726,6 +733,12 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   }
 }
 
+int32_t sdbInsertCompactRow(SSdbRow *pRow) {
+  SSdbTable *pTable = pRow->pTable;
+  if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE;
+  return sdbWriteRowToQueue(pRow, SDB_ACTION_INSERT);
+}
+
 int32_t sdbInsertRow(SSdbRow *pRow) {
   SSdbTable *pTable = pRow->pTable;
   if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE;
...
@@ -1138,3 +1151,46 @@ static void *sdbWorkerFp(void *pWorker) {
 int32_t sdbGetReplicaNum() {
   return tsSdbMgmt.cfg.replica;
 }
+
+int32_t mnodeCompactWal() {
+  sdbInfo("vgId:1, start compact mnode wal...");
+
+  // close old wal
+  walFsync(tsSdbMgmt.wal, true);
+  walClose(tsSdbMgmt.wal);
+
+  // reset version,then compacted wal log can start from version 1
+  tsSdbMgmt.version = 0;
+
+  // change wal to wal_tmp dir
+  SWalCfg walCfg = {.vgId = 1, .walLevel = TAOS_WAL_FSYNC, .keep = TAOS_WAL_KEEP, .fsyncPeriod = 0};
+  char temp[TSDB_FILENAME_LEN] = {0};
+  sprintf(temp, "%s/wal", tsMnodeTmpDir);
+  tsSdbMgmt.wal = walOpen(temp, &walCfg);
+  walRenew(tsSdbMgmt.wal);
+
+  // compact memory tables info to wal tmp dir
+  if (mnodeCompactComponents() != 0) {
+    tfsRmdir(tsMnodeTmpDir);
+    return -1;
+  }
+
+  // close wal
+  walFsync(tsSdbMgmt.wal, true);
+  walClose(tsSdbMgmt.wal);
+
+  // rename old wal to wal_bak
+  if (taosRename(tsMnodeDir, tsMnodeBakDir) != 0) {
+    return -1;
+  }
+
+  // rename wal_tmp to wal
+  if (taosRename(tsMnodeTmpDir, tsMnodeDir) != 0) {
+    return -1;
+  }
+
+  // del wal_tmp dir
+  sdbInfo("vgId:1, compact mnode wal success");
+
+  return 0;
+}
\ No newline at end of file
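mnodeCompactWal() above is a write-to-temp-then-swap: the current wal is closed, the in-memory tables are replayed into a fresh wal under tsMnodeTmpDir, and two renames swap the compacted directory into place while the old one is kept as tsMnodeBakDir. A minimal sketch of just the two-rename swap, with hypothetical directory names standing in for the real paths and error handling reduced to the bare minimum:

/* Sketch only: build data in a temp dir, then swap it in with two renames. */
#include <stdio.h>

int main(void) {
  const char *cur = "mnode";      /* current data dir (stands in for tsMnodeDir)    */
  const char *tmp = "mnode_tmp";  /* freshly written, compacted copy (tsMnodeTmpDir) */
  const char *bak = "mnode_bak";  /* where the old data ends up (tsMnodeBakDir)      */

  if (rename(cur, bak) != 0) { perror("rename cur -> bak"); return 1; }
  if (rename(tmp, cur) != 0) { perror("rename tmp -> cur"); return 1; }

  puts("compacted data swapped in, old data kept as backup");
  return 0;
}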
src/mnode/src/mnodeTable.c
@@ -3242,3 +3242,65 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
   return numOfRows;
 }
+
+static int32_t mnodeCompactSuperTables() {
+  void *pIter = NULL;
+  SSTableObj *pTable = NULL;
+
+  mInfo("start to compact super table...");
+
+  while (1) {
+    pIter = mnodeGetNextSuperTable(pIter, &pTable);
+    if (pTable == NULL) break;
+
+    int32_t schemaSize = (pTable->numOfColumns + pTable->numOfTags) * sizeof(SSchema);
+    SSdbRow row = {
+      .type = SDB_OPER_GLOBAL,
+      .pTable = tsSuperTableSdb,
+      .pObj = pTable,
+      .rowSize = sizeof(SSTableObj) + schemaSize,
+    };
+
+    mInfo("compact super %" PRIu64, pTable->uid);
+
+    sdbInsertCompactRow(&row);
+  }
+
+  mInfo("end to compact super table...");
+
+  return 0;
+}
+
+static int32_t mnodeCompactChildTables() {
+  void *pIter = NULL;
+  SCTableObj *pTable = NULL;
+
+  mInfo("start to compact child table...");
+
+  while (1) {
+    pIter = mnodeGetNextChildTable(pIter, &pTable);
+    if (pTable == NULL) break;
+
+    SSdbRow row = {
+      .type = SDB_OPER_GLOBAL,
+      .pObj = pTable,
+      .pTable = tsChildTableSdb,
+    };
+
+    mInfo("compact child %" PRIu64 ":%d", pTable->uid, pTable->tid);
+
+    sdbInsertCompactRow(&row);
+  }
+
+  mInfo("end to compact child table...");
+
+  return 0;
+}
+
+int32_t mnodeCompactTables() {
+  mnodeCompactSuperTables();
+
+  mnodeCompactChildTables();
+
+  return 0;
+}
\ No newline at end of file
src/mnode/src/mnodeUser.c
@@ -617,3 +617,30 @@ static int32_t mnodeProcessAuthMsg(SMnodeMsg *pMsg) {
   return mnodeRetriveAuth(pAuthMsg->user, &pAuthRsp->spi, &pAuthRsp->encrypt, pAuthRsp->secret, pAuthRsp->ckey);
 }
+
+int32_t mnodeCompactUsers() {
+  void *pIter = NULL;
+  SUserObj *pUser = NULL;
+
+  mInfo("start to compact users table...");
+
+  while (1) {
+    pIter = mnodeGetNextUser(pIter, &pUser);
+    if (pUser == NULL) break;
+
+    SSdbRow row = {
+      .type = SDB_OPER_GLOBAL,
+      .pTable = tsUserSdb,
+      .pObj = pUser,
+      .rowSize = sizeof(SUserObj),
+    };
+
+    mInfo("compact users %s", pUser->user);
+
+    sdbInsertCompactRow(&row);
+  }
+
+  mInfo("end to compact users table...");
+
+  return 0;
+}
\ No newline at end of file
src/mnode/src/mnodeVgroup.c
@@ -1302,3 +1302,30 @@ void mnodeSetVgidVer(int8_t *cver, uint64_t iver) {
   cver[1] = (int8_t)((int32_t)(iver % 100000) / 100);
   cver[2] = (int8_t)(iver % 100);
 }
+
+int32_t mnodeCompactVgroups() {
+  void *pIter = NULL;
+  SVgObj *pVgroup = NULL;
+
+  mInfo("start to compact vgroups table...");
+
+  while (1) {
+    pIter = mnodeGetNextVgroup(pIter, &pVgroup);
+    if (pVgroup == NULL) break;
+
+    SSdbRow row = {
+      .type = SDB_OPER_GLOBAL,
+      .pTable = tsVgroupSdb,
+      .pObj = pVgroup,
+      .rowSize = sizeof(SVgObj),
+    };
+
+    mInfo("compact vgroups %d", pVgroup->vgId);
+
+    sdbInsertCompactRow(&row);
+  }
+
+  mInfo("end to compact vgroups table...");
+
+  return 0;
+}
\ No newline at end of file
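All of the mnodeCompact*() routines in this change (dnodes, mnodes, accts, users, dbs, vgroups, tables) share one shape: walk the in-memory table with a getNext-style iterator and emit one compact row per object through sdbInsertCompactRow(). The sketch below isolates that loop; only the iteration pattern is taken from the diff, the types and helpers are illustrative stand-ins, not the real sdb API.

/* Sketch only: getNext-style iterator plus one emitted row per object. */
#include <stddef.h>
#include <stdio.h>

typedef struct {
  int id;
} SObjSketch;

static SObjSketch objs[] = {{1}, {2}, {3}};

/* pass the previous cursor, receive the next object; NULL when exhausted */
static void *getNextObj(void *pIter, SObjSketch **ppObj) {
  size_t idx = (pIter == NULL) ? 0 : (size_t)((SObjSketch *)pIter - objs) + 1;
  *ppObj = (idx < sizeof(objs) / sizeof(objs[0])) ? &objs[idx] : NULL;
  return *ppObj;
}

/* stands in for filling an SSdbRow and calling sdbInsertCompactRow(&row) */
static void emitCompactRow(const SObjSketch *pObj) {
  printf("compact object %d\n", pObj->id);
}

int main(void) {
  void       *pIter = NULL;
  SObjSketch *pObj  = NULL;

  while (1) {
    pIter = getNextObj(pIter, &pObj);
    if (pObj == NULL) break;
    emitCompactRow(pObj);
  }
  return 0;
}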
src/os/inc/osMips64.h
0 → 100644
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_OS_MIPS64_H
#define TDENGINE_OS_MIPS64_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <stdlib.h>
#include <argp.h>
#include <arpa/inet.h>
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <float.h>
#include <ifaddrs.h>
#include <libgen.h>
#include <limits.h>
#include <locale.h>
#include <math.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <pthread.h>
#include <pwd.h>
#include <regex.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/statvfs.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <syslog.h>
#include <termios.h>
#include <unistd.h>
#include <wchar.h>
#include <wordexp.h>
#include <wctype.h>
#include <inttypes.h>
#include <fcntl.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <error.h>
#include <linux/sysctl.h>
#include <math.h>
#include <poll.h>
#ifdef __cplusplus
}
#endif
#endif
src/os/src/detail/osSignal.c
@@ -25,14 +25,14 @@
 typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context);
 
 void taosSetSignal(int32_t signum, FSignalHandler sigfp) {
-  struct sigaction act = {{0}};
+  struct sigaction act; memset(&act, 0, sizeof(act));
 
 #if 1
   act.sa_flags = SA_SIGINFO;
   act.sa_sigaction = (FLinuxSignalHandler)sigfp;
 #else
   act.sa_handler = sigfp;
 #endif
 
   sigaction(signum, &act, NULL);
 }
 
 void taosIgnSignal(int32_t signum) {
...
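The one-line change above swaps the brace initializer {{0}} for an explicit memset(). The nested-brace form assumes a particular layout for the first member of struct sigaction and does not compile cleanly on every platform (MIPS is the newly added target in this commit), while memset() zeroes the structure portably. A small stand-alone sketch of the resulting pattern; SIGUSR2 and the handler below are purely illustrative:

/* Sketch only: portable zero-initialization of struct sigaction. */
#define _XOPEN_SOURCE 700
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void onSignal(int signum, siginfo_t *sigInfo, void *context) {
  (void)sigInfo;
  (void)context;
  printf("caught signal %d\n", signum);
}

int main(void) {
  struct sigaction act;
  memset(&act, 0, sizeof(act));   /* portable zero-initialization */

  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = onSignal;    /* three-argument handler, as in taosSetSignal */
  sigaction(SIGUSR2, &act, NULL);

  raise(SIGUSR2);                 /* delivered synchronously for the demo */
  return 0;
}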
src/plugins/monitor/src/monMain.c
@@ -417,3 +417,13 @@ void monExecuteSQL(char *sql) {
   monDebug("execute sql:%s", sql);
   taos_query_a(tsMonitor.conn, sql, monExecSqlCb, "sql");
 }
+
+void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void *param) {
+  if (tsMonitor.conn == NULL) {
+    callback(param, NULL, TSDB_CODE_MON_CONNECTION_INVALID);
+    return;
+  }
+
+  monDebug("execute sql:%s", sql);
+  taos_query_a(tsMonitor.conn, sql, callback, param);
+}
src/query/inc/qHistogram.h
@@ -40,7 +40,7 @@ typedef struct SHeapEntry {
 } SHeapEntry;
 
 typedef struct SHistogramInfo {
-  int32_t numOfElems;
+  int64_t numOfElems;
   int32_t numOfEntries;
   int32_t maxEntries;
   double  min;
...
src/query/src/qHistogram.c
@@ -446,7 +446,7 @@ void tHistogramDestroy(SHistogramInfo** pHisto) {
 }
 
 void tHistogramPrint(SHistogramInfo* pHisto) {
-  printf("total entries: %d, elements: %d\n", pHisto->numOfEntries, pHisto->numOfElems);
+  printf("total entries: %d, elements: %" PRId64 "\n", pHisto->numOfEntries, pHisto->numOfElems);
 #if defined(USE_ARRAYLIST)
   for (int32_t i = 0; i < pHisto->numOfEntries; ++i) {
     printf("%d: (%f, %" PRId64 ")\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num);
...
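This printf change goes with the header change above: once numOfElems becomes int64_t, printing it with %d is undefined behavior, so the format switches to the PRId64 macro from <inttypes.h>. A tiny self-contained illustration of the pairing:

/* Sketch only: matching printf conversions to int32_t and int64_t fields. */
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int32_t numOfEntries = 3;
  int64_t numOfElems   = 5000000000LL;  /* no longer representable as int32_t */

  /* %d for the int32_t, PRId64 for the int64_t */
  printf("total entries: %d, elements: %" PRId64 "\n", numOfEntries, numOfElems);
  return 0;
}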
src/util/src/tcrc32c.c
@@ -18,7 +18,7 @@
   3. This notice may not be removed or altered from any source distribution.
 */
 
-#ifndef _TD_ARM_
+#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
 #include <nmmintrin.h>
 #endif
...
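Background on this guard: <nmmintrin.h> provides the SSE4.2 CRC32 intrinsics and exists only for x86 compilers, so the include now has to be skipped for MIPS builds as well as ARM. The sketch below shows the same guard wrapped around a CRC-32C byte update with a plain bitwise fallback; the fallback is illustrative, not the software path used by tcrc32c.c itself. On x86 it needs -msse4.2 (or equivalent) to compile; define _TD_ARM_ or _TD_MIPS_, as the TDengine build does, on other targets.

/* Sketch only: guarded hardware CRC-32C step with a bitwise fallback. */
#include <inttypes.h>
#include <stdio.h>

#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
#include <nmmintrin.h>
static uint32_t crc32cByte(uint32_t crc, uint8_t b) {
  return (uint32_t)_mm_crc32_u8(crc, b);  /* SSE4.2 CRC-32C step */
}
#else
static uint32_t crc32cByte(uint32_t crc, uint8_t b) {
  crc ^= b;                               /* reflected CRC-32C, poly 0x82F63B78 */
  for (int i = 0; i < 8; ++i) {
    crc = (crc >> 1) ^ (0x82F63B78u & (uint32_t)(-(int32_t)(crc & 1u)));
  }
  return crc;
}
#endif

int main(void) {
  const char *msg = "tdengine";
  uint32_t crc = 0xFFFFFFFFu;
  for (const char *p = msg; *p != '\0'; ++p) {
    crc = crc32cByte(crc, (uint8_t)*p);
  }
  printf("crc32c: %08" PRIx32 "\n", crc ^ 0xFFFFFFFFu);
  return 0;
}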
src/vnode/src/vnodeMain.c
@@ -120,12 +120,14 @@ int32_t vnodeDrop(int32_t vgId) {
     vDebug("vgId:%d, failed to drop, vnode not find", vgId);
     return TSDB_CODE_VND_INVALID_VGROUP_ID;
   }
+  if (pVnode->dropped) {
+    vnodeRelease(pVnode);
+    return TSDB_CODE_SUCCESS;
+  }
 
   vInfo("vgId:%d, vnode will be dropped, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
   pVnode->dropped = 1;
 
-  // remove from hash, so new messages wont be consumed
-  vnodeRemoveFromHash(pVnode);
   vnodeRelease(pVnode);
   vnodeCleanupInMWorker(pVnode);
...
@@ -425,6 +427,10 @@ int32_t vnodeOpen(int32_t vgId) {
 int32_t vnodeClose(int32_t vgId) {
   SVnodeObj *pVnode = vnodeAcquire(vgId);
   if (pVnode == NULL) return 0;
+  if (pVnode->dropped) {
+    vnodeRelease(pVnode);
+    return 0;
+  }
 
   vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, pVnode);
   vnodeRemoveFromHash(pVnode);
...
@@ -510,6 +516,8 @@ void vnodeCleanUp(SVnodeObj *pVnode) {
   vnodeSetClosingStatus(pVnode);
 
+  vnodeRemoveFromHash(pVnode);
+
   // stop replication module
   if (pVnode->sync > 0) {
     int64_t sync = pVnode->sync;
...
src/vnode/src/vnodeRead.c
@@ -117,14 +117,17 @@ static SVReadMsg *vnodeBuildVReadMsg(SVnodeObj *pVnode, void *pCont, int32_t con
 }
 
 int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) {
+  SVnodeObj *pVnode = vparam;
+  if (pVnode->dropped) {
+    return TSDB_CODE_APP_NOT_READY;
+  }
+
   SVReadMsg *pRead = vnodeBuildVReadMsg(vparam, pCont, contLen, qtype, rparam);
   if (pRead == NULL) {
     assert(terrno != 0);
     return terrno;
   }
 
-  SVnodeObj *pVnode = vparam;
   int32_t code = vnodeCheckRead(pVnode);
   if (code != TSDB_CODE_SUCCESS) {
     taosFreeQitem(pRead);
...
src/vnode/src/vnodeStatus.c
@@ -66,6 +66,9 @@ static bool vnodeSetClosingStatusImp(SVnodeObj* pVnode) {
 }
 
 bool vnodeSetClosingStatus(SVnodeObj* pVnode) {
+  if (pVnode->status == TAOS_VN_STATUS_CLOSING) return true;
+
   while (!vnodeSetClosingStatusImp(pVnode)) {
     taosMsleep(1);
   }
...
src/vnode/src/vnodeSync.c
@@ -55,6 +55,11 @@ void vnodeNotifyRole(int32_t vgId, int8_t role) {
     vTrace("vgId:%d, vnode not found while notify role", vgId);
     return;
   }
+  if (pVnode->dropped) {
+    vTrace("vgId:%d, vnode dropped while notify role", vgId);
+    vnodeRelease(pVnode);
+    return;
+  }
 
   vInfo("vgId:%d, sync role changed from %s to %s", pVnode->vgId, syncRole[pVnode->role], syncRole[role]);
   pVnode->role = role;
...
@@ -75,6 +80,11 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) {
     vTrace("vgId:%d, vnode not found while flow ctrl", vgId);
     return;
   }
+  if (pVnode->dropped) {
+    vTrace("vgId:%d, vnode dropped while flow ctrl", vgId);
+    vnodeRelease(pVnode);
+    return;
+  }
 
   if (pVnode->flowctrlLevel != level) {
     vDebug("vgId:%d, set flowctrl level from %d to %d", pVnode->vgId, pVnode->flowctrlLevel, level);
...
@@ -129,6 +139,7 @@ int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rpara
   SVnodeObj *pVnode = vnodeAcquire(vgId);
   if (pVnode == NULL) {
     vError("vgId:%d, vnode not found while write to cache", vgId);
+    vnodeRelease(pVnode);
     return TSDB_CODE_VND_INVALID_VGROUP_ID;
   }
...
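The vnode changes above all add the same guard: after a vnode is looked up by vgId, the caller checks pVnode->dropped and backs out early, releasing the reference it just took, so a vnode that is being dropped no longer accepts role notifications, flow-control updates, queued reads, or late drop/close calls. A compact stand-alone sketch of that acquire / check-dropped / release shape, using stand-in types rather than the real vnode API:

/* Sketch only: early-out on a dropped object while keeping refcounts balanced. */
#include <stdio.h>

typedef struct {
  int vgId;
  int refCount;
  int dropped;
} SVnodeSketch;

static SVnodeSketch vnode = {.vgId = 2, .refCount = 0, .dropped = 1};

static SVnodeSketch *acquireVnode(int vgId) {
  if (vgId != vnode.vgId) return NULL;
  vnode.refCount++;
  return &vnode;
}

static void releaseVnode(SVnodeSketch *pVnode) { pVnode->refCount--; }

static int notifyRole(int vgId, int role) {
  SVnodeSketch *pVnode = acquireVnode(vgId);
  if (pVnode == NULL) return -1;
  if (pVnode->dropped) {          /* the new early-out: ignore a dropped vnode */
    releaseVnode(pVnode);
    return 0;
  }
  printf("vgId:%d, role set to %d\n", pVnode->vgId, role);
  releaseVnode(pVnode);
  return 0;
}

int main(void) {
  notifyRole(2, 1);  /* silently ignored because the vnode is marked dropped */
  return 0;
}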
src/vnode/src/vnodeWrite.c
@@ -340,8 +340,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
     if (pWrite->processedCount >= 100) {
       vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code),
              pWrite->processedCount);
-      pWrite->processedCount = 1;
-      dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code);
+      void *handle = pWrite->rpcMsg.handle;
+      taosFreeQitem(pWrite);
+      vnodeRelease(pVnode);
+      SRpcMsg rpcRsp = {.handle = handle, .code = code};
+      rpcSendResponse(&rpcRsp);
     } else {
       code = vnodePerformFlowCtrl(pWrite);
       if (code == 0) {
...
@@ -386,4 +389,6 @@ void vnodeWaitWriteCompleted(SVnodeObj *pVnode) {
     vTrace("vgId:%d, queued wmsg num:%d", pVnode->vgId, pVnode->queuedWMsg);
     taosMsleep(10);
   }
+
+  taosMsleep(900);
 }
tests/perftest-scripts/perftest-query.sh
@@ -74,7 +74,7 @@ function runQueryPerfTest {
     python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
 
-    python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
+    # python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
 }
...
tests/pytest/tools/taosdemoPerformance.py
@@ -104,7 +104,7 @@ class taosdemoPerformace:
         return output
 
     def insertData(self):
-        os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson())
+        os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
         self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
         self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
         self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
...
tests/pytest/tools/taosdemoTestWithJson.py
@@ -23,32 +23,32 @@ class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
 
     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
 
-        if ("community" in selfPath):
+        if "community" in selfPath:
             projPath = selfPath[:selfPath.find("community")]
         else:
             projPath = selfPath[:selfPath.find("tests")]
 
         for root, dirs, files in os.walk(projPath):
-            if ("taosd" in files):
+            if "taosd" in files:
                 rootRealPath = os.path.dirname(os.path.realpath(root))
-                if ("packaging" not in rootRealPath):
+                if "packaging" not in rootRealPath:
                     buildPath = root[:len(root) - len("/build/bin")]
                     break
         return buildPath
 
     def run(self):
         tdSql.prepare()
         buildPath = self.getBuildPath()
-        if (buildPath == ""):
+        if buildPath == "":
             tdLog.exit("taosd not found!")
         else:
             tdLog.info("taosd found in %s" % buildPath)
 
         binPath = buildPath + "/build/bin/"
-        os.system("yes | %staosdemo -f tools/insert.json" % binPath)
+        os.system("%staosdemo -f tools/insert.json -y" % binPath)
 
         tdSql.execute("use db01")
         tdSql.query("select count(*) from stb01")
...
tests/script/general/parser/commit.sim
@@ -68,7 +68,7 @@ while $loop <= $loops
   while $i < 10
     sql select count(*) from $stb where t1 = $i
     if $data00 != $rowNum then
-      print expect $rowNum, actual: $data00
+      print expect $rowNum , actual: $data00
       return -1
     endi
     $i = $i + 1
...