Commit f3b42525

Authored May 26, 2021 by tickduan

    Merge branch 'master' into cq with continue query funciton

Parents: 4b4199ce, 2be5b246

Showing 40 changed files with 1475 additions and 940 deletions (+1475 −940)
.gitmodules                                                  +3    -3
cmake/platform.inc                                           +6    -0
cmake/version.inc                                            +1    -1
packaging/deb/makedeb.sh                                     +6    -1
packaging/rpm/tdengine.spec                                  +6    -1
packaging/tools/make_install.sh                              +10   -2
packaging/tools/makeclient.sh                                +12   -4
packaging/tools/makeclient_power.sh                          +11   -3
packaging/tools/makepkg.sh                                   +30   -3
packaging/tools/makepkg_power.sh                             +11   -2
snap/snapcraft.yaml                                          +2    -2
src/client/src/tscSQLParser.c                                +15   -2
src/client/src/tscServer.c                                   +4    -4
src/client/src/tscSub.c                                      +3    -3
src/connector/go                                             +1    -1
src/connector/nodejs/nodetaos/cinterface.js                  +129  -127
src/connector/nodejs/nodetaos/cursor.js                      +59   -62
src/connector/nodejs/package-lock.json                       +0    -285
src/connector/nodejs/package.json                            +1    -1
src/inc/taoserror.h                                          +1    -0
src/kit/taosdemo/taosdemo.c                                  +381  -371
src/mnode/src/mnodeSdb.c                                     +5    -5
src/mnode/src/mnodeTable.c                                   +2    -2
src/os/src/detail/osSignal.c                                 +3    -3
src/sync/src/syncMain.c                                      +6    -3
src/util/src/tcache.c                                        +1    -1
src/util/src/tcrc32c.c                                       +1    -1
src/vnode/src/vnodeMain.c                                    +5    -1
src/vnode/src/vnodeSync.c                                    +6    -1
tests/Jenkinsfile                                            +3    -3
tests/mas/Jenkinsfile                                        +309  -0
tests/perftest-scripts/perftest-query.sh                     +11   -4
tests/pytest/crash_gen/service_manager.py                    +28   -30
tests/pytest/crash_gen/shared/types.py                       +3    -1
tests/pytest/fulltest.sh                                     +1    -1
tests/pytest/insert/insertFromCSVPerformance.py              +1    -1
tests/pytest/perfbenchmark/taosdemoInsert.py                 +387  -0
tests/pytest/tools/taosdemoPerformance.py                    +3    -3
tests/script/general/parser/select_with_tags.sim             +7    -1
tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim    +1    -1
.gitmodules

 [submodule "src/connector/go"]
   path = src/connector/go
-  url = https://github.com/taosdata/driver-go
+  url = git@github.com:taosdata/driver-go.git
 [submodule "src/connector/grafanaplugin"]
   path = src/connector/grafanaplugin
-  url = https://github.com/taosdata/grafanaplugin
+  url = git@github.com:taosdata/grafanaplugin.git
 [submodule "src/connector/hivemq-tdengine-extension"]
   path = src/connector/hivemq-tdengine-extension
-  url = https://github.com/huskar-t/hivemq-tdengine-extension.git
+  url = git@github.com:taosdata/hivemq-tdengine-extension.git
 [submodule "tests/examples/rust"]
   path = tests/examples/rust
   url = https://github.com/songtianyi/tdengine-rust-bindings.git
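
Only .gitmodules itself changes here, so an existing checkout keeps fetching the submodules from the old HTTPS remotes until it is re-synced. A minimal sketch of picking up the new SSH URLs (assumes a working clone and SSH access to GitHub):

    # propagate the edited .gitmodules URLs into .git/config, then re-fetch the submodules
    git submodule sync --recursive
    git submodule update --init --recursive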
cmake/platform.inc

@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
     SET(TD_LINUX TRUE)
     SET(TD_LINUX_64 FALSE)
     SET(TD_ARM_64 TRUE)
+  ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
+    SET(CPUTYPE "mips64")
+    MESSAGE(STATUS "Set CPUTYPE to mips64")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_64 FALSE)
+    SET(TD_MIPS_64 TRUE)
   ENDIF ()
 ELSE ()
cmake/version.inc

@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.0.20.2")
+  SET(TD_VER_NUMBER "2.0.20.5")
 ENDIF ()

 IF (DEFINED VERCOMPATIBLE)
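
The `DEFINED VERNUMBER` guard means the 2.0.20.5 string above is only a fallback; a hedged sketch of overriding it at configure time (the build directory name is illustrative):

    mkdir -p build && cd build
    cmake .. -DVERNUMBER=2.0.20.5   # sets TD_VER_NUMBER explicitly instead of relying on the hard-coded default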
packaging/deb/makedeb.sh

@@ -58,7 +58,12 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
 cp ${compile_dir}/../src/inc/taos.h      ${pkg_dir}${install_home_path}/include
 cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
 cp -r ${top_dir}/tests/examples/*        ${pkg_dir}${install_home_path}/examples
-cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
+if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
+  cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
+else
+  echo "grafanaplugin bundled directory not found!"
+  exit 1
+fi
 cp -r ${top_dir}/src/connector/python    ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/go        ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/nodejs    ${pkg_dir}${install_home_path}/connector
packaging/rpm/tdengine.spec

@@ -66,7 +66,12 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
 cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
 cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
 cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
-cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
+if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
+  cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
+else
+  echo grafanaplugin bundled directory not found!
+  exit 1
+fi
 cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
packaging/tools/make_install.sh

@@ -243,9 +243,17 @@ function install_data() {
 }

 function install_connector() {
-    ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector
+    if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
+        ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
+    else
+        echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+    fi
+    if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
+        ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
+    else
+        echo "WARNING: go connector not found, please check if want to use it!"
+    fi
     ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
-    ${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
     ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
 }
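
The `find … -mindepth 1 -maxdepth 1 | read` guard introduced above only succeeds when the directory has at least one entry, i.e. when the go submodule is actually checked out. A standalone sketch of the idiom (the directory name is illustrative):

    # exit status 0 only if the directory contains at least one entry:
    # read consumes the first line of find's output and fails on empty input
    if find src/connector/go -mindepth 1 -maxdepth 1 | read; then
        echo "go connector present"
    else
        echo "go connector not found"
    fi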
packaging/tools/makeclient.sh

@@ -117,9 +117,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   if [ "$osType" != "Darwin" ]; then
     cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
   fi
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector
   cp -r ${connector_dir}/nodejs ${install_dir}/connector
 fi

 # Copy release note
packaging/tools/makeclient_power.sh

@@ -144,9 +144,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   if [ "$osType" != "Darwin" ]; then
     cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
   fi
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
packaging/tools/makepkg.sh

@@ -114,6 +114,25 @@ mkdir -p ${install_dir}/examples
 examples_dir="${top_dir}/tests/examples"
 cp -r ${examples_dir}/c ${install_dir}/examples
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+  if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
+    rm -rf ${examples_dir}/JDBC/connectionPools/target
+  fi
+  if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
+    rm -rf ${examples_dir}/JDBC/JDBCDemo/target
+  fi
+  if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
+    rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
+  fi
+  if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
+    rm -rf ${examples_dir}/JDBC/springbootdemo/target
+  fi
+  if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
+    rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
+  fi
+  if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
+    rm -rf ${examples_dir}/JDBC/taosdemo/target
+  fi
   cp -r ${examples_dir}/JDBC ${install_dir}/examples
   cp -r ${examples_dir}/matlab ${install_dir}/examples
   cp -r ${examples_dir}/python ${install_dir}/examples

@@ -131,9 +150,17 @@ connector_dir="${code_dir}/connector"
 mkdir -p ${install_dir}/connector
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector
   cp -r ${connector_dir}/nodejs ${install_dir}/connector
 fi

 # Copy release note
packaging/tools/makepkg_power.sh

@@ -166,9 +166,18 @@ connector_dir="${code_dir}/connector"
 mkdir -p ${install_dir}/connector
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector/
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
snap/snapcraft.yaml

 name: tdengine
 base: core18
-version: '2.0.20.2'
+version: '2.0.20.5'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |

@@ -72,7 +72,7 @@ parts:
       - usr/bin/taosd
       - usr/bin/taos
       - usr/bin/taosdemo
-      - usr/lib/libtaos.so.2.0.20.2
+      - usr/lib/libtaos.so.2.0.20.5
       - usr/lib/libtaos.so.1
      - usr/lib/libtaos.so
src/client/src/tscSQLParser.c

@@ -6494,7 +6494,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
   size_t valSize = taosArrayGetSize(pValList);

-  // too long tag values will return invalid sql, not be truncated automatically
   SSchema  *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta);
   STagData *pTag = &pCreateTableInfo->tagdata;

@@ -6504,7 +6503,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

-  SArray* pNameList = NULL; size_t nameSize = 0;
   int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta);

@@ -7119,6 +7117,7 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
   const char* msg6 = "too many tables in from clause";
   const char* msg7 = "invalid table alias name";
   const char* msg8 = "alias name too long";
+  const char* msg9 = "only tag query not compatible with normal column filter";

   int32_t code = TSDB_CODE_SUCCESS;

@@ -7288,6 +7287,20 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
     if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
       return TSDB_CODE_TSC_INVALID_SQL;
     }
+
+    if (tscQueryTags(pQueryInfo)) {
+      SSqlExpr* pExpr1 = tscSqlExprGet(pQueryInfo, 0);
+      if (pExpr1->functionId != TSDB_FUNC_TID_TAG) {
+        int32_t numOfCols = (int32_t)taosArrayGetSize(pQueryInfo->colList);
+        for (int32_t i = 0; i < numOfCols; ++i) {
+          SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
+          if (pCols->numOfFilters > 0) {
+            return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+          }
+        }
+      }
+    }
   }

   if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
src/client/src/tscServer.c

@@ -2020,8 +2020,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
     }
   }

-  tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name));
+  tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self, pTableMeta->id.uid,
+           pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags);

   free(pTableMeta);
   return TSDB_CODE_SUCCESS;

@@ -2164,8 +2165,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
   pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
   if (pInfo->vgroupList->numOfVgroups <= 0) {
     //tfree(pInfo->vgroupList);
-    tscError("0x%"PRIx64" empty vgroup info", pSql->self);
+    tscDebug("0x%"PRIx64" empty vgroup info", pSql->self);
   } else {
     for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
       // just init, no need to lock
src/client/src/tscSub.c

@@ -215,7 +215,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
   taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer);
 }

 //TODO refactor: extract table list name not simply from the sql
 static SArray* getTableList( SSqlObj* pSql ) {
   const char* p = strstr(pSql->sqlstr, " from ");
   assert(p != NULL); // we are sure this is a 'select' statement

@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
   SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
   if (pNew == NULL) {
-    tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
+    tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object.", pSql->self);
     return NULL;
   } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
-    tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
+    tscError("0x%"PRIx64" failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
     return NULL;
   }
src/connector/go @ 8ce6d865

-Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
+Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
src/connector/nodejs/nodetaos/cinterface.js

Most of the +129/−127 in this file is whitespace and brace-style reformatting: the convert* helpers (convertTimestamp, convertBool, convertTinyint, convertSmallint, convertInt, convertBigint, convertFloat, convertDouble, convertBinary, convertNchar), the FFI binding table, CTaosInterface/connect and the catch (err) blocks are re-indented without behavioural change, and the comments on the taos_fetch_lengths and taos_affected_rows bindings are corrected to take a TAOS_RES *res. The functional changes are:

@@ -326,8 +327,8 @@ CTaosInterface.prototype.close = function close(connection) {
 CTaosInterface.prototype.query = function query(connection, sql) {
   return this.libtaos.taos_query(connection, ref.allocCString(sql));
 }
-CTaosInterface.prototype.affectedRows = function affectedRows(connection) {
-  return this.libtaos.taos_affected_rows(connection);
+CTaosInterface.prototype.affectedRows = function affectedRows(result) {
+  return this.libtaos.taos_affected_rows(result);
 }

@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
 CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
-  //let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
-  let pblock = this.libtaos.taos_fetch_row(result);
-  let num_of_rows = 1;
-  if (ref.isNull(pblock) == true) {
+  let pblock = ref.NULL_POINTER;
+  let num_of_rows = this.libtaos.taos_fetch_block(result, pblock);
+  if (ref.isNull(pblock.deref()) == true) {
     return {block: null, num_of_rows: 0};
   }
   var fieldL = this.libtaos.taos_fetch_lengths(result);

@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
   let blocks = new Array(fields.length);
   blocks.fill(null);
-  //num_of_rows = Math.abs(num_of_rows);
+  num_of_rows = Math.abs(num_of_rows);
   let offset = 0;
+  let ptr = pblock.deref();
   for (let i = 0; i < fields.length; i++) {
-    pdata = ref.reinterpret(pblock, 8, i * 8);
+    pdata = ref.reinterpret(ptr, 8, i * 8);
     if (ref.isNull(pdata.readPointer())) {
       blocks[i] = new Array();
     } else {
       pdata = ref.ref(pdata.readPointer());
       if (!convertFunctions[fields[i]['type']]) {
         throw new errors.DatabaseError("Invalid data type returned from database");
       }
-      blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
+      blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
     }
   }
-  return {blocks: blocks, num_of_rows: Math.abs(num_of_rows)}
+  return {blocks: blocks, num_of_rows}
 }
src/connector/nodejs/nodetaos/cursor.js

Here too most of the +59/−62 is whitespace-only (the require lines, TDengineCursor, execute_a, fetchall_a, consumeData, openStream and closeStream are re-indented without behavioural change). The functional change is in fetchall, which now pre-sizes the result array from affectedRows and writes rows by index instead of pushing them one by one:

@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
     throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
   }

-  let data = [];
+  let num_of_rows = this._chandle.affectedRows(this._result);
+  let data = new Array(num_of_rows);
+
   this._rowcount = 0;
-  //let nodetime = 0;
   let time = 0;
   const obs = new PerformanceObserver((items) => {
     time += items.getEntries()[0].duration;
     performance.clearMarks();
   });
-  /*
-  const obs2 = new PerformanceObserver((items) => {
-    nodetime += items.getEntries()[0].duration;
-    performance.clearMarks();
-  });
-  obs2.observe({ entryTypes: ['measure'] });
-  performance.mark('nodea');
-  */
   obs.observe({ entryTypes: ['measure'] });
   performance.mark('A');
   while (true) {
     let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
     // console.log(blockAndRows);
     // break;
     let block = blockAndRows.blocks;
     let num_of_rows = blockAndRows.num_of_rows;
     if (num_of_rows == 0) {

@@ -205,16 +200,18 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
     this._rowcount += num_of_rows;
     let numoffields = this._fields.length;
     for (let i = 0; i < num_of_rows; i++) {
-      data.push([]);
+      // data.push([]);
       let rowBlock = new Array(numoffields);
       for (let j = 0; j < numoffields; j++) {
         rowBlock[j] = block[j][i];
       }
-      data[data.length - 1] = (rowBlock);
+      data[this._rowcount - num_of_rows + i] = (rowBlock);
+      // data.push(rowBlock);
     }
   }
   performance.mark('B');
   performance.measure('query', 'A', 'B');
   let response = this._createSetResponse(this._rowcount, time)
src/connector/nodejs/package-lock.json (deleted, 100644 → 0)

The checked-in npm lockfile for td2.0-connector 2.0.6 (lockfileVersion 1) is removed in its entirety (−285 lines). It pinned the dependency tree for array-index, d, debug, es5-ext, es6-iterator, es6-symbol, ext, ffi-napi, get-symbol-from-current-process-h, get-uv-event-loop-napi-h, ms, next-tick, node-addon-api, node-gyp-build, ref-array-napi, ref-napi, ref-struct-di, ref-struct-napi, and type, each entry with its resolved registry URL and integrity hash.
src/connector/nodejs/package.json
 {
   "name": "td2.0-connector",
-  "version": "2.0.6",
+  "version": "2.0.7",
   "description": "A Node.js connector for TDengine.",
   "main": "tdengine.js",
   "directories": {
...
src/inc/taoserror.h
...
@@ -219,6 +219,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_NO_WRITE_AUTH          TAOS_DEF_ERROR_CODE(0, 0x0512)  //"Database write operation denied")
 #define TSDB_CODE_VND_IS_SYNCING             TAOS_DEF_ERROR_CODE(0, 0x0513)  //"Database is syncing")
 #define TSDB_CODE_VND_INVALID_TSDB_STATE     TAOS_DEF_ERROR_CODE(0, 0x0514)  //"Invalid tsdb state")
+#define TSDB_CODE_VND_IS_CLOSING             TAOS_DEF_ERROR_CODE(0, 0x0515)  //"Database is closing")

 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID       TAOS_DEF_ERROR_CODE(0, 0x0600)  //"Invalid table ID")
...
src/kit/taosdemo/taosdemo.c
...
@@ -189,6 +189,8 @@ typedef struct {
 } SColDes;

+/* Used by main to communicate with parse_opt. */
+static char *g_dupstr = NULL;

 typedef struct SArguments_S {
   char *   metaFile;
   uint32_t test_mode;
...
@@ -361,7 +363,7 @@ typedef struct SDbs_S {

 typedef struct SpecifiedQueryInfo_S {
   uint64_t     queryInterval;  // 0: unlimit  > 0   loop/s
-  uint64_t     concurrent;
+  uint32_t     concurrent;
   uint64_t     sqlCount;
   uint32_t     asyncMode; // 0: sync, 1: async
   uint64_t     subscribeInterval; // ms
...
@@ -372,6 +374,9 @@ typedef struct SpecifiedQueryInfo_S {
   char         result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
   int          resubAfterConsume[MAX_QUERY_SQL_COUNT];
   TAOS_SUB*    tsub[MAX_QUERY_SQL_COUNT];
+  char         topic[MAX_QUERY_SQL_COUNT][32];
+  int          consumed[MAX_QUERY_SQL_COUNT];
+  TAOS_RES*    res[MAX_QUERY_SQL_COUNT];
   uint64_t     totalQueried;
 } SpecifiedQueryInfo;
...
@@ -389,7 +394,7 @@ typedef struct SuperQueryInfo_S {
   uint64_t     sqlCount;
   char         sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
   char         result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN + 1];
-  int          resubAfterConsume[MAX_QUERY_SQL_COUNT];
+  int          resubAfterConsume;
   TAOS_SUB*    tsub[MAX_QUERY_SQL_COUNT];
   char*        childTblName;
...
@@ -416,7 +421,8 @@ typedef struct SThreadInfo_S {
   int       threadID;
   char      db_name[MAX_DB_NAME_SIZE+1];
   uint32_t  time_precision;
-  char      fp[4096];
+  char      filePath[4096];
+  FILE      *fp;
   char      tb_prefix[MAX_TB_NAME_SIZE];
   uint64_t  start_table_from;
   uint64_t  end_table_to;
...
@@ -449,6 +455,7 @@ typedef struct SThreadInfo_S {
   // seq of query or subscribe
   uint64_t  querySeq;   // sequence number of sql command
+  TAOS_SUB*  tsub;

 } threadInfo;
...
@@ -526,12 +533,13 @@ static int taosRandom()
 #endif // ifdef Windows

 static void prompt();
-static void prompt2();
 static int createDatabasesAndStables();
 static void createChildTables();
 static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
 static int postProceSql(char *host, struct sockaddr_in *pServAddr,
-        uint16_t port, char* sqlstr, char *resultFile);
+        uint16_t port, char* sqlstr, threadInfo *pThreadInfo);
+static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
+        int disorderRatio, int disorderRange);

 /* ************ Global variables ************  */
...
@@ -679,8 +687,9 @@ static void printHelp() {
           "The data_type of columns, default: INT,INT,INT,INT.");
   printf("%s%s%s%s\n", indent, "-w", indent,
           "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
-  printf("%s%s%s%s\n", indent, "-l", indent,
-          "The number of columns per record. Default is 4.");
+  printf("%s%s%s%s%d\n", indent, "-l", indent,
+          "The number of columns per record. Default is 4. Max values is ",
+          MAX_NUM_DATATYPE);
   printf("%s%s%s%s\n", indent, "-T", indent,
           "The number of threads. Default is 10.");
   printf("%s%s%s%s\n", indent, "-i", indent,
...
@@ -724,7 +733,6 @@ static bool isStringNumber(char *input)
 }

 static void parse_args(int argc, char *argv[], SArguments *arguments) {
-  char **sptr;
   for (int i = 1; i < argc; i++) {
     if (strcmp(argv[i], "-f") == 0) {
...
@@ -851,20 +859,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
       }
       arguments->database = argv[++i];
     } else if (strcmp(argv[i], "-l") == 0) {
-      if ((argc == i+1) ||
-        (!isStringNumber(argv[i+1]))) {
+      if (argc == i+1) {
+        if (!isStringNumber(argv[i+1])) {
           printHelp();
           errorPrint("%s", "\n\t-l need a number following!\n");
           exit(EXIT_FAILURE);
+        }
       }
       arguments->num_of_CPR = atoi(argv[++i]);
+
+      if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
+        printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
+        prompt();
+        arguments->num_of_CPR = MAX_NUM_DATATYPE;
+      }
+
+      for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
+        arguments->datatype[col] = NULL;
+      }
+
     } else if (strcmp(argv[i], "-b") == 0) {
       if (argc == i+1) {
         printHelp();
         errorPrint("%s", "\n\t-b need valid string following!\n");
         exit(EXIT_FAILURE);
       }
-      sptr = arguments->datatype;
       ++i;
       if (strstr(argv[i], ",") == NULL) {
         // only one col
...
@@ -881,12 +900,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
           errorPrint("%s", "-b: Invalid data_type!\n");
           exit(EXIT_FAILURE);
         }
-        sptr[0] = argv[i];
+        arguments->datatype[0] = argv[i];
       } else {
         // more than one col
         int index = 0;
-        char *dupstr = strdup(argv[i]);
-        char *running = dupstr;
+        g_dupstr = strdup(argv[i]);
+        char *running = g_dupstr;
         char *token = strsep(&running, ",");
         while (token != NULL) {
           if (strcasecmp(token, "INT")
...
@@ -899,16 +918,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
                   && strcasecmp(token, "BINARY")
                   && strcasecmp(token, "NCHAR")) {
             printHelp();
-            free(dupstr);
+            free(g_dupstr);
             errorPrint("%s", "-b: Invalid data_type!\n");
             exit(EXIT_FAILURE);
           }
-          sptr[index++] = token;
+          arguments->datatype[index++] = token;
           token = strsep(&running, ",");
           if (index >= MAX_NUM_DATATYPE) break;
         }
-        free(dupstr);
-        sptr[index] = NULL;
+        arguments->datatype[index] = NULL;
       }
     } else if (strcmp(argv[i], "-w") == 0) {
       if ((argc == i+1) ||
...
@@ -1081,9 +1099,9 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
     }
   }

-  verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
   if (code != 0) {
     if (!quiet) {
+      debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
       errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res));
     }
     taos_free_result(res);
...
@@ -1101,24 +1119,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
   return 0;
 }

-static void appendResultBufToFile(char *resultBuf, char *resultFile)
+static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
 {
-  FILE *fp = NULL;
-  if (resultFile[0] != 0) {
-    fp = fopen(resultFile, "at");
-    if (fp == NULL) {
+  pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
+  if (pThreadInfo->fp == NULL) {
       errorPrint(
           "%s() LN%d, failed to open result file: %s, result will not save to file\n",
-          __func__, __LINE__, resultFile);
+          __func__, __LINE__, pThreadInfo->filePath);
       return;
-    }
   }

-  fprintf(fp, "%s", resultBuf);
-  tmfclose(fp);
+  fprintf(pThreadInfo->fp, "%s", resultBuf);
+  tmfclose(pThreadInfo->fp);
+  pThreadInfo->fp = NULL;
 }

-static void appendResultToFile(TAOS_RES *res, char* resultFile) {
+static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
   TAOS_ROW    row = NULL;
   int         num_rows = 0;
   int         num_fields = taos_field_count(res);
...
@@ -1136,8 +1152,9 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
   // fetch the records row by row
   while((row = taos_fetch_row(res))) {
-    if (totalLen >= 100*1024*1024 - 32000) {
-      appendResultBufToFile(databuf, resultFile);
+    if ((strlen(pThreadInfo->filePath) > 0)
+            && (totalLen >= 100*1024*1024 - 32000)) {
+      appendResultBufToFile(databuf, pThreadInfo);
       totalLen = 0;
       memset(databuf, 0, 100*1024*1024);
     }
...
@@ -1150,8 +1167,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
   }

   verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
-      __func__, __LINE__, databuf, resultFile);
-  appendResultBufToFile(databuf, resultFile);
+      __func__, __LINE__, databuf, pThreadInfo->filePath);
+  if (strlen(pThreadInfo->filePath) > 0) {
+    appendResultBufToFile(databuf, pThreadInfo);
+  }
   free(databuf);
 }
...
@@ -1167,16 +1186,14 @@ static void selectAndGetResult(
       return;
     }

-    if ((strlen(pThreadInfo->fp))) {
-      appendResultToFile(res, pThreadInfo->fp);
-    }
+    fetchResult(res, pThreadInfo);
     taos_free_result(res);

   } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
     int retCode = postProceSql(
             g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
             command,
-            pThreadInfo->fp);
+            pThreadInfo);
     if (0 != retCode) {
       printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
     }
...
@@ -1709,7 +1726,7 @@ static void printfQueryMeta() {
   printf("query interval: \033[33m%" PRIu64 " ms\033[0m\n",
       g_queryInfo.specifiedQueryInfo.queryInterval);
   printf("top query times:\033[33m%" PRIu64 "\033[0m\n", g_args.query_times);
-  printf("concurrent: \033[33m%" PRIu64 "\033[0m\n",
+  printf("concurrent: \033[33m%d\033[0m\n",
       g_queryInfo.specifiedQueryInfo.concurrent);
   printf("mod: \033[33m%s\033[0m\n",
       (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
...
@@ -2006,13 +2023,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
   // show variables
   res = taos_query(taos, "show variables;");
-  //appendResultToFile(res, filename);
+  //fetchResult(res, filename);
   xDumpResultToFile(filename, res);

   // show dnodes
   res = taos_query(taos, "show dnodes;");
   xDumpResultToFile(filename, res);
-  //appendResultToFile(res, filename);
+  //fetchResult(res, filename);

   // show databases
   res = taos_query(taos, "show databases;");
...
@@ -2048,7 +2065,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
 }

 static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
-        char* sqlstr, char *resultFile)
+        char* sqlstr, threadInfo *pThreadInfo)
 {
   char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
...
@@ -2184,8 +2201,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
   response_buf[RESP_BUF_LEN - 1] = '\0';
   printf("Response:\n%s\n", response_buf);

-  if (resultFile) {
-    appendResultBufToFile(response_buf, resultFile);
+  if (strlen(pThreadInfo->filePath) > 0) {
+    appendResultBufToFile(response_buf, pThreadInfo);
   }

   free(request_buf);
...
@@ -2677,8 +2694,6 @@ static int createSuperTable(
snprintf
(
command
,
BUFFER_SIZE
,
"create table if not exists %s.%s (ts timestamp%s) tags %s"
,
dbName
,
superTbl
->
sTblName
,
cols
,
tags
);
verbosePrint
(
"%s() LN%d: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
errorPrint
(
"create supertable %s failed!
\n\n
"
,
superTbl
->
sTblName
);
...
...
@@ -2701,7 +2716,6 @@ static int createDatabasesAndStables() {
for
(
int
i
=
0
;
i
<
g_Dbs
.
dbCount
;
i
++
)
{
if
(
g_Dbs
.
db
[
i
].
drop
)
{
sprintf
(
command
,
"drop database if exists %s;"
,
g_Dbs
.
db
[
i
].
dbName
);
verbosePrint
(
"%s() %d command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
taos_close
(
taos
);
return
-
1
;
...
...
@@ -2774,7 +2788,6 @@ static int createDatabasesAndStables() {
" precision
\'
%s
\'
;"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
}
debugPrint
(
"%s() %d command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
taos_close
(
taos
);
errorPrint
(
"
\n
create database %s failed!
\n\n
"
,
g_Dbs
.
db
[
i
].
dbName
);
...
...
@@ -2791,8 +2804,6 @@ static int createDatabasesAndStables() {
for
(
int
j
=
0
;
j
<
g_Dbs
.
db
[
i
].
superTblCount
;
j
++
)
{
sprintf
(
command
,
"describe %s.%s;"
,
g_Dbs
.
db
[
i
].
dbName
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sTblName
);
verbosePrint
(
"%s() %d command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
ret
=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
true
);
if
((
ret
!=
0
)
||
(
g_Dbs
.
db
[
i
].
drop
))
{
...
...
@@ -2896,7 +2907,6 @@ static void* createTable(void *sarg)
}
len
=
0
;
verbosePrint
(
"%s() LN%d %s
\n
"
,
__func__
,
__LINE__
,
buffer
);
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
buffer
,
NO_INSERT_TYPE
,
false
)){
errorPrint
(
"queryDbExec() failed. buffer:
\n
%s
\n
"
,
buffer
);
free
(
buffer
);
...
...
@@ -2912,7 +2922,6 @@ static void* createTable(void *sarg)
}
if
(
0
!=
len
)
{
verbosePrint
(
"%s() %d buffer: %s
\n
"
,
__func__
,
__LINE__
,
buffer
);
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
buffer
,
NO_INSERT_TYPE
,
false
))
{
errorPrint
(
"queryDbExec() failed. buffer:
\n
%s
\n
"
,
buffer
);
}
...
...
@@ -2948,18 +2957,18 @@ static int startMultiThreadCreateChildTable(
b
=
ntables
%
threads
;
for
(
int64_t
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
t_i
nfo
->
threadID
=
i
;
tstrncpy
(
t_i
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
t_i
nfo
->
superTblInfo
=
superTblInfo
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
pThreadI
nfo
->
threadID
=
i
;
tstrncpy
(
pThreadI
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
pThreadI
nfo
->
superTblInfo
=
superTblInfo
;
verbosePrint
(
"%s() %d db_name: %s
\n
"
,
__func__
,
__LINE__
,
db_name
);
t_i
nfo
->
taos
=
taos_connect
(
pThreadI
nfo
->
taos
=
taos_connect
(
g_Dbs
.
host
,
g_Dbs
.
user
,
g_Dbs
.
password
,
db_name
,
g_Dbs
.
port
);
if
(
t_i
nfo
->
taos
==
NULL
)
{
if
(
pThreadI
nfo
->
taos
==
NULL
)
{
errorPrint
(
"%s() LN%d, Failed to connect to TDengine, reason:%s
\n
"
,
__func__
,
__LINE__
,
taos_errstr
(
NULL
));
free
(
pids
);
...
...
@@ -2967,14 +2976,14 @@ static int startMultiThreadCreateChildTable(
return
-
1
;
}
t_i
nfo
->
start_table_from
=
startFrom
;
t_i
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
t_i
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
t_i
nfo
->
end_table_to
+
1
;
t_i
nfo
->
use_metric
=
true
;
t_i
nfo
->
cols
=
cols
;
t_i
nfo
->
minDelay
=
UINT64_MAX
;
pthread_create
(
pids
+
i
,
NULL
,
createTable
,
t_i
nfo
);
pThreadI
nfo
->
start_table_from
=
startFrom
;
pThreadI
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
pThreadI
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
pThreadI
nfo
->
end_table_to
+
1
;
pThreadI
nfo
->
use_metric
=
true
;
pThreadI
nfo
->
cols
=
cols
;
pThreadI
nfo
->
minDelay
=
UINT64_MAX
;
pthread_create
(
pids
+
i
,
NULL
,
createTable
,
pThreadI
nfo
);
}
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
...
...
@@ -2982,8 +2991,8 @@ static int startMultiThreadCreateChildTable(
}
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
taos_close
(
t_i
nfo
->
taos
);
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
taos_close
(
pThreadI
nfo
->
taos
);
}
free
(
pids
);
...
...
@@ -3442,16 +3451,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
g_args
.
interlace_rows
=
interlaceRows
->
valueint
;
// rows per table need be less than insert batch
if
(
g_args
.
interlace_rows
>
g_args
.
num_of_RPR
)
{
printf
(
"NOTICE: interlace rows value %"
PRIu64
" > num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
interlace_rows
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
num_of_RPR
);
prompt2
();
g_args
.
interlace_rows
=
g_args
.
num_of_RPR
;
}
}
else
if
(
!
interlaceRows
)
{
g_args
.
interlace_rows
=
0
;
// 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
}
else
{
...
...
@@ -3483,6 +3482,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
__func__
,
__LINE__
);
goto
PARSE_OVER
;
}
else
if
(
numRecPerReq
->
valueint
>
MAX_RECORDS_PER_REQ
)
{
printf
(
"NOTICE: number of records per request value %"
PRIu64
" > %d
\n\n
"
,
numRecPerReq
->
valueint
,
MAX_RECORDS_PER_REQ
);
printf
(
" number of records per request value will be set to %d
\n\n
"
,
MAX_RECORDS_PER_REQ
);
prompt
();
numRecPerReq
->
valueint
=
MAX_RECORDS_PER_REQ
;
}
g_args
.
num_of_RPR
=
numRecPerReq
->
valueint
;
...
...
@@ -3512,6 +3516,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto
PARSE_OVER
;
}
// rows per table need be less than insert batch
if
(
g_args
.
interlace_rows
>
g_args
.
num_of_RPR
)
{
printf
(
"NOTICE: interlace rows value %"
PRIu64
" > num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
interlace_rows
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
num_of_RPR
);
prompt
();
g_args
.
interlace_rows
=
g_args
.
num_of_RPR
;
}
cJSON
*
dbs
=
cJSON_GetObjectItem
(
root
,
"databases"
);
if
(
!
dbs
||
dbs
->
type
!=
cJSON_Array
)
{
printf
(
"ERROR: failed to read json, databases not found
\n
"
);
...
...
@@ -3968,10 +3982,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
i
,
j
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
num_of_RPR
);
if
(
!
g_args
.
answer_yes
)
{
printf
(
" press Enter key to continue or Ctrl-C to stop."
);
(
void
)
getchar
();
}
prompt
();
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
g_args
.
num_of_RPR
;
}
}
else
if
(
!
interlaceRows
)
{
...
...
@@ -4186,7 +4197,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if
(
concurrent
&&
concurrent
->
type
==
cJSON_Number
)
{
if
(
concurrent
->
valueint
<=
0
)
{
errorPrint
(
"%s() LN%d, query sqlCount %"
PRIu64
" or concurrent %
"
PRIu64
"
is not correct.
\n
"
,
"%s() LN%d, query sqlCount %"
PRIu64
" or concurrent %
d
is not correct.
\n
"
,
__func__
,
__LINE__
,
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
,
g_queryInfo
.
specifiedQueryInfo
.
concurrent
);
...
...
@@ -4253,24 +4264,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
// sqls
cJSON
*
s
uper
Sqls
=
cJSON_GetObjectItem
(
specifiedQuery
,
"sqls"
);
if
(
!
s
uper
Sqls
)
{
cJSON
*
s
pecified
Sqls
=
cJSON_GetObjectItem
(
specifiedQuery
,
"sqls"
);
if
(
!
s
pecified
Sqls
)
{
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
=
0
;
}
else
if
(
s
uper
Sqls
->
type
!=
cJSON_Array
)
{
}
else
if
(
s
pecified
Sqls
->
type
!=
cJSON_Array
)
{
errorPrint
(
"%s() LN%d, failed to read json, super sqls not found
\n
"
,
__func__
,
__LINE__
);
goto
PARSE_OVER
;
}
else
{
int
superSqlSize
=
cJSON_GetArraySize
(
superSqls
);
if
(
superSqlSize
>
MAX_QUERY_SQL_COUNT
)
{
errorPrint
(
"%s() LN%d, failed to read json, query sql size overflow, max is %d
\n
"
,
__func__
,
__LINE__
,
MAX_QUERY_SQL_COUNT
);
int
superSqlSize
=
cJSON_GetArraySize
(
specifiedSqls
);
if
(
superSqlSize
*
g_queryInfo
.
specifiedQueryInfo
.
concurrent
>
MAX_QUERY_SQL_COUNT
)
{
errorPrint
(
"%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d
\n
"
,
__func__
,
__LINE__
,
superSqlSize
,
g_queryInfo
.
specifiedQueryInfo
.
concurrent
,
MAX_QUERY_SQL_COUNT
);
goto
PARSE_OVER
;
}
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
=
superSqlSize
;
for
(
int
j
=
0
;
j
<
superSqlSize
;
++
j
)
{
cJSON
*
sql
=
cJSON_GetArrayItem
(
s
uper
Sqls
,
j
);
cJSON
*
sql
=
cJSON_GetArrayItem
(
s
pecified
Sqls
,
j
);
if
(
sql
==
NULL
)
continue
;
cJSON
*
sqlStr
=
cJSON_GetObjectItem
(
sql
,
"sql"
);
...
...
@@ -4434,16 +4449,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo
.
superQueryInfo
.
subscribeKeepProgress
=
0
;
}
// sqls
cJSON
*
subsqls
=
cJSON_GetObjectItem
(
superQuery
,
"sqls"
);
if
(
!
subsqls
)
{
cJSON
*
superResubAfterConsume
=
cJSON_GetObjectItem
(
superQuery
,
"resubAfterConsume"
);
if
(
superResubAfterConsume
&&
superResubAfterConsume
->
type
==
cJSON_Number
)
{
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
=
superResubAfterConsume
->
valueint
;
}
else
if
(
!
superResubAfterConsume
)
{
//printf("failed to read json, subscribe interval no found\n");
////goto PARSE_OVER;
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
=
1
;
}
// supert table sqls
cJSON
*
superSqls
=
cJSON_GetObjectItem
(
superQuery
,
"sqls"
);
if
(
!
superSqls
)
{
g_queryInfo
.
superQueryInfo
.
sqlCount
=
0
;
}
else
if
(
su
bs
qls
->
type
!=
cJSON_Array
)
{
}
else
if
(
su
perS
qls
->
type
!=
cJSON_Array
)
{
errorPrint
(
"%s() LN%d: failed to read json, super sqls not found
\n
"
,
__func__
,
__LINE__
);
goto
PARSE_OVER
;
}
else
{
int
superSqlSize
=
cJSON_GetArraySize
(
su
bs
qls
);
int
superSqlSize
=
cJSON_GetArraySize
(
su
perS
qls
);
if
(
superSqlSize
>
MAX_QUERY_SQL_COUNT
)
{
errorPrint
(
"%s() LN%d, failed to read json, query sql size overflow, max is %d
\n
"
,
__func__
,
__LINE__
,
MAX_QUERY_SQL_COUNT
);
...
...
@@ -4452,7 +4479,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo
.
superQueryInfo
.
sqlCount
=
superSqlSize
;
for
(
int
j
=
0
;
j
<
superSqlSize
;
++
j
)
{
cJSON
*
sql
=
cJSON_GetArrayItem
(
su
bs
qls
,
j
);
cJSON
*
sql
=
cJSON_GetArrayItem
(
su
perS
qls
,
j
);
if
(
sql
==
NULL
)
continue
;
cJSON
*
sqlStr
=
cJSON_GetObjectItem
(
sql
,
"sql"
);
...
...
@@ -4465,18 +4492,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy
(
g_queryInfo
.
superQueryInfo
.
sql
[
j
],
sqlStr
->
valuestring
,
MAX_QUERY_SQL_LENGTH
);
cJSON
*
superResubAfterConsume
=
cJSON_GetObjectItem
(
sql
,
"resubAfterConsume"
);
if
(
superResubAfterConsume
&&
superResubAfterConsume
->
type
==
cJSON_Number
)
{
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
[
j
]
=
superResubAfterConsume
->
valueint
;
}
else
if
(
!
superResubAfterConsume
)
{
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
[
j
]
=
1
;
}
cJSON
*
result
=
cJSON_GetObjectItem
(
sql
,
"result"
);
if
(
result
!=
NULL
&&
result
->
type
==
cJSON_String
&&
result
->
valuestring
!=
NULL
){
...
...
@@ -4812,10 +4827,12 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k)
return
affectedRows
;
}
static
void
getTableName
(
char
*
pTblName
,
threadInfo
*
pThreadInfo
,
uint64_t
tableSeq
)
static
void
getTableName
(
char
*
pTblName
,
threadInfo
*
pThreadInfo
,
uint64_t
tableSeq
)
{
SSuperTable
*
superTblInfo
=
pThreadInfo
->
superTblInfo
;
if
(
superTblInfo
)
{
if
((
superTblInfo
)
&&
(
AUTO_CREATE_SUBTBL
!=
superTblInfo
->
autoCreateTable
))
{
if
(
superTblInfo
->
childTblLimit
>
0
)
{
snprintf
(
pTblName
,
TSDB_TABLE_NAME_LEN
,
"%s"
,
superTblInfo
->
childTblName
+
...
...
@@ -4854,6 +4871,14 @@ static int64_t generateDataTail(
verbosePrint
(
"%s() LN%d batch=%"
PRIu64
"
\n
"
,
__func__
,
__LINE__
,
batch
);
bool
tsRand
;
if
((
superTblInfo
)
&&
(
0
==
strncasecmp
(
superTblInfo
->
dataSource
,
"rand"
,
strlen
(
"rand"
))))
{
tsRand
=
true
;
}
else
{
tsRand
=
false
;
}
uint64_t
k
=
0
;
for
(
k
=
0
;
k
<
batch
;)
{
char
data
[
MAX_DATA_SIZE
];
...
...
@@ -4862,34 +4887,22 @@ static int64_t generateDataTail(
int64_t
retLen
=
0
;
if
(
superTblInfo
)
{
if
(
0
==
strncasecmp
(
superTblInfo
->
dataSource
,
"sample"
,
strlen
(
"sample"
)))
{
if
(
tsRand
)
{
retLen
=
generateRowData
(
data
,
startTime
+
getTSRandTail
(
superTblInfo
->
timeStampStep
,
k
,
superTblInfo
->
disorderRatio
,
superTblInfo
->
disorderRange
),
superTblInfo
);
}
else
{
retLen
=
getRowDataFromSample
(
data
,
remainderBufLen
,
startTime
+
superTblInfo
->
timeStampStep
*
k
,
superTblInfo
,
pSamplePos
);
}
else
if
(
0
==
strncasecmp
(
superTblInfo
->
dataSource
,
"rand"
,
strlen
(
"rand"
)))
{
int64_t
randTail
=
superTblInfo
->
timeStampStep
*
k
;
if
(
superTblInfo
->
disorderRatio
>
0
)
{
int
rand_num
=
taosRandom
()
%
100
;
if
(
rand_num
<
superTblInfo
->
disorderRatio
)
{
randTail
=
(
randTail
+
(
taosRandom
()
%
superTblInfo
->
disorderRange
+
1
))
*
(
-
1
);
debugPrint
(
"rand data generated, back %"
PRId64
"
\n
"
,
randTail
);
}
}
int64_t
d
=
startTime
+
randTail
;
retLen
=
generateRowData
(
data
,
d
,
superTblInfo
);
}
if
(
retLen
>
remainderBufLen
)
{
break
;
}
...
...
@@ -4901,25 +4914,13 @@ static int64_t generateDataTail(
}
else
{
char
**
data_type
=
g_args
.
datatype
;
int
lenOfBinary
=
g_args
.
len_of_binary
;
int64_t
randTail
=
DEFAULT_TIMESTAMP_STEP
*
k
;
if
(
g_args
.
disorderRatio
!=
0
)
{
int
rand_num
=
taosRandom
()
%
100
;
if
(
rand_num
<
g_args
.
disorderRatio
)
{
randTail
=
(
randTail
+
(
taosRandom
()
%
g_args
.
disorderRange
+
1
))
*
(
-
1
);
debugPrint
(
"rand data generated, back %"
PRId64
"
\n
"
,
randTail
);
}
}
else
{
randTail
=
DEFAULT_TIMESTAMP_STEP
*
k
;
}
retLen
=
generateData
(
data
,
data_type
,
ncols_per_record
,
startTime
+
randTail
,
startTime
+
getTSRandTail
(
DEFAULT_TIMESTAMP_STEP
,
k
,
g_args
.
disorderRatio
,
g_args
.
disorderRange
),
lenOfBinary
);
if
(
len
>
remainderBufLen
)
break
;
...
...
@@ -5069,6 +5070,22 @@ static int64_t generateInterlaceDataBuffer(
   return k;
 }

+static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
+        int disorderRatio, int disorderRange)
+{
+  int64_t randTail = timeStampStep * seq;
+  if (disorderRatio > 0) {
+    int rand_num = taosRandom() % 100;
+    if (rand_num < disorderRatio) {
+      randTail = (randTail +
+              (taosRandom() % disorderRange + 1)) * (-1);
+      debugPrint("rand data generated, back %" PRId64 "\n", randTail);
+    }
+  }
+
+  return randTail;
+}
+
 static int64_t generateProgressiveDataBuffer(
         char *tableName,
         int64_t tableSeq,
...
@@ -5759,19 +5776,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
&&
((
superTblInfo
->
childTblOffset
+
superTblInfo
->
childTblLimit
)
>
superTblInfo
->
childTblCount
))
{
printf
(
"WARNING: specified offset + limit > child table count!
\n
"
);
if
(
!
g_args
.
answer_yes
)
{
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
prompt
();
}
if
((
superTblInfo
->
childTblExists
!=
TBL_NO_EXISTS
)
&&
(
0
==
superTblInfo
->
childTblLimit
))
{
printf
(
"WARNING: specified limit = 0, which cannot find table name to insert or query!
\n
"
);
if
(
!
g_args
.
answer_yes
)
{
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
prompt
();
}
superTblInfo
->
childTblName
=
(
char
*
)
calloc
(
1
,
...
...
@@ -5814,49 +5825,49 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
t_i
nfo
->
threadID
=
i
;
tstrncpy
(
t_i
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
t_i
nfo
->
time_precision
=
timePrec
;
t_i
nfo
->
superTblInfo
=
superTblInfo
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
pThreadI
nfo
->
threadID
=
i
;
tstrncpy
(
pThreadI
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
pThreadI
nfo
->
time_precision
=
timePrec
;
pThreadI
nfo
->
superTblInfo
=
superTblInfo
;
t_i
nfo
->
start_time
=
start_time
;
t_i
nfo
->
minDelay
=
UINT64_MAX
;
pThreadI
nfo
->
start_time
=
start_time
;
pThreadI
nfo
->
minDelay
=
UINT64_MAX
;
if
((
NULL
==
superTblInfo
)
||
(
0
==
strncasecmp
(
superTblInfo
->
insertMode
,
"taosc"
,
5
)))
{
//
t_i
nfo->taos = taos;
t_i
nfo
->
taos
=
taos_connect
(
//
pThreadI
nfo->taos = taos;
pThreadI
nfo
->
taos
=
taos_connect
(
g_Dbs
.
host
,
g_Dbs
.
user
,
g_Dbs
.
password
,
db_name
,
g_Dbs
.
port
);
if
(
NULL
==
t_i
nfo
->
taos
)
{
if
(
NULL
==
pThreadI
nfo
->
taos
)
{
errorPrint
(
"connect to server fail from insert sub thread, reason: %s
\n
"
,
taos_errstr
(
NULL
));
exit
(
-
1
);
}
}
else
{
t_i
nfo
->
taos
=
NULL
;
pThreadI
nfo
->
taos
=
NULL
;
}
/* if ((NULL == superTblInfo)
|| (0 == superTblInfo->multiThreadWriteOneTbl)) {
*/
t_i
nfo
->
start_table_from
=
startFrom
;
t_i
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
t_i
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
t_i
nfo
->
end_table_to
+
1
;
pThreadI
nfo
->
start_table_from
=
startFrom
;
pThreadI
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
pThreadI
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
pThreadI
nfo
->
end_table_to
+
1
;
/* } else {
t_i
nfo->start_table_from = 0;
t_i
nfo->ntables = superTblInfo->childTblCount;
t_info->start_time = t_i
nfo->start_time + rand_int() % 10000 - rand_tinyint();
pThreadI
nfo->start_table_from = 0;
pThreadI
nfo->ntables = superTblInfo->childTblCount;
pThreadInfo->start_time = pThreadI
nfo->start_time + rand_int() % 10000 - rand_tinyint();
}
*/
tsem_init
(
&
(
t_i
nfo
->
lock_sem
),
0
,
0
);
tsem_init
(
&
(
pThreadI
nfo
->
lock_sem
),
0
,
0
);
if
(
ASYNC_MODE
==
g_Dbs
.
asyncMode
)
{
pthread_create
(
pids
+
i
,
NULL
,
asyncWrite
,
t_i
nfo
);
pthread_create
(
pids
+
i
,
NULL
,
asyncWrite
,
pThreadI
nfo
);
}
else
{
pthread_create
(
pids
+
i
,
NULL
,
syncWrite
,
t_i
nfo
);
pthread_create
(
pids
+
i
,
NULL
,
syncWrite
,
pThreadI
nfo
);
}
}
...
...
@@ -5871,27 +5882,27 @@ static void startMultiThreadInsertData(int threads, char* db_name,
double
avgDelay
=
0
;
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
tsem_destroy
(
&
(
t_i
nfo
->
lock_sem
));
taos_close
(
t_i
nfo
->
taos
);
tsem_destroy
(
&
(
pThreadI
nfo
->
lock_sem
));
taos_close
(
pThreadI
nfo
->
taos
);
debugPrint
(
"%s() LN%d, [%d] totalInsert=%"
PRIu64
" totalAffected=%"
PRIu64
"
\n
"
,
__func__
,
__LINE__
,
t_info
->
threadID
,
t_i
nfo
->
totalInsertRows
,
t_i
nfo
->
totalAffectedRows
);
pThreadInfo
->
threadID
,
pThreadI
nfo
->
totalInsertRows
,
pThreadI
nfo
->
totalAffectedRows
);
if
(
superTblInfo
)
{
superTblInfo
->
totalAffectedRows
+=
t_i
nfo
->
totalAffectedRows
;
superTblInfo
->
totalInsertRows
+=
t_i
nfo
->
totalInsertRows
;
superTblInfo
->
totalAffectedRows
+=
pThreadI
nfo
->
totalAffectedRows
;
superTblInfo
->
totalInsertRows
+=
pThreadI
nfo
->
totalInsertRows
;
}
else
{
g_args
.
totalAffectedRows
+=
t_i
nfo
->
totalAffectedRows
;
g_args
.
totalInsertRows
+=
t_i
nfo
->
totalInsertRows
;
g_args
.
totalAffectedRows
+=
pThreadI
nfo
->
totalAffectedRows
;
g_args
.
totalInsertRows
+=
pThreadI
nfo
->
totalInsertRows
;
}
totalDelay
+=
t_i
nfo
->
totalDelay
;
cntDelay
+=
t_i
nfo
->
cntDelay
;
if
(
t_info
->
maxDelay
>
maxDelay
)
maxDelay
=
t_i
nfo
->
maxDelay
;
if
(
t_info
->
minDelay
<
minDelay
)
minDelay
=
t_i
nfo
->
minDelay
;
totalDelay
+=
pThreadI
nfo
->
totalDelay
;
cntDelay
+=
pThreadI
nfo
->
cntDelay
;
if
(
pThreadInfo
->
maxDelay
>
maxDelay
)
maxDelay
=
pThreadI
nfo
->
maxDelay
;
if
(
pThreadInfo
->
minDelay
<
minDelay
)
minDelay
=
pThreadI
nfo
->
minDelay
;
}
cntDelay
-=
1
;
...
...
@@ -5947,26 +5958,26 @@ static void startMultiThreadInsertData(int threads, char* db_name,
static
void
*
readTable
(
void
*
sarg
)
{
#if 1
threadInfo
*
ri
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
ri
nfo
->
taos
;
threadInfo
*
pThreadI
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
pThreadI
nfo
->
taos
;
char
command
[
BUFFER_SIZE
]
=
"
\0
"
;
uint64_t
sTime
=
ri
nfo
->
start_time
;
char
*
tb_prefix
=
ri
nfo
->
tb_prefix
;
FILE
*
fp
=
fopen
(
rinfo
->
fp
,
"a"
);
uint64_t
sTime
=
pThreadI
nfo
->
start_time
;
char
*
tb_prefix
=
pThreadI
nfo
->
tb_prefix
;
FILE
*
fp
=
fopen
(
pThreadInfo
->
filePath
,
"a"
);
if
(
NULL
==
fp
)
{
errorPrint
(
"fopen %s fail, reason:%s.
\n
"
,
rinfo
->
fp
,
strerror
(
errno
));
errorPrint
(
"fopen %s fail, reason:%s.
\n
"
,
pThreadInfo
->
filePath
,
strerror
(
errno
));
return
NULL
;
}
int64_t
num_of_DPT
;
/* if (
ri
nfo->superTblInfo) {
num_of_DPT =
ri
nfo->superTblInfo->insertRows; // nrecords_per_table;
/* if (
pThreadI
nfo->superTblInfo) {
num_of_DPT =
pThreadI
nfo->superTblInfo->insertRows; // nrecords_per_table;
} else {
*/
num_of_DPT
=
g_args
.
num_of_DPT
;
// }
int64_t
num_of_tables
=
ri
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
num_of_tables
=
pThreadI
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
totalData
=
num_of_DPT
*
num_of_tables
;
bool
do_aggreFunc
=
g_Dbs
.
do_aggreFunc
;
...
...
@@ -6019,17 +6030,17 @@ static void *readTable(void *sarg) {
static
void
*
readMetric
(
void
*
sarg
)
{
#if 1
threadInfo
*
ri
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
ri
nfo
->
taos
;
threadInfo
*
pThreadI
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
pThreadI
nfo
->
taos
;
char
command
[
BUFFER_SIZE
]
=
"
\0
"
;
FILE
*
fp
=
fopen
(
rinfo
->
fp
,
"a"
);
FILE
*
fp
=
fopen
(
pThreadInfo
->
filePath
,
"a"
);
if
(
NULL
==
fp
)
{
printf
(
"fopen %s fail, reason:%s.
\n
"
,
rinfo
->
fp
,
strerror
(
errno
));
printf
(
"fopen %s fail, reason:%s.
\n
"
,
pThreadInfo
->
filePath
,
strerror
(
errno
));
return
NULL
;
}
int64_t
num_of_DPT
=
ri
nfo
->
superTblInfo
->
insertRows
;
int64_t
num_of_tables
=
ri
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
num_of_DPT
=
pThreadI
nfo
->
superTblInfo
->
insertRows
;
int64_t
num_of_tables
=
pThreadI
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
totalData
=
num_of_DPT
*
num_of_tables
;
bool
do_aggreFunc
=
g_Dbs
.
do_aggreFunc
;
...
...
@@ -6093,15 +6104,7 @@ static void *readMetric(void *sarg) {
 static void prompt()
 {
   if (!g_args.answer_yes) {
-    printf("Press enter key to continue\n\n");
-    (void)getchar();
-  }
-}
-
-static void prompt2()
-{
-  if (!g_args.answer_yes) {
-    printf("         press Enter key to continue or Ctrl-C to stop.");
+    printf("         Press enter key to continue or Ctrl-C to stop\n\n");
     (void)getchar();
   }
 }
...
@@ -6236,8 +6239,8 @@ static void *specifiedTableQuery(void *sarg) {
uint64_t
lastPrintTime
=
taosGetTimestampMs
();
uint64_t
startTs
=
taosGetTimestampMs
();
if
(
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
]
[
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
if
(
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
]
!=
NULL
)
{
sprintf
(
pThreadInfo
->
filePath
,
"%s-%d"
,
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
}
...
...
@@ -6337,8 +6340,8 @@ static void *superTableQuery(void *sarg) {
for
(
int
j
=
0
;
j
<
g_queryInfo
.
superQueryInfo
.
sqlCount
;
j
++
)
{
memset
(
sqlstr
,
0
,
sizeof
(
sqlstr
));
replaceChildTblName
(
g_queryInfo
.
superQueryInfo
.
sql
[
j
],
sqlstr
,
i
);
if
(
g_queryInfo
.
superQueryInfo
.
result
[
j
]
[
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
f
p
,
"%s-%d"
,
if
(
g_queryInfo
.
superQueryInfo
.
result
[
j
]
!=
NULL
)
{
sprintf
(
pThreadInfo
->
f
ilePath
,
"%s-%d"
,
g_queryInfo
.
superQueryInfo
.
result
[
j
],
pThreadInfo
->
threadID
);
}
...
...
@@ -6428,15 +6431,14 @@ static int queryTestProcess() {
for
(
uint64_t
i
=
0
;
i
<
nSqlCount
;
i
++
)
{
for
(
int
j
=
0
;
j
<
nConcurrent
;
j
++
)
{
uint64_t
seq
=
i
*
nConcurrent
+
j
;
threadInfo
*
t_i
nfo
=
infos
+
seq
;
t_i
nfo
->
threadID
=
seq
;
t_i
nfo
->
querySeq
=
i
;
threadInfo
*
pThreadI
nfo
=
infos
+
seq
;
pThreadI
nfo
->
threadID
=
seq
;
pThreadI
nfo
->
querySeq
=
i
;
if
(
0
==
strncasecmp
(
g_queryInfo
.
queryMode
,
"taosc"
,
5
))
{
char
sqlStr
[
MAX_TB_NAME_SIZE
*
2
];
sprintf
(
sqlStr
,
"use %s"
,
g_queryInfo
.
dbName
);
verbosePrint
(
"%s() %d sqlStr: %s
\n
"
,
__func__
,
__LINE__
,
sqlStr
);
if
(
0
!=
queryDbExec
(
taos
,
sqlStr
,
NO_INSERT_TYPE
,
false
))
{
taos_close
(
taos
);
free
(
infos
);
...
...
@@ -6447,10 +6449,10 @@ static int queryTestProcess() {
}
}
t_i
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pThreadI
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pthread_create
(
pids
+
seq
,
NULL
,
specifiedTableQuery
,
t_i
nfo
);
pThreadI
nfo
);
}
}
}
else
{
...
...
@@ -6490,15 +6492,15 @@ static int queryTestProcess() {
uint64_t
startFrom
=
0
;
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infosOfSub
+
i
;
t_i
nfo
->
threadID
=
i
;
threadInfo
*
pThreadI
nfo
=
infosOfSub
+
i
;
pThreadI
nfo
->
threadID
=
i
;
t_i
nfo
->
start_table_from
=
startFrom
;
t_i
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
t_i
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
t_i
nfo
->
end_table_to
+
1
;
t_i
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pthread_create
(
pidsOfSub
+
i
,
NULL
,
superTableQuery
,
t_i
nfo
);
pThreadI
nfo
->
start_table_from
=
startFrom
;
pThreadI
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
pThreadI
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
pThreadI
nfo
->
end_table_to
+
1
;
pThreadI
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pthread_create
(
pidsOfSub
+
i
,
NULL
,
superTableQuery
,
pThreadI
nfo
);
}
g_queryInfo
.
superQueryInfo
.
threadCnt
=
threads
;
...
...
@@ -6545,7 +6547,7 @@ static void stable_sub_callback(
}
if
(
param
)
appendResultToFile
(
res
,
((
threadInfo
*
)
param
)
->
fp
);
fetchResult
(
res
,
(
threadInfo
*
)
param
);
// tao_unscribe() will free result.
}
...
...
@@ -6558,7 +6560,7 @@ static void specified_sub_callback(
}
if
(
param
)
appendResultToFile
(
res
,
((
threadInfo
*
)
param
)
->
fp
);
fetchResult
(
res
,
(
threadInfo
*
)
param
);
// tao_unscribe() will free result.
}
...
...
@@ -6602,6 +6604,7 @@ static void *superSubscribe(void *sarg) {
threadInfo
*
pThreadInfo
=
(
threadInfo
*
)
sarg
;
char
subSqlstr
[
MAX_QUERY_SQL_LENGTH
];
TAOS_SUB
*
tsub
[
MAX_QUERY_SQL_COUNT
]
=
{
0
};
uint64_t
tsubSeq
;
if
(
pThreadInfo
->
ntables
>
MAX_QUERY_SQL_COUNT
)
{
errorPrint
(
"The table number(%"
PRId64
") of the thread is more than max query sql count: %d
\n
"
,
...
...
@@ -6611,18 +6614,15 @@ static void *superSubscribe(void *sarg) {
}
if
(
pThreadInfo
->
taos
==
NULL
)
{
TAOS
*
taos
=
NULL
;
taos
=
taos_connect
(
g_queryInfo
.
host
,
pThreadInfo
->
taos
=
taos_connect
(
g_queryInfo
.
host
,
g_queryInfo
.
user
,
g_queryInfo
.
password
,
g_queryInfo
.
dbName
,
g_queryInfo
.
port
);
if
(
taos
==
NULL
)
{
if
(
pThreadInfo
->
taos
==
NULL
)
{
errorPrint
(
"[%d] Failed to connect to TDengine, reason:%s
\n
"
,
pThreadInfo
->
threadID
,
taos_errstr
(
NULL
));
return
NULL
;
}
else
{
pThreadInfo
->
taos
=
taos
;
}
}
...
...
@@ -6638,6 +6638,8 @@ static void *superSubscribe(void *sarg) {
char
topic
[
32
]
=
{
0
};
for
(
uint64_t
i
=
pThreadInfo
->
start_table_from
;
i
<=
pThreadInfo
->
end_table_to
;
i
++
)
{
tsubSeq
=
i
-
pThreadInfo
->
start_table_from
;
verbosePrint
(
"%s() LN%d, [%d], start=%"
PRId64
" end=%"
PRId64
" i=%"
PRIu64
"
\n
"
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
...
...
@@ -6650,19 +6652,19 @@ static void *superSubscribe(void *sarg) {
g_queryInfo
.
superQueryInfo
.
sql
[
pThreadInfo
->
querySeq
],
subSqlstr
,
i
);
if
(
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
f
p
,
"%s-%d"
,
sprintf
(
pThreadInfo
->
f
ilePath
,
"%s-%d"
,
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
}
debugPrint
(
"%s() LN%d, [%d] subSqlstr: %s
\n
"
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
subSqlstr
);
tsub
[
i
]
=
subscribeImpl
(
tsub
[
tsubSeq
]
=
subscribeImpl
(
STABLE_CLASS
,
pThreadInfo
,
subSqlstr
,
topic
,
g_queryInfo
.
superQueryInfo
.
subscribeRestart
,
g_queryInfo
.
superQueryInfo
.
subscribeInterval
);
if
(
NULL
==
tsub
[
i
])
{
if
(
NULL
==
tsub
[
tsubSeq
])
{
taos_close
(
pThreadInfo
->
taos
);
return
NULL
;
}
...
...
@@ -6675,45 +6677,53 @@ static void *superSubscribe(void *sarg) {
}
TAOS_RES
*
res
=
NULL
;
uint64_t
st
=
0
,
et
=
0
;
while
(
1
)
{
for
(
uint64_t
i
=
pThreadInfo
->
start_table_from
;
i
<=
pThreadInfo
->
end_table_to
;
i
++
)
{
tsubSeq
=
i
-
pThreadInfo
->
start_table_from
;
if
(
ASYNC_MODE
==
g_queryInfo
.
superQueryInfo
.
asyncMode
)
{
continue
;
}
res
=
taos_consume
(
tsub
[
i
]);
st
=
taosGetTimestampMs
();
performancePrint
(
"st: %"
PRIu64
" et: %"
PRIu64
" st-et: %"
PRIu64
"
\n
"
,
st
,
et
,
(
st
-
et
));
res
=
taos_consume
(
tsub
[
tsubSeq
]);
et
=
taosGetTimestampMs
();
performancePrint
(
"st: %"
PRIu64
" et: %"
PRIu64
" delta: %"
PRIu64
"
\n
"
,
st
,
et
,
(
et
-
st
));
if
(
res
)
{
if
(
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
sprintf
(
pThreadInfo
->
filePath
,
"%s-%d"
,
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
fetchResult
(
res
,
pThreadInfo
);
}
if
(
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
sprintf
(
pThreadInfo
->
filePath
,
"%s-%d"
,
g_queryInfo
.
superQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
fetchResult
(
res
,
pThreadInfo
);
}
consumed
[
i
]
++
;
consumed
[
tsubSeq
]
++
;
if
((
g_queryInfo
.
superQueryInfo
.
subscribeKeepProgress
)
&&
(
consumed
[
i
]
>=
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
[
pThreadInfo
->
querySeq
]
))
{
&&
(
consumed
[
tsubSeq
]
>=
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
))
{
printf
(
"keepProgress:%d, resub super table query: %"
PRIu64
"
\n
"
,
g_queryInfo
.
superQueryInfo
.
subscribeKeepProgress
,
pThreadInfo
->
querySeq
);
taos_unsubscribe
(
tsub
,
taos_unsubscribe
(
tsub
[
tsubSeq
]
,
g_queryInfo
.
superQueryInfo
.
subscribeKeepProgress
);
consumed
[
i
]
=
0
;
tsub
[
i
]
=
subscribeImpl
(
consumed
[
tsubSeq
]
=
0
;
tsub
[
tsubSeq
]
=
subscribeImpl
(
STABLE_CLASS
,
pThreadInfo
,
subSqlstr
,
topic
,
g_queryInfo
.
superQueryInfo
.
subscribeRestart
,
g_queryInfo
.
superQueryInfo
.
subscribeInterval
);
if
(
NULL
==
tsub
[
i
])
{
if
(
NULL
==
tsub
[
tsubSeq
])
{
taos_close
(
pThreadInfo
->
taos
);
return
NULL
;
}
...
...
@@ -6725,7 +6735,8 @@ static void *superSubscribe(void *sarg) {
for
(
uint64_t
i
=
pThreadInfo
->
start_table_from
;
i
<=
pThreadInfo
->
end_table_to
;
i
++
)
{
taos_unsubscribe
(
tsub
[
i
],
0
);
tsubSeq
=
i
-
pThreadInfo
->
start_table_from
;
taos_unsubscribe
(
tsub
[
tsubSeq
],
0
);
}
taos_close
(
pThreadInfo
->
taos
);
...
...
@@ -6734,95 +6745,92 @@ static void *superSubscribe(void *sarg) {
static
void
*
specifiedSubscribe
(
void
*
sarg
)
{
threadInfo
*
pThreadInfo
=
(
threadInfo
*
)
sarg
;
TAOS_SUB
*
tsub
=
NULL
;
//
TAOS_SUB* tsub = NULL;
if
(
pThreadInfo
->
taos
==
NULL
)
{
TAOS
*
taos
=
NULL
;
taos
=
taos_connect
(
g_queryInfo
.
host
,
pThreadInfo
->
taos
=
taos_connect
(
g_queryInfo
.
host
,
g_queryInfo
.
user
,
g_queryInfo
.
password
,
g_queryInfo
.
dbName
,
g_queryInfo
.
port
);
if
(
taos
==
NULL
)
{
if
(
pThreadInfo
->
taos
==
NULL
)
{
errorPrint
(
"[%d] Failed to connect to TDengine, reason:%s
\n
"
,
pThreadInfo
->
threadID
,
taos_errstr
(
NULL
));
return
NULL
;
}
else
{
pThreadInfo
->
taos
=
taos
;
}
}
char
sqlStr
[
MAX_TB_NAME_SIZE
*
2
];
sprintf
(
sqlStr
,
"use %s"
,
g_queryInfo
.
dbName
);
debugPrint
(
"%s() %d sqlStr: %s
\n
"
,
__func__
,
__LINE__
,
sqlStr
);
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
sqlStr
,
NO_INSERT_TYPE
,
false
))
{
taos_close
(
pThreadInfo
->
taos
);
return
NULL
;
}
char
topic
[
32
]
=
{
0
};
sprintf
(
topic
,
"taosdemo-subscribe-%"
PRIu64
""
,
pThreadInfo
->
querySeq
);
if
(
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
fp
,
"%s-%d"
,
sprintf
(
g_queryInfo
.
specifiedQueryInfo
.
topic
[
pThreadInfo
->
threadID
],
"taosdemo-subscribe-%"
PRIu64
"-%d"
,
pThreadInfo
->
querySeq
,
pThreadInfo
->
threadID
);
if
(
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
]
!=
NULL
)
{
sprintf
(
pThreadInfo
->
filePath
,
"%s-%d"
,
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
}
tsub
=
subscribeImpl
(
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
threadID
]
=
subscribeImpl
(
SPECIFIED_CLASS
,
pThreadInfo
,
g_queryInfo
.
specifiedQueryInfo
.
sql
[
pThreadInfo
->
querySeq
],
topic
,
g_queryInfo
.
specifiedQueryInfo
.
topic
[
pThreadInfo
->
threadID
]
,
g_queryInfo
.
specifiedQueryInfo
.
subscribeRestart
,
g_queryInfo
.
specifiedQueryInfo
.
subscribeInterval
);
if
(
NULL
==
tsub
)
{
if
(
NULL
==
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
threadID
]
)
{
taos_close
(
pThreadInfo
->
taos
);
return
NULL
;
}
// start loop to consume result
TAOS_RES
*
res
=
NULL
;
int
consumed
;
g_queryInfo
.
specifiedQueryInfo
.
consumed
[
pThreadInfo
->
threadID
]
=
0
;
while
(
1
)
{
if
(
ASYNC_MODE
==
g_queryInfo
.
specifiedQueryInfo
.
asyncMode
)
{
continue
;
}
res
=
taos_consume
(
tsub
);
if
(
res
)
{
g_queryInfo
.
specifiedQueryInfo
.
res
[
pThreadInfo
->
threadID
]
=
taos_consume
(
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
threadID
]);
if
(
g_queryInfo
.
specifiedQueryInfo
.
res
[
pThreadInfo
->
threadID
])
{
if
(
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
][
0
]
!=
0
)
{
sprintf
(
pThreadInfo
->
f
p
,
"%s-%d"
,
sprintf
(
pThreadInfo
->
f
ilePath
,
"%s-%d"
,
g_queryInfo
.
specifiedQueryInfo
.
result
[
pThreadInfo
->
querySeq
],
pThreadInfo
->
threadID
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
fetchResult
(
g_queryInfo
.
specifiedQueryInfo
.
res
[
pThreadInfo
->
threadID
],
pThreadInfo
);
}
consumed
++
;
g_queryInfo
.
specifiedQueryInfo
.
consumed
[
pThreadInfo
->
threadID
]
++
;
if
((
g_queryInfo
.
specifiedQueryInfo
.
subscribeKeepProgress
)
&&
(
consumed
>=
&&
(
g_queryInfo
.
specifiedQueryInfo
.
consumed
[
pThreadInfo
->
threadID
]
>=
g_queryInfo
.
specifiedQueryInfo
.
resubAfterConsume
[
pThreadInfo
->
querySeq
]))
{
printf
(
"keepProgress:%d, resub specified query: %"
PRIu64
"
\n
"
,
g_queryInfo
.
specifiedQueryInfo
.
subscribeKeepProgress
,
pThreadInfo
->
querySeq
);
consumed
=
0
;
taos_unsubscribe
(
tsub
,
g_queryInfo
.
specifiedQueryInfo
.
consumed
[
pThreadInfo
->
threadID
]
=
0
;
taos_unsubscribe
(
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
threadID
]
,
g_queryInfo
.
specifiedQueryInfo
.
subscribeKeepProgress
);
tsub
=
subscribeImpl
(
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
threadID
]
=
subscribeImpl
(
SPECIFIED_CLASS
,
pThreadInfo
,
g_queryInfo
.
specifiedQueryInfo
.
sql
[
pThreadInfo
->
querySeq
],
topic
,
g_queryInfo
.
specifiedQueryInfo
.
topic
[
pThreadInfo
->
threadID
]
,
g_queryInfo
.
specifiedQueryInfo
.
subscribeRestart
,
g_queryInfo
.
specifiedQueryInfo
.
subscribeInterval
);
if
(
NULL
==
tsub
)
{
if
(
NULL
==
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
threadID
]
)
{
taos_close
(
pThreadInfo
->
taos
);
return
NULL
;
}
}
}
}
taos_free_result
(
res
);
taos_unsubscribe
(
tsub
,
0
);
taos_free_result
(
g_queryInfo
.
specifiedQueryInfo
.
res
[
pThreadInfo
->
threadID
]
);
taos_unsubscribe
(
g_queryInfo
.
specifiedQueryInfo
.
tsub
[
pThreadInfo
->
querySeq
]
,
0
);
taos_close
(
pThreadInfo
->
taos
);
return
NULL
;
...
...
@@ -6892,18 +6900,18 @@ static int subscribeTestProcess() {
for
(
int
i
=
0
;
i
<
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
;
i
++
)
{
for
(
int
j
=
0
;
j
<
g_queryInfo
.
specifiedQueryInfo
.
concurrent
;
j
++
)
{
uint64_t
seq
=
i
*
g_queryInfo
.
specifiedQueryInfo
.
concurrent
+
j
;
threadInfo
*
t_i
nfo
=
infos
+
seq
;
t_i
nfo
->
threadID
=
seq
;
t_i
nfo
->
querySeq
=
i
;
t_i
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pthread_create
(
pids
+
seq
,
NULL
,
specifiedSubscribe
,
t_i
nfo
);
threadInfo
*
pThreadI
nfo
=
infos
+
seq
;
pThreadI
nfo
->
threadID
=
seq
;
pThreadI
nfo
->
querySeq
=
i
;
pThreadI
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pthread_create
(
pids
+
seq
,
NULL
,
specifiedSubscribe
,
pThreadI
nfo
);
}
}
}
//==== create threads for super table query
if
(
g_queryInfo
.
superQueryInfo
.
sqlCount
<=
0
)
{
printf
(
"%s() LN%d, super table query sqlCount %"
PRIu64
".
\n
"
,
debugPrint
(
"%s() LN%d, super table query sqlCount %"
PRIu64
".
\n
"
,
__func__
,
__LINE__
,
g_queryInfo
.
superQueryInfo
.
sqlCount
);
}
else
{
...
...
@@ -6942,17 +6950,17 @@ static int subscribeTestProcess() {
uint64_t
startFrom
=
0
;
for
(
int
j
=
0
;
j
<
threads
;
j
++
)
{
uint64_t
seq
=
i
*
threads
+
j
;
threadInfo
*
t_i
nfo
=
infosOfStable
+
seq
;
t_i
nfo
->
threadID
=
seq
;
t_i
nfo
->
querySeq
=
i
;
t_i
nfo
->
start_table_from
=
startFrom
;
t_i
nfo
->
ntables
=
j
<
b
?
a
+
1
:
a
;
t_i
nfo
->
end_table_to
=
j
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
t_i
nfo
->
end_table_to
+
1
;
t_i
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
threadInfo
*
pThreadI
nfo
=
infosOfStable
+
seq
;
pThreadI
nfo
->
threadID
=
seq
;
pThreadI
nfo
->
querySeq
=
i
;
pThreadI
nfo
->
start_table_from
=
startFrom
;
pThreadI
nfo
->
ntables
=
j
<
b
?
a
+
1
:
a
;
pThreadI
nfo
->
end_table_to
=
j
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
pThreadI
nfo
->
end_table_to
+
1
;
pThreadI
nfo
->
taos
=
NULL
;
// TODO: workaround to use separate taos connection;
pthread_create
(
pidsOfStable
+
seq
,
NULL
,
superSubscribe
,
t_i
nfo
);
NULL
,
superSubscribe
,
pThreadI
nfo
);
}
}
...
...
@@ -7181,7 +7189,6 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
}
memcpy
(
cmd
+
cmd_len
,
line
,
read_len
);
verbosePrint
(
"%s() LN%d cmd: %s
\n
"
,
__func__
,
__LINE__
,
cmd
);
if
(
0
!=
queryDbExec
(
taos
,
cmd
,
NO_INSERT_TYPE
,
false
))
{
errorPrint
(
"%s() LN%d, queryDbExec %s failed!
\n
"
,
__func__
,
__LINE__
,
cmd
);
...
...
@@ -7231,47 +7238,47 @@ static void queryResult() {
// query data
pthread_t
read_id
;
threadInfo
*
r
Info
=
malloc
(
sizeof
(
threadInfo
));
assert
(
r
Info
);
r
Info
->
start_time
=
1500000000000
;
// 2017-07-14 10:40:00.000
r
Info
->
start_table_from
=
0
;
threadInfo
*
pThread
Info
=
malloc
(
sizeof
(
threadInfo
));
assert
(
pThread
Info
);
pThread
Info
->
start_time
=
1500000000000
;
// 2017-07-14 10:40:00.000
pThread
Info
->
start_table_from
=
0
;
//
r
Info->do_aggreFunc = g_Dbs.do_aggreFunc;
//
pThread
Info->do_aggreFunc = g_Dbs.do_aggreFunc;
if
(
g_args
.
use_metric
)
{
r
Info
->
ntables
=
g_Dbs
.
db
[
0
].
superTbls
[
0
].
childTblCount
;
r
Info
->
end_table_to
=
g_Dbs
.
db
[
0
].
superTbls
[
0
].
childTblCount
-
1
;
r
Info
->
superTblInfo
=
&
g_Dbs
.
db
[
0
].
superTbls
[
0
];
tstrncpy
(
r
Info
->
tb_prefix
,
pThread
Info
->
ntables
=
g_Dbs
.
db
[
0
].
superTbls
[
0
].
childTblCount
;
pThread
Info
->
end_table_to
=
g_Dbs
.
db
[
0
].
superTbls
[
0
].
childTblCount
-
1
;
pThread
Info
->
superTblInfo
=
&
g_Dbs
.
db
[
0
].
superTbls
[
0
];
tstrncpy
(
pThread
Info
->
tb_prefix
,
g_Dbs
.
db
[
0
].
superTbls
[
0
].
childTblPrefix
,
MAX_TB_NAME_SIZE
);
}
else
{
r
Info
->
ntables
=
g_args
.
num_of_tables
;
r
Info
->
end_table_to
=
g_args
.
num_of_tables
-
1
;
tstrncpy
(
r
Info
->
tb_prefix
,
g_args
.
tb_prefix
,
MAX_TB_NAME_SIZE
);
pThread
Info
->
ntables
=
g_args
.
num_of_tables
;
pThread
Info
->
end_table_to
=
g_args
.
num_of_tables
-
1
;
tstrncpy
(
pThread
Info
->
tb_prefix
,
g_args
.
tb_prefix
,
MAX_TB_NAME_SIZE
);
}
r
Info
->
taos
=
taos_connect
(
pThread
Info
->
taos
=
taos_connect
(
g_Dbs
.
host
,
g_Dbs
.
user
,
g_Dbs
.
password
,
g_Dbs
.
db
[
0
].
dbName
,
g_Dbs
.
port
);
if
(
r
Info
->
taos
==
NULL
)
{
if
(
pThread
Info
->
taos
==
NULL
)
{
errorPrint
(
"Failed to connect to TDengine, reason:%s
\n
"
,
taos_errstr
(
NULL
));
free
(
r
Info
);
free
(
pThread
Info
);
exit
(
-
1
);
}
tstrncpy
(
rInfo
->
fp
,
g_Dbs
.
resultFile
,
MAX_FILE_NAME_LEN
);
tstrncpy
(
pThreadInfo
->
filePath
,
g_Dbs
.
resultFile
,
MAX_FILE_NAME_LEN
);
if
(
!
g_Dbs
.
use_metric
)
{
pthread_create
(
&
read_id
,
NULL
,
readTable
,
r
Info
);
pthread_create
(
&
read_id
,
NULL
,
readTable
,
pThread
Info
);
}
else
{
pthread_create
(
&
read_id
,
NULL
,
readMetric
,
r
Info
);
pthread_create
(
&
read_id
,
NULL
,
readMetric
,
pThread
Info
);
}
pthread_join
(
read_id
,
NULL
);
taos_close
(
r
Info
->
taos
);
free
(
r
Info
);
taos_close
(
pThread
Info
->
taos
);
free
(
pThread
Info
);
}
static
void
testCmdLine
()
{
...
...
@@ -7325,6 +7332,9 @@ int main(int argc, char *argv[]) {
     } else {
       testCmdLine();
     }
+
+    if (g_dupstr)
+      free(g_dupstr);
   }

   return 0;
...
src/mnode/src/mnodeSdb.c
...
@@ -712,13 +712,13 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   if (action == SDB_ACTION_INSERT) {
     return sdbPerformInsertAction(pHead, pTable);
   } else if (action == SDB_ACTION_DELETE) {
-    if (qtype == TAOS_QTYPE_FWD) {
+    //if (qtype == TAOS_QTYPE_FWD) {
       // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
-      sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
-      return TSDB_CODE_SUCCESS;
-    } else {
+    //  sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
+    //  return TSDB_CODE_SUCCESS;
+    //} else {
       return sdbPerformDeleteAction(pHead, pTable);
-    }
+    //}
   } else if (action == SDB_ACTION_UPDATE) {
     return sdbPerformUpdateAction(pHead, pTable);
   } else {
...
src/mnode/src/mnodeTable.c
...
@@ -1189,8 +1189,8 @@ static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagN
 static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
   SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
-  mLInfo("msg:%p, app:%p stable %s, add tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
-         tstrerror(code));
+  mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
+         tstrerror(code), pStable->numOfTags);

   return code;
 }
...
src/os/src/detail/osSignal.c
...
@@ -23,7 +23,7 @@
 typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context);

 void taosSetSignal(int32_t signum, FSignalHandler sigfp) {
-  struct sigaction act = {{0}};
+  struct sigaction act;
+  memset(&act, 0, sizeof(act));
 #if 1
   act.sa_flags = SA_SIGINFO;
   act.sa_sigaction = (FLinuxSignalHandler)sigfp;
...
src/sync/src/syncMain.c
浏览文件 @ f3b42525
...
...
@@ -709,7 +709,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
}

static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
- int32_t onlineNum = 0;
+ int32_t onlineNum = 0, arbOnlineNum = 0;
  int32_t masterIndex = -1;
  int32_t replica = pNode->replica;
...
...
@@ -723,13 +723,15 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
  SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
  if (pArb && pArb->role != TAOS_SYNC_ROLE_OFFLINE) {
    onlineNum++;
+   ++arbOnlineNum;
    replica = pNode->replica + 1;
  }

  if (onlineNum <= replica * 0.5) {
    if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
-     if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && onlineNum >= 1) {
+     if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && ((replica > 2 && onlineNum - arbOnlineNum > 1) || pNode->replica < 3)) {
        sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica);
        masterIndex = pNode->selfIndex;
      } else {
        nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
        sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
...
...
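The reworked condition decides whether a node keeps its master role when exactly half of the (arbitrator-inclusive) replicas are online, and it now discounts an online arbitrator for larger replica counts. A small Python rendering of that predicate with two worked cases (an illustrative paraphrase, not the actual syncCheckMaster() code):

def keep_master(replica_cfg, online_num, arb_online_num):
    # replica_cfg mirrors pNode->replica; when the arbitrator is online it has
    # already been counted into online_num, and the effective replica grows by 1.
    replica = replica_cfg + (1 if arb_online_num else 0)
    if online_num > replica * 0.5:
        return True   # normal quorum, nothing special to decide
    return (online_num == replica * 0.5 and
            ((replica > 2 and online_num - arb_online_num > 1) or replica_cfg < 3))

print(keep_master(2, 1, 0))  # True:  replica 2 with the peer down -> small setups may keep the master
print(keep_master(3, 2, 1))  # False: replica 3 with only self + arbitrator online -> step down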
@@ -1002,6 +1004,7 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
  if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
    // nodeVersion = pHead->version;
    code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
+   syncConfirmForward(pNode->rid, pHead->version, code, false);
  } else {
    if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
      code = syncSaveIntoBuffer(pPeer, pHead);
...
...
@@ -1404,7 +1407,7 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
      pthread_mutex_lock(&pNode->mutex);
      for (int32_t i = 0; i < pSyncFwds->fwds; ++i) {
        SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS;
-       if (ABS(time - pFwdInfo->time) < 2000) break;
+       if (ABS(time - pFwdInfo->time) < 10000) break;

        sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId, pFwdInfo->version, time, pFwdInfo->time);
...
...
src/util/src/tcache.c
浏览文件 @ f3b42525
...
...
@@ -613,7 +613,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
    // todo memory leak if there are object with refcount greater than 0 in hash table?
    taosHashCleanup(pCacheObj->pHashTable);
-   taosTrashcanEmpty(pCacheObj, true);
+   taosTrashcanEmpty(pCacheObj, false);

    __cache_lock_destroy(pCacheObj);
...
...
src/util/src/tcrc32c.c
浏览文件 @ f3b42525
...
...
@@ -17,7 +17,7 @@
  misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
-#ifndef _TD_ARM_
+#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
#include <nmmintrin.h>
#endif
...
...
src/vnode/src/vnodeMain.c
浏览文件 @ f3b42525
...
...
@@ -419,7 +419,11 @@ void vnodeDestroy(SVnodeObj *pVnode) {
  }

  if (pVnode->tsdb) {
-   code = tsdbCloseRepo(pVnode->tsdb, 1);
+   // the deleted vnode does not need to commit, so as to speed up the deletion
+   int toCommit = 1;
+   if (pVnode->dropped) toCommit = 0;
+   code = tsdbCloseRepo(pVnode->tsdb, toCommit);
    pVnode->tsdb = NULL;
  }
...
...
src/vnode/src/vnodeSync.c
浏览文件 @ f3b42525
...
...
@@ -126,11 +126,16 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) {
}

void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) {
- void *pVnode = vnodeAcquire(vgId);
+ SVnodeObj *pVnode = vnodeAcquire(vgId);
  if (pVnode == NULL) {
    vError("vgId:%d, vnode not found while confirm forward", vgId);
  }

+ if (code == TSDB_CODE_SYN_CONFIRM_EXPIRED && pVnode->status == TAOS_VN_STATUS_CLOSING) {
+   vDebug("vgId:%d, db:%s, vnode is closing while confirm forward", vgId, pVnode->db);
+   code = TSDB_CODE_VND_IS_CLOSING;
+ }

  dnodeSendRpcVWriteRsp(pVnode, wparam, code);
  vnodeRelease(pVnode);
}
...
...
tests/Jenkinsfile
浏览文件 @ f3b42525
...
...
@@ -37,7 +37,7 @@ pipeline {
    stage('Parallel test stage') {
      parallel {
        stage('pytest') {
-         agent{label '184'}
+         agent{label 'slad1'}
          steps {
            pre_test()
            sh '''
...
...
@@ -62,7 +62,7 @@ pipeline {
    }
    stage('test_crash_gen') {
-     agent{label "185"}
+     agent{label "slad2"}
      steps {
        pre_test()
        sh '''
...
...
@@ -149,7 +149,7 @@ pipeline {
    }
    stage('test_valgrind') {
-     agent{label "186"}
+     agent{label "slad3"}
      steps {
        pre_test()
...
...
tests/mas/Jenkinsfile
0 → 100644
浏览文件 @ f3b42525
def pre_test(){
    sh '''
    sudo rmtaos||echo 'no taosd installed'
    '''
    sh '''
    cd ${WKC}
    git reset --hard
    git checkout $BRANCH_NAME
    git pull
    git submodule update
    cd ${WK}
    git reset --hard
    git checkout $BRANCH_NAME
    git pull
    export TZ=Asia/Harbin
    date
    rm -rf ${WK}/debug
    mkdir debug
    cd debug
    cmake .. > /dev/null
    make > /dev/null
    make install > /dev/null
    pip3 install ${WKC}/src/connector/python/linux/python3/
    '''
    return 1
}
pipeline {
  agent none
  environment{
      WK = '/var/lib/jenkins/workspace/TDinternal'
      WKC = '/var/lib/jenkins/workspace/TDinternal/community'
  }
  stages {
      stage('Parallel test stage') {
        parallel {
          stage('pytest') {
            agent{label 'slam1'}
            steps {
              pre_test()
              sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh pytest
date'''
}
}
          stage('test_b1') {
            agent{label 'slam2'}
            steps {
              pre_test()
              sh '''
              cd ${WKC}/tests
              ./test-all.sh b1
              date'''
            }
          }
          stage('test_crash_gen') {
            agent{label "slam3"}
            steps {
              pre_test()
              sh '''
cd ${WKC}/tests/pytest
'''
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                ./crash_gen.sh -a -p -t 4 -s 2000
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                rm -rf /var/lib/taos/*
                rm -rf /var/log/taos/*
                ./handle_crash_gen_val_log.sh
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                rm -rf /var/lib/taos/*
                rm -rf /var/log/taos/*
                ./handle_taosd_val_log.sh
                '''
              }
              sh '''
              systemctl start taosd
              sleep 10
              '''
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/gotest
                bash batchtest.sh
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
                python3 PythonChecker.py
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/examples/JDBC/JDBCDemo/
                mvn clean package assembly:single -DskipTests >/dev/null
                java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/src/connector/jdbc
                mvn clean package -Dmaven.test.skip=true >/dev/null
                cd ${WKC}/tests/examples/JDBC/JDBCDemo/
                java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
                cd ${JENKINS_HOME}/workspace/nodejs
                node nodejsChecker.js host=localhost
                '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
                dotnet run
                '''
              }
              sh '''
              systemctl stop taosd
              cd ${WKC}/tests
              ./test-all.sh b2
              date
              '''
              sh '''
              cd ${WKC}/tests
              ./test-all.sh full unit
              date'''
            }
          }
          stage('test_valgrind') {
            agent{label "slam4"}
            steps {
              pre_test()
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WKC}/tests/pytest
                nohup taosd >/dev/null &
                sleep 10
                python3 concurrent_inquiry.py -c 1
                '''
              }
              sh '''
              cd ${WKC}/tests
              ./test-all.sh full jdbc
              date'''
              sh '''
              cd ${WKC}/tests/pytest
              ./valgrind-test.sh 2>&1 > mem-error-out.log
              ./handle_val_log.sh
              date
              cd ${WKC}/tests
              ./test-all.sh b3
              date'''
              sh '''
              date
              cd ${WKC}/tests
              ./test-all.sh full example
              date'''
            }
          }
          stage('arm64_build'){
            agent{label 'arm64'}
            steps {
              sh '''
              cd ${WK}
              git fetch
              git checkout develop
              git pull
              cd ${WKC}
              git fetch
              git checkout develop
              git pull
              git submodule update
              cd ${WKC}/packaging
              ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
              '''
            }
          }
          stage('arm32_build'){
            agent{label 'arm32'}
            steps {
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                sh '''
                cd ${WK}
                git fetch
                git checkout develop
                git pull
                cd ${WKC}
                git fetch
                git checkout develop
                git pull
                git submodule update
                cd ${WKC}/packaging
                ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
                '''
              }
            }
          }
        }
      }
  }
  post {
    success {
      emailext (
        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
    failure {
      emailext (
        subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
  }
}
\ No newline at end of file
tests/perftest-scripts/perftest-query.sh
浏览文件 @ f3b42525
...
...
@@ -64,18 +64,25 @@ function runQueryPerfTest {
  [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT
  nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 &
  echoInfo "Wait TDengine to start"
- sleep 300
+ sleep 60
  echoInfo "Run Performance Test"
  cd $WORK_DIR/TDengine/tests/pytest

  python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT

  mkdir -p /var/lib/perf/
  mkdir -p /var/log/perf/
  rm -rf /var/lib/perf/*
  rm -rf /var/log/perf/*
  nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 &
  echoInfo "Wait TDengine to start"
  sleep 10
  echoInfo "Run Performance Test"
  cd $WORK_DIR/TDengine/tests/pytest

  python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
  python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
  python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
}
...
...
tests/pytest/crash_gen/service_manager.py
浏览文件 @ f3b42525
...
...
@@ -22,7 +22,7 @@ from queue import Queue, Empty
from .shared.config import Config
from .shared.db import DbTarget, DbConn
from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
-from .shared.types import DirPath
+from .shared.types import DirPath, IpcStream

# from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
# from crash_gen.db import DbConn, DbTarget
...
...
@@ -177,13 +177,12 @@ quorum 2
        return "127.0.0.1"

    def getServiceCmdLine(self): # to start the instance
+       cmdLine = []
        if Config.getConfig().track_memory_leaks:
            Logging.info("Invoking VALGRIND on service...")
-           return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
-       else:
-           # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
-           return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
+           cmdLine = ['valgrind', '--leak-check=yes']
+       # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
+       cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
+       return cmdLine

    def _getDnodes(self, dbc):
        dbc.query("show dnodes")
...
...
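The rewritten getServiceCmdLine() above accumulates a single cmdLine list, prepending the valgrind invocation only when leak tracking is enabled, and the caller later joins the list into one shell string for Popen(). A hedged, standalone sketch of that pattern (the paths and flags below are placeholders, not the real crash_gen configuration):

def build_cmd_line(exec_file, cfg_dir, track_memory_leaks=False):
    cmd_line = []
    if track_memory_leaks:
        cmd_line = ['valgrind', '--leak-check=yes']
    # "exec" lets the launched shell replace itself with the command,
    # so the manager keeps kill control over the same PID
    cmd_line += ['exec ' + exec_file, '-c', cfg_dir]
    return cmd_line

print(' '.join(build_cmd_line('/usr/bin/taosd', '/etc/taos', track_memory_leaks=True)))
# the joined string is what would be handed to Popen(..., shell=True)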
@@ -281,16 +280,16 @@ class TdeSubProcess:
        return '[TdeSubProc: pid = {}, status = {}]'.format(
            self.getPid(), self.getStatus() )

-   def getStdOut(self) -> BinaryIO :
+   def getIpcStdOut(self) -> IpcStream :
        if self._popen.universal_newlines : # alias of text_mode
            raise CrashGenError("We need binary mode for STDOUT IPC")
        # Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
-       return typing.cast(BinaryIO, self._popen.stdout)
+       return typing.cast(IpcStream, self._popen.stdout)

-   def getStdErr(self) -> BinaryIO :
+   def getIpcStdErr(self) -> IpcStream :
        if self._popen.universal_newlines : # alias of text_mode
            raise CrashGenError("We need binary mode for STDERR IPC")
-       return typing.cast(BinaryIO, self._popen.stderr)
+       return typing.cast(IpcStream, self._popen.stderr)

    # Now it's always running, since we matched the life cycle
    # def isRunning(self):
...
...
@@ -302,11 +301,6 @@ class TdeSubProcess:
    def _start(self, cmdLine) -> Popen :
        ON_POSIX = 'posix' in sys.builtin_module_names

-       # Sanity check
-       # if self.subProcess: # already there
-       #     raise RuntimeError("Corrupt process state")

        # Prepare environment variables for coverage information
        # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
        myEnv = os.environ.copy()
...
@@ -314,9 +308,8 @@ class TdeSubProcess:
        # print(myEnv)
        # print("Starting TDengine with env: ", myEnv.items())
-       # print("Starting TDengine via Shell: {}".format(cmdLineStr))
+       print("Starting TDengine: {}".format(cmdLine))

        # useShell = True # Needed to pass environments into it
        return Popen(
            ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
            shell=True, # Always use shell, since we need to pass ENV vars
...
...
@@ -732,19 +725,19 @@ class ServiceManagerThread:
        self._ipcQueue = Queue() # type: Queue
        self._thread = threading.Thread( # First thread captures server OUTPUT
            target=self.svcOutputReader,
-           args=(subProc.getStdOut(), self._ipcQueue, logDir))
+           args=(subProc.getIpcStdOut(), self._ipcQueue, logDir))
        self._thread.daemon = True # thread dies with the program
        self._thread.start()
        time.sleep(0.01)
        if not self._thread.is_alive(): # What happened?
-           Logging.info("Failed to started process to monitor STDOUT")
+           Logging.info("Failed to start process to monitor STDOUT")
            self.stop()
            raise CrashGenError("Failed to start thread to monitor STDOUT")
        Logging.info("Successfully started process to monitor STDOUT")

        self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
            target=self.svcErrorReader,
-           args=(subProc.getStdErr(), self._ipcQueue, logDir))
+           args=(subProc.getIpcStdErr(), self._ipcQueue, logDir))
        self._thread2.daemon = True # thread dies with the program
        self._thread2.start()
        time.sleep(0.01)
...
...
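Both reader threads above funnel chunks from the child's stdout and stderr into a single IPC queue, running as daemon threads so they die with the program. A minimal self-contained illustration of that fan-in pattern (using in-memory byte streams as stand-ins for a real Popen's pipes):

import io
import queue
import threading

ipc_queue = queue.Queue()

def reader(stream, tag):
    # mirror svcOutputReader/svcErrorReader: drain the stream until EOF, push chunks to one queue
    for line in iter(stream.readline, b''):
        ipc_queue.put((tag, line.decode('utf-8', errors='replace')))

# stand-ins for subProc.getIpcStdOut()/getIpcStdErr()
out = io.BytesIO(b"server started\nready\n")
err = io.BytesIO(b"warning: low disk space\n")

threads = [
    threading.Thread(target=reader, args=(out, 'stdout'), daemon=True),
    threading.Thread(target=reader, args=(err, 'stderr'), daemon=True),
]
for t in threads:
    t.start()
for t in threads:
    t.join()

while not ipc_queue.empty():
    print(ipc_queue.get())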
@@ -887,14 +880,19 @@ class ServiceManagerThread:
                print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
                return None

-   def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str
+   def _textChunkGenerator(self, streamIn: IpcStream, logDir: str, logFile: str
                 ) -> Generator[TextChunk, None, None]:
        '''
-       Take an input stream with binary data, produced a generator of decoded
-       "text chunks", and also save the original binary data in a log file.
+       Take an input stream with binary data (likely from Popen), produced a generator of decoded
+       "text chunks".
+
+       Side effect: it also save the original binary data in a log file.
        '''
        os.makedirs(logDir, exist_ok=True)
        logF = open(os.path.join(logDir, logFile), 'wb')
        if logF is None:
            Logging.error("Failed to open log file (binary write): {}/{}".format(logDir, logFile))
            return
        for bChunk in iter(streamIn.readline, b''):
            logF.write(bChunk) # Write to log file immediately
            tChunk = self._decodeBinaryChunk(bChunk) # decode
...
...
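The docstring above spells out the generator's contract: read binary chunks from the stream, mirror the raw bytes into a log file as a side effect, and yield decoded text chunks. A hedged stand-alone version of that idea (simplified decoding, none of the crash_gen types):

import io
import os
from typing import BinaryIO, Generator

def text_chunks(stream_in: BinaryIO, log_dir: str, log_file: str) -> Generator[str, None, None]:
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, log_file), 'wb') as log_f:
        for b_chunk in iter(stream_in.readline, b''):
            log_f.write(b_chunk)                             # keep the verbatim bytes as a log
            yield b_chunk.decode('utf-8', errors='replace')  # hand back decoded text
    stream_in.close()                                        # close the incoming stream at EOF

for chunk in text_chunks(io.BytesIO(b"line one\nline two\n"), "/tmp/demo_logs", "stdout.log"):
    print(chunk, end='')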
@@ -902,14 +900,14 @@ class ServiceManagerThread:
            yield tChunk # TODO: split into actual text lines

        # At the end...
-       streamIn.close() # Close the stream
-       logF.close() # Close the output file
+       streamIn.close() # Close the incoming stream
+       logF.close() # Close the log file

-   def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str):
+   def svcOutputReader(self, ipcStdOut: IpcStream, queue, logDir: str):
        '''
        The infinite routine that processes the STDOUT stream for the sub process being managed.

-       :param stdOut: the IO stream object used to fetch the data from
+       :param ipcStdOut: the IO stream object used to fetch the data from
        :param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
        :param logDir: where we should dump a verbatim output file
        '''
...
...
@@ -917,7 +915,7 @@ class ServiceManagerThread:
        # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
        # print("This is the svcOutput Reader...")
        # stdOut.readline() # Skip the first output? TODO: remove?
-       for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') :
+       for tChunk in self._textChunkGenerator(ipcStdOut, logDir, 'stdout.log') :
            queue.put(tChunk) # tChunk garanteed not to be None
            self._printProgress("_i")
...
...
@@ -940,12 +938,12 @@ class ServiceManagerThread:
            Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
        self.setStatus(Status.STATUS_STOPPED)

-   def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str):
+   def svcErrorReader(self, ipcStdErr: IpcStream, queue, logDir: str):
        # os.makedirs(logDir, exist_ok=True)
        # logFile = os.path.join(logDir,'stderr.log')
        # fErr = open(logFile, 'wb')
        # for line in iter(err.readline, b''):
-       for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') :
+       for tChunk in self._textChunkGenerator(ipcStdErr, logDir, 'stderr.log') :
            queue.put(tChunk) # tChunk garanteed not to be None
            # fErr.write(line)
            Logging.info("TDengine STDERR: {}".format(tChunk))
...
...
tests/pytest/crash_gen/shared/types.py
浏览文件 @ f3b42525
-from typing import Any, List, Dict, NewType
+from typing import Any, BinaryIO, List, Dict, NewType
from enum import Enum

DirPath = NewType('DirPath', str)
...
...
@@ -26,3 +26,5 @@ class TdDataType(Enum):
TdColumns = Dict[str, TdDataType]
TdTags = Dict[str, TdDataType]

+IpcStream = NewType('IpcStream', BinaryIO)
\ No newline at end of file
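IpcStream is a typing.NewType over BinaryIO, so the annotation changes in service_manager.py are purely for the type checker; at runtime the object is still the original stream. A short illustration of that behaviour:

import io
from typing import BinaryIO, NewType

IpcStream = NewType('IpcStream', BinaryIO)

raw = io.BytesIO(b"hello\n")
stream = IpcStream(raw)                    # no wrapping happens: stream is raw
print(stream is raw, stream.readline())    # True b'hello\n'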
tests/pytest/fulltest.sh
浏览文件 @ f3b42525
...
...
@@ -183,7 +183,7 @@ python3 ./test.py -f stable/query_after_reset.py
# perfbenchmark
python3 ./test.py -f perfbenchmark/bug3433.py
#python3 ./test.py -f perfbenchmark/bug3589.py
python3 ./test.py -f perfbenchmark/taosdemoInsert.py

#query
python3 ./test.py -f query/filter.py
...
...
tests/pytest/insert/insertFromCSVPerformance.py
浏览文件 @ f3b42525
...
...
@@ -31,7 +31,7 @@ class insertFromCSVPerformace:
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
-       self.config = "/etc/taosperf"
+       self.config = "/etc/perf"
        self.conn = taos.connect(
            self.host,
            self.user,
...
...
tests/pytest/perfbenchmark/taosdemoInsert.py
0 → 100644
浏览文件 @ f3b42525
###################################################################
#           Copyright (c) 2016 by TAOS Technologies, Inc.
#                     All rights reserved.
#
#  This file is proprietary and confidential to TAOS Technologies.
#  No part of this file may be reproduced, stored, transmitted,
#  disclosed or used in any form or by any means other than as
#  expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import taos
import sys
import os
import json
import argparse
import subprocess
import datetime
import re

from multiprocessing import cpu_count
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnode

class Taosdemo:
    def __init__(self, clearCache, dbName, keep):
        self.clearCache = clearCache
        self.dbname = dbName
        self.drop = "yes"
        self.keep = keep
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        # self.config = "/etc/taosperf"
        # self.conn = taos.connect(
        #     self.host,
        #     self.user,
        #     self.password,
        #     self.config)

    # env config
    def getBuildPath(self) -> str:
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/debug/build/bin")]
                    break
        return buildPath

    def getExeToolsDir(self) -> str:
        self.debugdir = self.getBuildPath() + "/debug/build/bin"
        return self.debugdir

    def getCfgDir(self) -> str:
        self.config = self.getBuildPath() + "/sim/dnode1/cfg"
        return self.config

    # taodemo insert file config
    def dbinfocfg(self) -> dict:
        return {
            "name": self.dbname,
            "drop": self.drop,
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": self.keep,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        }

    def type_check(func):
        def wrapper(self, **kwargs):
            num_types = ["int", "float", "bigint", "tinyint", "smallint", "double"]
            str_types = ["binary", "nchar"]
            for k, v in kwargs.items():
                if k.lower() not in num_types and k.lower() not in str_types:
                    return f"args {k} type error, not allowed"
                elif not isinstance(v, (int, list, tuple)):
                    return f"value {v} type error, not allowed"
                elif k.lower() in num_types and not isinstance(v, int):
                    return f"arg {v} takes 1 positional argument must be type int "
                elif isinstance(v, (list, tuple)) and len(v) > 2:
                    return f"arg {v} takes from 1 to 2 positional arguments but more than 2 were given "
                elif isinstance(v, (list, tuple)) and [False for _ in v if not isinstance(_, int)]:
                    return f"arg {v} takes from 1 to 2 positional arguments must be type int "
                else:
                    pass
            return func(self, **kwargs)
        return wrapper

    @type_check
    def column_tag_count(self, **column_tag) -> list:
        init_column_tag = []
        for k, v in column_tag.items():
            if re.search(k, "int, float, bigint, tinyint, smallint, double", re.IGNORECASE):
                init_column_tag.append({"type": k, "count": v})
            elif re.search(k, "binary, nchar", re.IGNORECASE):
                if isinstance(v, int):
                    init_column_tag.append({"type": k, "count": v, "len": 8})
                elif len(v) == 1:
                    init_column_tag.append({"type": k, "count": v[0], "len": 8})
                else:
                    init_column_tag.append({"type": k, "count": v[0], "len": v[1]})
        return init_column_tag

    def stbcfg(self, stb: str, child_tab_count: int, rows: int, prechildtab: str, columns: dict, tags: dict) -> dict:
        return {
            "name": stb,
            "child_table_exists": "no",
            "childtable_count": child_tab_count,
            "childtable_prefix": prechildtab,
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": rows,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "rows_per_tbl": 1,
            "max_sql_len": 65480,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 10,
            "start_timestamp": f"{datetime.datetime.now():%F %X}",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": self.column_tag_count(**columns),
            "tags": self.column_tag_count(**tags)
        }

    def schemecfg(self, intcount=1, floatcount=0, bcount=0, tcount=0, scount=0, doublecount=0, binarycount=0, ncharcount=0):
        return {
            "INT": intcount,
            "FLOAT": floatcount,
            "BIGINT": bcount,
            "TINYINT": tcount,
            "SMALLINT": scount,
            "DOUBLE": doublecount,
            "BINARY": binarycount,
            "NCHAR": ncharcount
        }

    def insertcfg(self, db: dict, stbs: list) -> dict:
        return {
            "filetype": "insert",
            "cfgdir": self.config,
            "host": self.host,
            "port": 6030,
            "user": self.user,
            "password": self.password,
            "thread_count": cpu_count(),
            "thread_count_create_tbl": cpu_count(),
            "result_file": "/tmp/insert_res.txt",
            "confirm_parameter_prompt": "no",
            "insert_interval": 0,
            "num_of_records_per_req": 100,
            "max_sql_len": 1024000,
            "databases": [{
                "dbinfo": db,
                "super_tables": stbs
            }]
        }

    def createinsertfile(self, db: dict, stbs: list) -> str:
        date = datetime.datetime.now()
        file_create_table = f"/tmp/insert_{date:%F-%H%M}.json"

        with open(file_create_table, 'w') as f:
            json.dump(self.insertcfg(db, stbs), f)

        return file_create_table

    # taosdemo query file config
    def querysqls(self, sql: str) -> list:
        return [{"sql": sql, "result": ""}]

    def querycfg(self, sql: str) -> dict:
        return {
            "filetype": "query",
            "cfgdir": self.config,
            "host": self.host,
            "port": 6030,
            "user": self.user,
            "password": self.password,
            "confirm_parameter_prompt": "yes",
            "query_times": 10,
            "query_mode": "taosc",
            "databases": self.dbname,
            "specified_table_query": {
                "query_interval": 0,
                "concurrent": cpu_count(),
                "sqls": self.querysqls(sql)
            }
        }

    def createqueryfile(self, sql: str):
        date = datetime.datetime.now()
        file_query_table = f"/tmp/query_{date:%F-%H%M}.json"

        with open(file_query_table, "w") as f:
            json.dump(self.querycfg(sql), f)

        return file_query_table

    # Execute taosdemo, and delete temporary files when finished
    def taosdemotable(self, filepath: str, resultfile="/dev/null"):
        taosdemopath = self.getBuildPath() + "/debug/build/bin"
        with open(filepath, "r") as f:
            filetype = json.load(f)["filetype"]
        if filetype == "insert":
            taosdemo_table_cmd = f"{taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1"
        else:
            taosdemo_table_cmd = f"yes | {taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1"
        try:
            _ = subprocess.check_output(taosdemo_table_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

    def droptmpfile(self, filepath: str):
        drop_file_cmd = f"[ -f {filepath} ] && rm -f {filepath}"
        try:
            _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

    # TODO:需要完成TD-4153的数据插入和客户端请求的性能查询。
    def td4153insert(self):

        tdLog.printNoPrefix("========== start to create table and insert data ==========")
        self.dbname = "td4153"
        db = self.dbinfocfg()
        stblist = []

        columntype = self.schemecfg(intcount=1, ncharcount=100)
        tagtype = self.schemecfg(intcount=1)
        stbname = "stb1"
        prechild = "t1"
        stable = self.stbcfg(
            stb=stbname,
            prechildtab=prechild,
            child_tab_count=2,
            rows=10000,
            columns=columntype,
            tags=tagtype
        )
        stblist.append(stable)
        insertfile = self.createinsertfile(db=db, stbs=stblist)

        nmon_file = f"/tmp/insert_{datetime.datetime.now():%F-%H%M}.nmon"
        cmd = f"nmon -s5 -F {nmon_file} -m /tmp/"
        try:
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

        self.taosdemotable(insertfile)
        self.droptmpfile(insertfile)
        self.droptmpfile("/tmp/insert_res.txt")

        # In order to prevent too many performance files from being generated, the nmon file is deleted.
        # and the delete statement can be cancelled during the actual test.
        self.droptmpfile(nmon_file)

        cmd = f"ps -ef|grep -w nmon| grep -v grep | awk '{{print $2}}'"
        try:
            time.sleep(10)
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except BaseException as e:
            raise e

    def td4153query(self):
        tdLog.printNoPrefix("========== start to query operation ==========")
        sqls = {
            "select_all": "select * from stb1",
            "select_join": "select * from t10, t11 where t10.ts=t11.ts"
        }
        for type, sql in sqls.items():
            result_file = f"/tmp/queryResult_{type}.log"
            query_file = self.createqueryfile(sql)
            try:
                self.taosdemotable(query_file, resultfile=result_file)
            except subprocess.CalledProcessError as e:
                out_put = e.output
            if result_file:
                print(f"execute rows {type.split('_')[1]} sql, the sql is: {sql}")
                max_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{max=$2;next}}{{max=max>$2?max:$2}}END{{print "Max=",max,"s"}}'
                '''
                max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {max_sql_time}")

                min_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{min=$2;next}}{{min=min<$2?min:$2}}END{{print "Min=",min,"s"}}'
                '''
                min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {min_sql_time}")

                avg_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk '{{sum+=$2}}END{{print "Average=",sum/NR,"s"}}'
                '''
                avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {avg_sql_time}")

            self.droptmpfile(query_file)
            self.droptmpfile(result_file)

        drop_query_tmt_file_cmd = " find ./ -name 'querySystemInfo-*' -type f -exec rm {} \; "
        try:
            _ = subprocess.check_output(drop_query_tmt_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output
        pass

    def td4153(self):
        self.td4153insert()
        self.td4153query()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-r',
        '--remove-cache',
        action='store_true',
        default=False,
        help='clear cache before query (default: False)')
    parser.add_argument(
        '-d',
        '--database-name',
        action='store',
        default='db',
        type=str,
        help='Database name to be created (default: db)')
    parser.add_argument(
        '-k',
        '--keep-time',
        action='store',
        default=3650,
        type=int,
        help='Database keep parameters (default: 3650)')

    args = parser.parse_args()
    taosdemo = Taosdemo(args.remove_cache, args.database_name, args.keep_time)
    # taosdemo.conn = taos.connect(
    #     taosdemo.host,
    #     taosdemo.user,
    #     taosdemo.password,
    #     taosdemo.config
    # )

    debugdir = taosdemo.getExeToolsDir()
    cfgdir = taosdemo.getCfgDir()
    cmd = f"{debugdir}/taosd -c {cfgdir} >/dev/null 2>&1 &"
    try:
        _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
    except subprocess.CalledProcessError as e:
        _ = e.output

    if taosdemo.clearCache:
        # must be root permission
        subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches", shell=True).decode("utf-8")

    taosdemo.td4153()
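The new test drives taosdemo entirely through generated JSON files: dbinfocfg()/stbcfg() build the sections, insertcfg() wraps them, and taosdemotable() shells out to the binary. A hedged sketch of composing such a config by hand (field names follow the insert JSON shown above; the taosdemo path in the comment is a placeholder):

import json

db = {"name": "td4153", "drop": "yes", "replica": 1, "keep": 3650, "precision": "ms"}
stb = {
    "name": "stb1",
    "childtable_count": 2,
    "childtable_prefix": "t1",
    "insert_rows": 10000,
    "insert_mode": "taosc",
    "columns": [{"type": "INT", "count": 1}, {"type": "NCHAR", "count": 100, "len": 8}],
    "tags": [{"type": "INT", "count": 1}],
}
cfg = {
    "filetype": "insert",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "databases": [{"dbinfo": db, "super_tables": [stb]}],
}

with open("/tmp/insert_demo.json", "w") as f:
    json.dump(cfg, f, indent=2)
# then, as taosdemotable() does:  <build>/debug/build/bin/taosdemo -f /tmp/insert_demo.json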
tests/pytest/tools/taosdemoPerformance.py
浏览文件 @ f3b42525
...
...
@@ -24,7 +24,7 @@ class taosdemoPerformace:
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
-       self.config = "/etc/taosperf"
+       self.config = "/etc/perf"
        self.conn = taos.connect(
            self.host,
            self.user,
...
...
@@ -77,7 +77,7 @@ class taosdemoPerformace:
        insert_data = {
            "filetype": "insert",
-           "cfgdir": "/etc/taosperf",
+           "cfgdir": "/etc/perf",
            "host": "127.0.0.1",
            "port": 6030,
            "user": "root",
...
...
@@ -104,7 +104,7 @@ class taosdemoPerformace:
        return output

    def insertData(self):
-       os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson())
+       os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
        self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
        self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
        self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
...
...
tests/script/general/parser/select_with_tags.sim
浏览文件 @ f3b42525
...
...
@@ -847,10 +847,16 @@ sql_error select tbname, t1 from select_tags_mt0 interval(1y);
#valid sql: select first(c1), last(c2), count(*) from select_tags_mt0 group by tbname, t1;
#valid sql: select first(c1), tbname, t1 from select_tags_mt0 group by t2;

print ==================================>TD-4231
sql_error select t1,tbname from select_tags_mt0 where c1<0
sql_error select t1,tbname from select_tags_mt0 where c1<0 and tbname in ('select_tags_tb12')
sql select tbname from select_tags_mt0 where tbname in ('select_tags_tb12');

sql_error select first(c1), last(c2), t1 from select_tags_mt0 group by tbname;
sql_error select first(c1), last(c2), tbname, t2 from select_tags_mt0 group by tbname;
sql_error select first(c1), count(*), t2, t1, tbname from select_tags_mt0 group by tbname;

-#this sql is valid: select first(c1), t2 from select_tags_mt0 group by tbname;
+#valid sql: select first(c1), t2 from select_tags_mt0 group by tbname;

#sql select first(ts), tbname from select_tags_mt0 group by tbname;
#sql select count(c1) from select_tags_mt0 where c1=99 group by tbname;
...
...
tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
浏览文件 @ f3b42525
...
...
@@ -158,7 +158,7 @@ if $dnode4Vtatus != offline then
  sleep 2000
  goto wait_dnode4_vgroup_offline
endi

-if $dnode3Vtatus != master then
+if $dnode3Vtatus != unsynced then
  sleep 2000
  goto wait_dnode4_vgroup_offline
endi
...
...