Commit 162c0124
Authored May 26, 2021 by Haojun Liao

Merge branch 'master' into fix/query

Parents: 29746411, 04d464bd

Showing 37 changed files, with 1852 additions and 1083 deletions (+1852 / -1083).
Changed files (37):

```
.gitmodules                                                 +3    -3
cmake/platform.inc                                          +6    -0
cmake/version.inc                                           +1    -1
packaging/deb/makedeb.sh                                    +6    -1
packaging/rpm/tdengine.spec                                 +6    -1
packaging/tools/make_install.sh                            +10    -2
packaging/tools/makeclient.sh                              +12    -4
packaging/tools/makeclient_power.sh                        +11    -3
packaging/tools/makepkg.sh                                 +30    -3
packaging/tools/makepkg_power.sh                           +11    -2
snap/snapcraft.yaml                                         +2    -2
src/client/src/tscStream.c                                  +1    -0
src/connector/go                                            +1    -1
src/connector/nodejs/nodetaos/cinterface.js               +129  -127
src/connector/nodejs/nodetaos/cursor.js                    +59   -62
src/connector/nodejs/package-lock.json                      +0  -285
src/connector/nodejs/package.json                           +1    -1
src/kit/taosdemo/async-sub.json                            +41    -0
src/kit/taosdemo/subscribe.json                            +30   -10
src/kit/taosdemo/taosdemo.c                               +713  -509
src/os/src/detail/osSignal.c                                +3    -3
src/sync/src/syncMain.c                                     +6    -3
src/util/src/tcache.c                                       +1    -1
src/util/src/tcrc32c.c                                      +1    -1
src/vnode/src/vnodeMain.c                                   +5    -1
src/vnode/src/vnodeWrite.c                                  +5    -2
tests/Jenkinsfile                                           +3    -3
tests/mas/Jenkinsfile                                     +309    -0
tests/perftest-scripts/perftest-query.sh                   +11    -4
tests/pytest/crash_gen/service_manager.py                  +28   -30
tests/pytest/crash_gen/shared/types.py                      +3    -1
tests/pytest/fulltest.sh                                    +1    -1
tests/pytest/insert/insertFromCSVPerformance.py             +1    -1
tests/pytest/perfbenchmark/taosdemoInsert.py              +387    -0
tests/pytest/tools/taosdemoPerformance.py                   +3    -3
tests/pytest/tools/taosdemoTestWithJson.py                 +11   -11
tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim   +1    -1
```
.gitmodules

```diff
 [submodule "src/connector/go"]
 	path = src/connector/go
-	url = https://github.com/taosdata/driver-go
+	url = git@github.com:taosdata/driver-go.git
 [submodule "src/connector/grafanaplugin"]
 	path = src/connector/grafanaplugin
-	url = https://github.com/taosdata/grafanaplugin
+	url = git@github.com:taosdata/grafanaplugin.git
 [submodule "src/connector/hivemq-tdengine-extension"]
 	path = src/connector/hivemq-tdengine-extension
-	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
+	url = git@github.com:taosdata/hivemq-tdengine-extension.git
 [submodule "tests/examples/rust"]
 	path = tests/examples/rust
 	url = https://github.com/songtianyi/tdengine-rust-bindings.git
```
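Because this change rewrites the recorded submodule URLs (HTTPS to SSH form), existing checkouts need their submodule remotes refreshed before the next update. A minimal sketch using standard git commands (nothing TDengine-specific is assumed):

```sh
# Re-read .gitmodules and rewrite each submodule's configured remote URL,
# then check the submodules out at the commits this repository records.
git submodule sync --recursive
git submodule update --init --recursive
```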
cmake/platform.inc

```diff
@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
     SET(TD_LINUX TRUE)
     SET(TD_LINUX_64 FALSE)
     SET(TD_ARM_64 TRUE)
+  ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
+    SET(CPUTYPE "mips64")
+    MESSAGE(STATUS "Set CPUTYPE to mips64")
+    SET(TD_LINUX TRUE)
+    SET(TD_LINUX_64 FALSE)
+    SET(TD_MIPS_64 TRUE)
   ENDIF ()
 ELSE ()
```
cmake/version.inc

```diff
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.0.20.2")
+  SET(TD_VER_NUMBER "2.0.20.5")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
```
packaging/deb/makedeb.sh

```diff
@@ -58,7 +58,12 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}
 cp ${compile_dir}/../src/inc/taos.h        ${pkg_dir}${install_home_path}/include
 cp ${compile_dir}/../src/inc/taoserror.h   ${pkg_dir}${install_home_path}/include
 cp -r ${top_dir}/tests/examples/*          ${pkg_dir}${install_home_path}/examples
-cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
+if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
+  cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
+else
+  echo "grafanaplugin bundled directory not found!"
+  exit 1
+fi
 cp -r ${top_dir}/src/connector/python      ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/go          ${pkg_dir}${install_home_path}/connector
 cp -r ${top_dir}/src/connector/nodejs      ${pkg_dir}${install_home_path}/connector
```
packaging/rpm/tdengine.spec

```diff
@@ -66,7 +66,12 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
 cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
 cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
 cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
-cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
+if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
+  cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
+else
+  echo grafanaplugin bundled directory not found!
+  exit 1
+fi
 cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
 cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
```
packaging/tools/make_install.sh

```diff
@@ -243,9 +243,17 @@ function install_data() {
 }

 function install_connector() {
-  ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector
+  if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
+    ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+  fi
+  if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
+    ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
   ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
-  ${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
   ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
 }
```
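The `find … -mindepth 1 -maxdepth 1 | read` guard introduced here (and repeated in the makeclient/makepkg scripts below) succeeds only when the directory has at least one entry, i.e. when the go submodule was actually checked out. A minimal standalone sketch of the idiom (the `dir` path is illustrative):

```sh
#!/bin/bash
dir="src/connector/go"
# `read` consumes one line of find's output and returns zero only if a
# line arrived, so the else branch runs when the directory is empty.
if find "$dir" -mindepth 1 -maxdepth 1 | read; then
  echo "$dir is non-empty: installing"
else
  echo "WARNING: $dir is empty, skipping"
fi
```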
packaging/tools/makeclient.sh

```diff
@@ -117,10 +117,18 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   if [ "$osType" != "Darwin" ]; then
     cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
   fi
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
-  cp -r ${connector_dir}/go ${install_dir}/connector
-  cp -r ${connector_dir}/nodejs ${install_dir}/connector
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector
+  cp -r ${connector_dir}/nodejs ${install_dir}/connector
 fi
 # Copy release note
 # cp ${script_dir}/release_note ${install_dir}
```
packaging/tools/makeclient_power.sh

```diff
@@ -144,9 +144,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   if [ "$osType" != "Darwin" ]; then
     cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
   fi
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
-  cp -r ${connector_dir}/go ${install_dir}/connector
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
```
packaging/tools/makepkg.sh

```diff
@@ -114,6 +114,25 @@ mkdir -p ${install_dir}/examples
 examples_dir="${top_dir}/tests/examples"
 cp -r ${examples_dir}/c      ${install_dir}/examples
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+  if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
+    rm -rf ${examples_dir}/JDBC/connectionPools/target
+  fi
+  if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
+    rm -rf ${examples_dir}/JDBC/JDBCDemo/target
+  fi
+  if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
+    rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
+  fi
+  if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
+    rm -rf ${examples_dir}/JDBC/springbootdemo/target
+  fi
+  if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
+    rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
+  fi
+  if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
+    rm -rf ${examples_dir}/JDBC/taosdemo/target
+  fi
   cp -r ${examples_dir}/JDBC   ${install_dir}/examples
   cp -r ${examples_dir}/matlab ${install_dir}/examples
   cp -r ${examples_dir}/python ${install_dir}/examples
@@ -131,9 +150,17 @@ connector_dir="${code_dir}/connector"
 mkdir -p ${install_dir}/connector
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
-  cp -r ${connector_dir}/python ${install_dir}/connector/
-  cp -r ${connector_dir}/go ${install_dir}/connector
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
+  cp -r ${connector_dir}/python ${install_dir}/connector
   cp -r ${connector_dir}/nodejs ${install_dir}/connector
 fi
 # Copy release note
```
packaging/tools/makepkg_power.sh

```diff
@@ -166,9 +166,18 @@ connector_dir="${code_dir}/connector"
 mkdir -p ${install_dir}/connector
 if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
   cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
-  cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
+  if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+    cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+  else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+  fi
+  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+    cp -r ${connector_dir}/go ${install_dir}/connector
+  else
+    echo "WARNING: go connector not found, please check if want to use it!"
+  fi
   cp -r ${connector_dir}/python ${install_dir}/connector/
-  cp -r ${connector_dir}/go ${install_dir}/connector
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
   sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
```
snap/snapcraft.yaml

```diff
 name: tdengine
 base: core18
-version: '2.0.20.2'
+version: '2.0.20.5'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
@@ -72,7 +72,7 @@ parts:
         - usr/bin/taosd
         - usr/bin/taos
         - usr/bin/taosdemo
-        - usr/lib/libtaos.so.2.0.20.2
+        - usr/lib/libtaos.so.2.0.20.5
         - usr/lib/libtaos.so.1
         - usr/lib/libtaos.so
```
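After the version bump, the snap can be rebuilt from the updated snapcraft.yaml. A minimal sketch (assumes snapcraft is installed; the artifact filename is illustrative and depends on the build architecture):

```sh
# Build the snap from the repository root.
snapcraft
# Install the locally built, unsigned snap for testing.
sudo snap install ./tdengine_2.0.20.5_amd64.snap --dangerous
```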
src/client/src/tscStream.c

```diff
@@ -623,6 +623,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   if (pSql->sqlstr == NULL) {
     tscError("0x%" PRIx64 " failed to malloc sql string buffer", pSql->self);
+    tscFreeSqlObj(pSql);
     free(pStream);
     return NULL;
   }
```
src/connector/go @ 8ce6d865

```diff
-Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
+Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
```
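To see what this driver-go submodule bump actually pulls in, the commit range can be listed inside the submodule checkout. A hedged sketch using standard git (assumes the submodule has been initialized and its remote is reachable):

```sh
cd src/connector/go
git fetch origin
# List the commits between the old and new recorded submodule pointers.
git log --oneline 7a26c432f8b4203e42344ff3290b9b9b01b983d5..8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
```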
src/connector/nodejs/nodetaos/cinterface.js

Note: most of this file's +129/-127 changed lines are whitespace and formatting changes whose old and new forms render identically in this capture; such pairs are shown once below. The substantive changes are the `fetchBlock` rewrite (fetching whole result blocks via `taos_fetch_block` instead of single rows via `taos_fetch_row`), the `affectedRows` parameter rename from `connection` to `result`, and two corrected FFI comments.

```diff
@@ -9,7 +9,7 @@ const ffi = require('ffi-napi');
 const ArrayType = require('ref-array-napi');
 const Struct = require('ref-struct-napi');
 const FieldTypes = require('./constants');
 const errors = require('./error');
 const TaosObjects = require('./taosobjects');
 const { NULL_POINTER } = require('ref-napi');
@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) {
   return new TaosObjects.TaosTimestamp(time * 0.001, true);
 }

 function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   timestampConverter = convertMillisecondsToDatetime;
   if (micro == true) {
     timestampConverter = convertMicrosecondsToDatetime;
@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false
   }
   return res;
 }
 function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = new Array(data.length);
   for (let i = 0; i < data.length; i++) {
     if (data[i] == 0) {
       res[i] = false;
     }
-    else if (data[i] == 1){
+    else if (data[i] == 1) {
       res[i] = true;
     }
     else if (data[i] == FieldTypes.C_BOOL_NULL) {
@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
   }
   return res;
 }
 function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
   while (currOffset < data.length) {
     let d = data.readIntLE(currOffset, 1);
     res.push(d == FieldTypes.C_TINYINT_NULL ? null : d);
     currOffset += nbytes;
   }
   return res;
 }
 function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
   while (currOffset < data.length) {
     let d = data.readIntLE(currOffset, 2);
     res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d);
     currOffset += nbytes;
   }
   return res;
 }
 function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
   }
   return res;
 }
 function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
   }
   return res;
 }
 function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
   }
   return res;
 }
 function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
   }
   return res;
 }
 function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let currOffset = 0;
@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
   }
   return res;
 }
 function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
   data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
   let res = [];
   let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
 // Object with all the relevant converters from pblock data to javascript readable data
 let convertFunctions = {
   [FieldTypes.C_BOOL]: convertBool,
   [FieldTypes.C_TINYINT]: convertTinyint,
   [FieldTypes.C_SMALLINT]: convertSmallint,
   [FieldTypes.C_INT]: convertInt,
   [FieldTypes.C_BIGINT]: convertBigint,
   [FieldTypes.C_FLOAT]: convertFloat,
   [FieldTypes.C_DOUBLE]: convertDouble,
   [FieldTypes.C_BINARY]: convertBinary,
   [FieldTypes.C_TIMESTAMP]: convertTimestamp,
   [FieldTypes.C_NCHAR]: convertNchar
 }

 // Define TaosField structure
 var char_arr = ArrayType(ref.types.char);
 var TaosField = Struct({
   'name': char_arr,
 });
 TaosField.fields.name.type.size = 65;
 TaosField.defineProperty('type', ref.types.char);
 TaosField.defineProperty('bytes', ref.types.short);
@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short);
  * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
  * access this class directly and use it unless you understand what these functions do.
  */
 function CTaosInterface(config = null, pass = false) {
   ref.types.char_ptr = ref.refType(ref.types.char);
   ref.types.void_ptr = ref.refType(ref.types.void);
   ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) {
     taoslibname = 'libtaos';
   }
   this.libtaos = ffi.Library(taoslibname, {
     'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]],
     'taos_init': [ref.types.void, []],
     //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
     'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]],
     //void taos_close(TAOS *taos)
     'taos_close': [ref.types.void, [ref.types.void_ptr]],
-    //int *taos_fetch_lengths(TAOS_RES *taos);
+    //int *taos_fetch_lengths(TAOS_RES *res);
     'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]],
     //int taos_query(TAOS *taos, char *sqlstr)
     'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]],
-    //int taos_affected_rows(TAOS *taos)
+    //int taos_affected_rows(TAOS_RES *res)
     'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]],
     //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
     'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]],
     //int taos_num_fields(TAOS_RES *res);
     'taos_num_fields': [ref.types.int, [ref.types.void_ptr]],
     //TAOS_ROW taos_fetch_row(TAOS_RES *res)
     //TAOS_ROW is void **, but we set the return type as a reference instead to get the row
     'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]],
     'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
     //int taos_result_precision(TAOS_RES *res)
     'taos_result_precision': [ref.types.int, [ref.types.void_ptr]],
     //void taos_free_result(TAOS_RES *res)
     'taos_free_result': [ref.types.void, [ref.types.void_ptr]],
     //int taos_field_count(TAOS *taos)
     'taos_field_count': [ref.types.int, [ref.types.void_ptr]],
     //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
     'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]],
     //int taos_errno(TAOS *taos)
     'taos_errno': [ref.types.int, [ref.types.void_ptr]],
     //char *taos_errstr(TAOS *taos)
     'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]],
     //void taos_stop_query(TAOS_RES *res);
     'taos_stop_query': [ref.types.void, [ref.types.void_ptr]],
     //char *taos_get_server_info(TAOS *taos);
     'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]],
     //char *taos_get_client_info();
     'taos_get_client_info': [ref.types.char_ptr, []],
     // ASYNC
     // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
     'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]],
     // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
     'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]],
     // Subscription
     //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
     'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
     // TAOS_RES *taos_consume(TAOS_SUB *tsub)
     'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]],
     //void taos_unsubscribe(TAOS_SUB *tsub);
     'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]],
     // Continuous Query
     //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
     //                              int64_t stime, void *param, void (*callback)(void *));
     'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]],
     //void taos_close_stream(TAOS_STREAM *tstr);
     'taos_close_stream': [ref.types.void, [ref.types.void_ptr]]
   });
   if (pass == false) {
@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) {
     try {
       this._config = ref.allocCString(config);
     }
     catch (err) {
       throw "Attribute Error: config is expected as a str";
     }
   }
@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) {
   return this;
 }
 CTaosInterface.prototype.config = function config() {
   return this._config;
 }
 CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
   let _host, _user, _password, _db, _port;
   try {
     _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
   }
   catch (err) {
     throw "Attribute Error: host is expected as a str";
   }
   try {
     _user = ref.allocCString(user)
   }
   catch (err) {
     throw "Attribute Error: user is expected as a str";
   }
   try {
     _password = ref.allocCString(password);
   }
   catch (err) {
     throw "Attribute Error: password is expected as a str";
   }
   try {
     _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
   }
   catch (err) {
     throw "Attribute Error: db is expected as a str";
   }
   try {
     _port = ref.alloc(ref.types.int, port);
   }
   catch (err) {
     throw TypeError("port is expected as an int")
   }
   let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port);
@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) {
   console.log("Connection is closed");
 }
 CTaosInterface.prototype.query = function query(connection, sql) {
   return this.libtaos.taos_query(connection, ref.allocCString(sql));
 }
-CTaosInterface.prototype.affectedRows = function affectedRows(connection) {
-  return this.libtaos.taos_affected_rows(connection);
+CTaosInterface.prototype.affectedRows = function affectedRows(result) {
+  return this.libtaos.taos_affected_rows(result);
 }
 CTaosInterface.prototype.useResult = function useResult(result) {
@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) {
   pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0);
   for (let i = 0; i < pfields.length; i += 68) {
     //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
     fields.push({
       name: ref.readCString(ref.reinterpret(pfields, 65, i)),
       type: pfields[i + 65],
       bytes: pfields[i + 66]
     })
@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
   return fields;
 }
 CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
-  //let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
-  let pblock = this.libtaos.taos_fetch_row(result);
-  let num_of_rows = 1;
-  if (ref.isNull(pblock) == true) {
-    return { block: null, num_of_rows: 0 };
+  let pblock = ref.NULL_POINTER;
+  let num_of_rows = this.libtaos.taos_fetch_block(result, pblock);
+  if (ref.isNull(pblock.deref()) == true) {
+    return { block: null, num_of_rows: 0 };
   }
   var fieldL = this.libtaos.taos_fetch_lengths(result);
@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
   let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
   var fieldlens = [];
   if (ref.isNull(fieldL) == false) {
     for (let i = 0; i < fields.length; i++) {
       let plen = ref.reinterpret(fieldL, 4, i * 4);
       let len = plen.readInt32LE(0);
       fieldlens.push(len);
     }
@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
   let blocks = new Array(fields.length);
   blocks.fill(null);
-  //num_of_rows = Math.abs(num_of_rows);
+  num_of_rows = Math.abs(num_of_rows);
   let offset = 0;
+  let ptr = pblock.deref();
   for (let i = 0; i < fields.length; i++) {
-    pdata = ref.reinterpret(pblock, 8, i * 8);
+    pdata = ref.reinterpret(ptr, 8, i * 8);
     if (ref.isNull(pdata.readPointer())) {
       blocks[i] = new Array();
     }
     else {
       pdata = ref.ref(pdata.readPointer());
       if (!convertFunctions[fields[i]['type']]) {
        throw new errors.DatabaseError("Invalid data type returned from database");
       }
-      blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
+      blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
     }
   }
-  return { blocks: blocks, num_of_rows: Math.abs(num_of_rows) }
+  return { blocks: blocks, num_of_rows }
 }
 CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) {
   let row = this.libtaos.taos_fetch_row(result);
@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) {
 // Async
 CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) {
   // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
   callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback);
   this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
   return param;
 }
@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
     var fieldL = cti.libtaos.taos_fetch_lengths(result);
     var fieldlens = [];
     if (ref.isNull(fieldL) == false) {
       for (let i = 0; i < fields.length; i++) {
         let plen = ref.reinterpret(fieldL, 8, i * 8);
         let len = ref.get(plen, 0, ref.types.int32);
         fieldlens.push(len);
       }
     }
-    if (numOfRows2 > 0){
+    if (numOfRows2 > 0) {
       for (let i = 0; i < fields.length; i++) {
         if (ref.isNull(pdata.readPointer())) {
           blocks[i] = new Array();
         }
         else {
           if (!convertFunctions[fields[i]['type']]) {
             throw new errors.DatabaseError("Invalid data type returned from database");
           }
           let prow = ref.reinterpret(row, 8, i * 8);
           prow = prow.readPointer();
           prow = ref.ref(prow);
           blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
           //offset += fields[i]['bytes'] * numOfRows2;
         }
       }
     }
     callback(param2, result2, numOfRows2, blocks);
   }
   asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper);
   this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param);
   return param;
 }
 // Fetch field meta data by result handle
 CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) {
   let pfields = this.fetchFields(result);
   let pfieldscount = this.numFields(result);
   let fields = [];
   if (ref.isNull(pfields) == false) {
     pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0);
     for (let i = 0; i < pfields.length; i += 68) {
       //0 - 64 = name //65 = type, 66 - 67 = bytes
       fields.push({
         name: ref.readCString(ref.reinterpret(pfields, 65, i)),
         type: pfields[i + 65],
         bytes: pfields[i + 66]
       })
@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
 }
 // Stop a query by result handle
 CTaosInterface.prototype.stopQuery = function stopQuery(result) {
-  if (result != null){
+  if (result != null) {
     this.libtaos.taos_stop_query(result);
   }
   else {
@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top
   try {
     sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL);
   }
   catch (err) {
     throw "Attribute Error: sql is expected as a str";
   }
   try {
     topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL);
   }
   catch (err) {
     throw TypeError("topic is expected as a str");
   }
@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) {
   pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0);
   for (let i = 0; i < pfields.length; i += 68) {
     //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
     fields.push({
       name: ref.readCString(ref.reinterpret(pfields, 64, i)),
       bytes: pfields[i + 64],
       type: pfields[i + 66]
     })
@@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
   }
   let data = [];
   while (true) {
     let { blocks, num_of_rows } = this.fetchBlock(result, fields);
     if (num_of_rows == 0) {
       break;
@@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
       for (let j = 0; j < fields.length; j++) {
         rowBlock[j] = blocks[j][i];
       }
       data[data.length - 1] = (rowBlock);
     }
   }
   return { data: data, fields: fields, result: result };
@@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
 }
 // Continuous Query
 CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
   try {
     sql = ref.allocCString(sql);
   }
   catch (err) {
     throw "Attribute Error: sql string is expected as a str";
   }
   var cti = this;
@@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
     let offset = 0;
     if (numOfRows2 > 0) {
       for (let i = 0; i < fields.length; i++) {
         if (!convertFunctions[fields[i]['type']]) {
           throw new errors.DatabaseError("Invalid data type returned from database");
         }
         blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
@@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
     }
     callback(param2, result2, blocks, fields);
   }
   asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
   asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
   let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
   if (ref.isNull(streamHandle)) {
     throw new errors.TDError('Failed to open a stream with TDengine');
```
src/connector/nodejs/nodetaos/cursor.js

Note: as in cinterface.js, whitespace-only changed-line pairs render identically in this capture and are shown once. The substantive change is in `fetchall`: the result array is now preallocated from `affectedRows` and filled by index instead of grown with `push`.

```diff
 const ref = require('ref-napi');
 require('./globalfunc.js')
 const CTaosInterface = require('./cinterface')
 const errors = require('./error')
 const TaosQuery = require('./taosquery')
 const { PerformanceObserver, performance } = require('perf_hooks');
 module.exports = TDengineCursor;
@@ -22,7 +22,7 @@ module.exports = TDengineCursor;
  * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
  * @since 1.0.0
  */
 function TDengineCursor(connection = null) {
   //All parameters are store for sync queries only.
   this._rowcount = -1;
   this._connection = null;
@@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
     return null;
   }
   if (typeof options == 'function') {
     callback = options;
   }
   if (typeof options != 'object') options = {}
@@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
 }
 TDengineCursor.prototype._createAffectedResponse = function (num, time) {
   return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
 }
 TDengineCursor.prototype._createSetResponse = function (num, time) {
   return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
 }
 TDengineCursor.prototype.executemany = function executemany() {
@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
     throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
   }

-  let data = [];
+  let num_of_rows = this._chandle.affectedRows(this._result);
+  let data = new Array(num_of_rows);

   this._rowcount = 0;
   //let nodetime = 0;
   let time = 0;
   const obs = new PerformanceObserver((items) => {
     time += items.getEntries()[0].duration;
     performance.clearMarks();
   });
   /*
   const obs2 = new PerformanceObserver((items) => {
     nodetime += items.getEntries()[0].duration;
     performance.clearMarks();
   });
   obs2.observe({ entryTypes: ['measure'] });
   performance.mark('nodea');
   */
   obs.observe({ entryTypes: ['measure'] });
   performance.mark('A');
   while (true) {
     let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
     // console.log(blockAndRows);
     // break;
     let block = blockAndRows.blocks;
     let num_of_rows = blockAndRows.num_of_rows;
     if (num_of_rows == 0) {
@@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
     this._rowcount += num_of_rows;
     let numoffields = this._fields.length;
     for (let i = 0; i < num_of_rows; i++) {
-      data.push([]);
+      // data.push([]);
       let rowBlock = new Array(numoffields);
       for (let j = 0; j < numoffields; j++) {
         rowBlock[j] = block[j][i];
       }
-      data[data.length - 1] = (rowBlock);
+      data[this._rowcount - num_of_rows + i] = (rowBlock);
+      // data.push(rowBlock);
     }
   }
   performance.mark('B');
   performance.measure('query', 'A', 'B');
   let response = this._createSetResponse(this._rowcount, time)
   console.log(response);

   // this._connection._clearResultSet();
   let fields = this.fields;
   this._reset_result();
   this.data = data;
@@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
  * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
  * @since 1.0.0
  */
 TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) {
   if (operation == undefined) {
     throw new errors.ProgrammingError('No operation passed as argument');
     return null;
   }
   if (typeof options == 'function') {
     //we expect the parameter after callback to be param
     param = callback;
     callback = options;
@@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
   }
   if (resCode >= 0) {
     // let fieldCount = cr._chandle.numFields(res2);
     // if (fieldCount == 0) {
     //   //cr._chandle.freeResult(res2);
     //   return res2;
     // }
     // else {
     //   return res2;
     // }
     return res2;
   }
@@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
  * })
  */
 TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
   if (typeof options == 'function') {
     //we expect the parameter after callback to be param
     param = callback;
     callback = options;
@@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
         for (let k = 0; k < fields.length; k++) {
           rowBlock[k] = block[k][j];
         }
         data[data.length - 1] = rowBlock;
       }
     }
     cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
     callback(param2, result2, numOfRows2, { data: data, fields: fields });
   }
 }
 ref.writeObject(buf, 0, param);
 param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
 return { param: param, result: result };
 }
 /**
  * Stop a query given the result handle.
@@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) {
  */
 TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
   while (true) {
     let { data, fields, result } = this._chandle.consume(subscription);
     callback(data, fields, result);
   }
 }
@@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
  * @return {Buffer} A buffer pointing to the stream handle
  * @since 1.3.0
  */
 TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
   let buf = ref.alloc('Object');
   ref.writeObject(buf, 0, param);

   let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
     let data = [];
     let num_of_rows = blocks[0].length;
     for (let j = 0; j < num_of_rows; j++) {
       data.push([]);
       let rowBlock = new Array(fields.length);
       for (let k = 0; k < fields.length; k++) {
         rowBlock[k] = blocks[k][j];
       }
       data[data.length - 1] = rowBlock;
     }
     callback(param2, result2, blocks, fields);
   }
   return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
 }
 /**
  * Close a stream
  * @param {Buffer} - A buffer pointing to the handle of the stream to be closed
  * @since 1.3.0
  */
 TDengineCursor.prototype.closeStream = function closeStream(stream) {
   this._chandle.closeStream(stream);
 }
```
src/connector/nodejs/package-lock.json (deleted, file mode 100644 → 0)

```json
{
  "name": "td2.0-connector",
  "version": "2.0.6",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {
    "array-index": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/array-index/-/array-index-1.0.0.tgz",
      "integrity": "sha1-7FanSe4QPk4Ix5C5w1PfFgVbl/k=",
      "requires": { "debug": "^2.2.0", "es6-symbol": "^3.0.2" },
      "dependencies": {
        "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } },
        "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }
      }
    },
    "d": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", "requires": { "es5-ext": "^0.10.50", "type": "^1.0.1" } },
    "debug": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } },
    "es5-ext": { "version": "0.10.53", "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", "requires": { "es6-iterator": "~2.0.3", "es6-symbol": "~3.1.3", "next-tick": "~1.0.0" } },
    "es6-iterator": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", "requires": { "d": "1", "es5-ext": "^0.10.35", "es6-symbol": "^3.1.1" } },
    "es6-symbol": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", "requires": { "d": "^1.0.1", "ext": "^1.1.2" } },
    "ext": {
      "version": "1.4.0",
      "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz",
      "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==",
      "requires": { "type": "^2.0.0" },
      "dependencies": {
        "type": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz", "integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA==" }
      }
    },
    "ffi-napi": {
      "version": "3.1.0",
      "resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-3.1.0.tgz",
      "integrity": "sha512-EsHO+sP2p/nUC/3l/l8m9niee1BLm4asUFDzkkBGR4kYVgp2KqdAYUomZhkKtzim4Fq7mcYHjpUaIHsMqs+E1g==",
      "requires": { "debug": "^4.1.1", "get-uv-event-loop-napi-h": "^1.0.5", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1", "ref-napi": "^2.0.1", "ref-struct-di": "^1.1.0" },
      "dependencies": {
        "ref-napi": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-2.1.2.tgz", "integrity": "sha512-aFl+vrIuLWUXMUTQGAwGAuSNLX3Ub5W3iVP8b7KyFFZUdn4+i4U1TXXTop0kCTUfGNu8glBGVz4lowkwMcPVVA==", "requires": { "debug": "^4.1.1", "get-symbol-from-current-process-h": "^1.0.2", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" } }
      }
    },
    "get-symbol-from-current-process-h": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz", "integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw==" },
    "get-uv-event-loop-napi-h": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz", "integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==", "requires": { "get-symbol-from-current-process-h": "^1.0.1" } },
    "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" },
    "next-tick": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" },
    "node-addon-api": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz", "integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==" },
    "node-gyp-build": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==" },
    "ref-array-napi": {
      "version": "1.2.1",
      "resolved": "https://registry.npmjs.org/ref-array-napi/-/ref-array-napi-1.2.1.tgz",
      "integrity": "sha512-jQp2WWSucmxkqVfoNfm7yDlDeGu3liAbzqfwjNybL80ooLOCnCZpAK2woDInY+lxNOK/VlIVSqeDEYb4gVPuNQ==",
      "requires": { "array-index": "1", "debug": "2", "ref-napi": "^1.4.2" },
      "dependencies": {
        "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } },
        "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" },
        "ref-napi": {
          "version": "1.5.2",
          "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
          "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
          "requires": { "debug": "^3.1.0", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" },
          "dependencies": {
            "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } },
            "ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }
          }
        }
      }
    },
    "ref-napi": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.1.tgz", "integrity": "sha512-W3rcb0E+tlO9u9ySFnX5vifInwwPGToOfFgTZUHJBNiOBsW0NNvgHz2zJN7ctABo/2yIlgdPQUvuqqfORIF4LA==", "requires": { "debug": "^4.1.1", "get-symbol-from-current-process-h": "^1.0.2", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" } },
    "ref-struct-di": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz",
      "integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==",
      "requires": { "debug": "^3.1.0" },
      "dependencies": {
        "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } }
      }
    },
    "ref-struct-napi": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz",
      "integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==",
      "requires": { "debug": "2", "ref-napi": "^1.4.2" },
      "dependencies": {
        "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } },
        "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" },
        "ref-napi": {
          "version": "1.5.2",
          "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
          "integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
          "requires": { "debug": "^3.1.0", "node-addon-api": "^2.0.0", "node-gyp-build": "^4.2.1" },
          "dependencies": {
            "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } },
            "ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }
          }
        }
      }
    },
    "type": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" }
  }
}
```
src/connector/nodejs/package.json
 {
   "name": "td2.0-connector",
-  "version": "2.0.6",
+  "version": "2.0.7",
   "description": "A Node.js connector for TDengine.",
   "main": "tdengine.js",
   "directories": {
...
src/kit/taosdemo/async-sub.json
new file (mode 100644)
{
  "filetype": "subscribe",
  "cfgdir": "/etc/taos",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "databases": "test",
  "specified_table_query": {
    "concurrent": 1,
    "mode": "async",
    "interval": 1000,
    "restart": "yes",
    "keepProgress": "yes",
    "resubAfterConsume": 10,
    "sqls": [
      {
        "sql": "select col1 from meters where col1 > 1;",
        "result": "./subscribe_res0.txt"
      },
      {
        "sql": "select col2 from meters where col2 > 1;",
        "result": "./subscribe_res2.txt"
      }
    ]
  },
  "super_table_query": {
    "stblname": "meters",
    "threads": 1,
    "mode": "sync",
    "interval": 1000,
    "restart": "yes",
    "keepProgress": "yes",
    "sqls": [
      {
        "sql": "select col1 from xxxx where col1 > 10;",
        "result": "./subscribe_res1.txt"
      }
    ]
  }
}
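The "async" mode in specified_table_query maps onto the callback-based subscription path that taosdemo drives through the TDengine C client. As a rough illustration only (not code from this commit), a minimal sketch of such an asynchronous consumer, assuming the standard taos_subscribe/taos_consume client API and using a hypothetical topic name "topic0":

#include <stdio.h>
#include <taos.h>

// Callback invoked by the client library every polling interval;
// code is 0 on success and res holds the rows that arrived since last time.
static void subscribe_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) {
    if (code != 0) {
        fprintf(stderr, "subscribe error: %d\n", code);
        return;
    }
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
        // process one row of "select col1 from meters where col1 > 1"
    }
}

int main(void) {
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 6030);
    if (taos == NULL) return 1;

    // restart=1 mirrors "restart": "yes"; interval=1000 mirrors "interval": 1000 (ms).
    TAOS_SUB *tsub = taos_subscribe(taos, 1, "topic0",
            "select col1 from meters where col1 > 1;",
            subscribe_callback, NULL, 1000);
    getchar();                 // let the callback run until a key is pressed
    taos_unsubscribe(tsub, 1); // keepProgress=1 mirrors "keepProgress": "yes"
    taos_close(taos);
    return 0;
}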
src/kit/taosdemo/subscribe.json
 {
-    "filetype": "subscribe",
+    "filetype": "subscribe",
     "cfgdir": "/etc/taos",
     "host": "127.0.0.1",
     "port": 6030,
     "user": "root",
     "password": "taosdata",
-    "databases": "dbx",
-    "specified_table_query": {
-      "concurrent": 1,
-      "mode": "sync",
-      "interval": 5000,
-      "restart": "yes",
-      "keepProgress": "yes",
-      "sqls": [{
-        "sql": "select avg(col1) from stb01 where col1 > 1;",
-        "result": "./subscribe_res0.txt"
-      }]
-    },
-    "super_table_query": {
-      "stblname": "stb",
-      "threads": 1,
-      "mode": "sync",
-      "interval": 10000,
-      "restart": "yes",
-      "keepProgress": "yes",
-      "sqls": [{
-        "sql": "select col1 from xxxx where col1 > 10;",
-        "result": "./subscribe_res1.txt"
-      }]
-    }
+    "databases": "test",
+    "specified_table_query": {
+      "concurrent": 1,
+      "mode": "sync",
+      "interval": 1000,
+      "restart": "yes",
+      "keepProgress": "yes",
+      "resubAfterConsume": 10,
+      "sqls": [
+        {
+          "sql": "select avg(col1) from meters where col1 > 1;",
+          "result": "./subscribe_res0.txt"
+        }
+      ]
+    },
+    "super_table_query": {
+      "stblname": "meters",
+      "threads": 1,
+      "mode": "sync",
+      "interval": 1000,
+      "restart": "yes",
+      "keepProgress": "yes",
+      "sqls": [
+        {
+          "sql": "select col1 from xxxx where col1 > 10;",
+          "result": "./subscribe_res1.txt"
+        }
+      ]
+    }
 }
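The new resubAfterConsume knob bounds how many consume cycles run before a subscription is dropped and re-created. A rough sketch of the synchronous polling pattern it controls, again assuming the standard C subscription API rather than quoting this commit, with "topic0" as a placeholder topic name:

#include <taos.h>

// Poll a synchronous subscription, resubscribing after N consumes;
// roughly what "mode": "sync" with "resubAfterConsume": 10 asks for.
static void consume_loop(TAOS *taos, const char *sql, int resub_after) {
    TAOS_SUB *tsub = taos_subscribe(taos, 1, "topic0", sql, NULL, NULL, 1000);
    int consumed = 0;
    for (;;) {
        TAOS_RES *res = taos_consume(tsub);   // blocks until the interval elapses
        if (res == NULL) break;
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
            // write the row to the per-thread result file
        }
        if (++consumed >= resub_after) {      // drop and re-create the subscription
            taos_unsubscribe(tsub, 1);        // keepProgress = 1
            tsub = taos_subscribe(taos, 1, "topic0", sql, NULL, NULL, 1000);
            consumed = 0;
        }
    }
    taos_unsubscribe(tsub, 0);
}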
src/kit/taosdemo/taosdemo.c
...
@@ -114,12 +114,18 @@ typedef enum TALBE_EXISTS_EN {
   TBL_EXISTS_BUTT
 } TALBE_EXISTS_EN;

-enum MODE {
+enum enumSYNC_MODE {
   SYNC_MODE,
   ASYNC_MODE,
   MODE_BUT
 };

+typedef enum enumQUERY_CLASS {
+    SPECIFIED_CLASS,
+    STABLE_CLASS,
+    CLASS_BUT
+} QUERY_CLASS;
+
 typedef enum enum_INSERT_MODE {
   PROGRESSIVE_INSERT_MODE,
   INTERLACE_INSERT_MODE,
...
@@ -183,6 +189,8 @@ typedef struct {
 } SColDes;

 /* Used by main to communicate with parse_opt. */
+static char *g_dupstr = NULL;
+
 typedef struct SArguments_S {
   char *metaFile;
   uint32_t test_mode;
...
@@ -227,7 +235,7 @@ typedef struct SColumn_S {
   char field[TSDB_COL_NAME_LEN + 1];
   char dataType[MAX_TB_NAME_SIZE];
   uint32_t dataLen;
-  char note[128];
+  char note[128];
 } StrColumn;

 typedef struct SSuperTable_S {
...
@@ -355,16 +363,20 @@ typedef struct SDbs_S {
 typedef struct SpecifiedQueryInfo_S {
   uint64_t queryInterval;  // 0: unlimit > 0 loop/s
-  uint64_t concurrent;
+  uint32_t concurrent;
   uint64_t sqlCount;
   uint32_t asyncMode; // 0: sync, 1: async
   uint64_t subscribeInterval; // ms
   uint64_t queryTimes;
-  int subscribeRestart;
+  bool subscribeRestart;
   int subscribeKeepProgress;
   char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
   char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+  int resubAfterConsume[MAX_QUERY_SQL_COUNT];
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
+  char topic[MAX_QUERY_SQL_COUNT][32];
+  int consumed[MAX_QUERY_SQL_COUNT];
+  TAOS_RES* res[MAX_QUERY_SQL_COUNT];
   uint64_t totalQueried;
 } SpecifiedQueryInfo;
...
@@ -374,7 +386,7 @@ typedef struct SuperQueryInfo_S {
   uint32_t threadCnt;
   uint32_t asyncMode; // 0: sync, 1: async
   uint64_t subscribeInterval; // ms
-  int subscribeRestart;
+  bool subscribeRestart;
   int subscribeKeepProgress;
   uint64_t queryTimes;
   int64_t childTblCount;
...
@@ -382,6 +394,7 @@ typedef struct SuperQueryInfo_S {
   uint64_t sqlCount;
   char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
   char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
+  int resubAfterConsume;
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
   char* childTblName;
...
@@ -399,7 +412,7 @@ typedef struct SQueryMetaInfo_S {
   char queryMode[MAX_TB_NAME_SIZE];  // taosc, rest
   SpecifiedQueryInfo specifiedQueryInfo;
-  SuperQueryInfo superQueryInfo;
+  SuperQueryInfo superQueryInfo;
   uint64_t totalQueried;
 } SQueryMetaInfo;
...
@@ -408,7 +421,8 @@ typedef struct SThreadInfo_S {
   int threadID;
   char db_name[MAX_DB_NAME_SIZE+1];
   uint32_t time_precision;
-  char fp[4096];
+  char filePath[4096];
+  FILE *fp;
   char tb_prefix[MAX_TB_NAME_SIZE];
   uint64_t start_table_from;
   uint64_t end_table_to;
...
@@ -439,8 +453,10 @@ typedef struct SThreadInfo_S {
   uint64_t maxDelay;
   uint64_t minDelay;

-  // query
+  // seq of query or subscribe
   uint64_t querySeq;   // sequence number of sql command
+  TAOS_SUB* tsub;
+
 } threadInfo;

 #ifdef WINDOWS
...
@@ -516,11 +532,14 @@ static int taosRandom()
 #endif // ifdef Windows

+static void prompt();
 static int createDatabasesAndStables();
 static void createChildTables();
 static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
-static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
-        char* sqlstr, char *resultFile);
+static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
+        char* sqlstr, threadInfo *pThreadInfo);
+static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
+        int disorderRatio, int disorderRange);

 /* ************ Global variables ************ */
...
@@ -668,8 +687,9 @@ static void printHelp() {
       "The data_type of columns, default: INT,INT,INT,INT.");
   printf("%s%s%s%s\n", indent, "-w", indent,
       "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
-  printf("%s%s%s%s\n", indent, "-l", indent,
-      "The number of columns per record. Default is 4.");
+  printf("%s%s%s%s%d\n", indent, "-l", indent,
+      "The number of columns per record. Default is 4. Max values is ",
+      MAX_NUM_DATATYPE);
   printf("%s%s%s%s\n", indent, "-T", indent,
       "The number of threads. Default is 10.");
   printf("%s%s%s%s\n", indent, "-i", indent,
...
@@ -713,7 +733,6 @@ static bool isStringNumber(char *input)
 }

 static void parse_args(int argc, char *argv[], SArguments *arguments) {
-  char **sptr;
   for (int i = 1; i < argc; i++) {
     if (strcmp(argv[i], "-f") == 0) {
...
@@ -840,20 +859,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
       }
       arguments->database = argv[++i];
     } else if (strcmp(argv[i], "-l") == 0) {
-      if ((argc == i+1) ||
-          (!isStringNumber(argv[i+1]))) {
-        printHelp();
-        errorPrint("%s", "\n\t-l need a number following!\n");
-        exit(EXIT_FAILURE);
+      if (argc == i+1) {
+        if (!isStringNumber(argv[i+1])) {
+          printHelp();
+          errorPrint("%s", "\n\t-l need a number following!\n");
+          exit(EXIT_FAILURE);
+        }
       }
       arguments->num_of_CPR = atoi(argv[++i]);
+
+      if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
+        printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
+        prompt();
+        arguments->num_of_CPR = MAX_NUM_DATATYPE;
+      }
+
+      for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
+        arguments->datatype[col] = NULL;
+      }
+
     } else if (strcmp(argv[i], "-b") == 0) {
       if (argc == i+1) {
         printHelp();
         errorPrint("%s", "\n\t-b need valid string following!\n");
         exit(EXIT_FAILURE);
       }
-      sptr = arguments->datatype;
       ++i;
       if (strstr(argv[i], ",") == NULL) {
         // only one col
...
@@ -870,12 +900,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
           errorPrint("%s", "-b: Invalid data_type!\n");
           exit(EXIT_FAILURE);
         }
-        sptr[0] = argv[i];
+        arguments->datatype[0] = argv[i];
       } else {
         // more than one col
         int index = 0;
-        char *dupstr = strdup(argv[i]);
-        char *running = dupstr;
+        g_dupstr = strdup(argv[i]);
+        char *running = g_dupstr;
         char *token = strsep(&running, ",");
         while (token != NULL) {
           if (strcasecmp(token, "INT")
...
@@ -888,16 +918,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
               && strcasecmp(token, "BINARY")
               && strcasecmp(token, "NCHAR")) {
             printHelp();
-            free(dupstr);
+            free(g_dupstr);
             errorPrint("%s", "-b: Invalid data_type!\n");
             exit(EXIT_FAILURE);
           }
-          sptr[index++] = token;
+          arguments->datatype[index++] = token;
           token = strsep(&running, ",");
           if (index >= MAX_NUM_DATATYPE) break;
         }
-        free(dupstr);
-        sptr[index] = NULL;
+        arguments->datatype[index] = NULL;
       }
     } else if (strcmp(argv[i], "-w") == 0) {
       if ((argc == i+1) ||
...
@@ -1031,10 +1060,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
     printf("# Print debug info: %d\n", arguments->debug_print);
     printf("# Print verbose info: %d\n", arguments->verbose_print);
     printf("###################################################################\n");
-    if (!arguments->answer_yes) {
-      printf("Press enter key to continue\n\n");
-      (void)getchar();
-    }
+
+    prompt();
   }
 }
...
@@ -1072,9 +1099,9 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
     }
   }

-  verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
   if (code != 0) {
     if (!quiet) {
+      debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
       errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res));
     }
     taos_free_result(res);
...
@@ -1092,25 +1119,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
   return 0;
 }

-static void appendResultBufToFile(char *resultBuf, char *resultFile)
+static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
 {
-  FILE *fp = NULL;
-  if (resultFile[0] != 0) {
-    fp = fopen(resultFile, "at");
-    if (fp == NULL) {
+  pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
+  if (pThreadInfo->fp == NULL) {
       errorPrint(
           "%s() LN%d, failed to open result file: %s, result will not save to file\n",
-          __func__, __LINE__, resultFile);
+          __func__, __LINE__, pThreadInfo->filePath);
       return;
-    }
   }

-  fprintf(fp, "%s", resultBuf);
-  tmfclose(fp);
+  fprintf(pThreadInfo->fp, "%s", resultBuf);
+  tmfclose(pThreadInfo->fp);
+  pThreadInfo->fp = NULL;
 }

-static void appendResultToFile(TAOS_RES *res, char* resultFile) {
+static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
   TAOS_ROW    row = NULL;
   int         num_rows = 0;
   int         num_fields = taos_field_count(res);
...
@@ -1128,10 +1152,11 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
   // fetch the records row by row
   while((row = taos_fetch_row(res))) {
-    if (totalLen >= 100*1024*1024 - 32000) {
-      appendResultBufToFile(databuf, resultFile);
+    if ((strlen(pThreadInfo->filePath) > 0)
+            && (totalLen >= 100*1024*1024 - 32000)) {
+      appendResultBufToFile(databuf, pThreadInfo);
       totalLen = 0;
       memset(databuf, 0, 100*1024*1024);
     }
     num_rows++;
     int len = taos_print_row(temp, row, fields, num_fields);
...
@@ -1141,12 +1166,16 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
     totalLen += len;
   }

-  verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
-      __func__, __LINE__, databuf, resultFile);
-  appendResultBufToFile(databuf, resultFile);
+  verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
+      __func__, __LINE__, databuf, pThreadInfo->filePath);
+  if (strlen(pThreadInfo->filePath) > 0) {
+    appendResultBufToFile(databuf, pThreadInfo);
+  }
   free(databuf);
 }

-static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* resultFile)
+static void selectAndGetResult(threadInfo *pThreadInfo, char *command)
 {
   if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
     TAOS_RES *res = taos_query(pThreadInfo->taos, command);
...
@@ -1157,16 +1186,14 @@ static void selectAndGetResult(threadInfo *pThreadInfo, char *command, char* res
       return;
     }

-    if ((resultFile) && (strlen(resultFile))) {
-      appendResultToFile(res, resultFile);
-    }
+    fetchResult(res, pThreadInfo);
     taos_free_result(res);

   } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
     int retCode = postProceSql(
             g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
-            command, resultFile);
+            command, pThreadInfo);
     if (0 != retCode) {
       printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
     }
...
@@ -1291,13 +1318,15 @@ static void init_rand_data() {
 static int printfInsertMeta() {
   SHOW_PARSE_RESULT_START();

-  printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
+  printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
   printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
   printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
   printf("configDir: \033[33m%s\033[0m\n", configDir);
   printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
   printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
-  printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
+  printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
   printf("top insert interval: \033[33m%" PRIu64 "\033[0m\n", g_args.insert_interval);
   printf("number of records per req: \033[33m%" PRIu64 "\033[0m\n",
...
@@ -1309,7 +1338,8 @@ static int printfInsertMeta() {
   for (int i = 0; i < g_Dbs.dbCount; i++) {
     printf("database[\033[33m%d\033[0m]:\n", i);
-    printf("  database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName);
+    printf("  database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName);
     if (0 == g_Dbs.db[i].drop) {
       printf("  drop: \033[33mno\033[0m\n");
     } else {
...
@@ -1317,40 +1347,51 @@ static int printfInsertMeta() {
     }
     if (g_Dbs.db[i].dbCfg.blocks > 0) {
-      printf("  blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks);
+      printf("  blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks);
     }
     if (g_Dbs.db[i].dbCfg.cache > 0) {
-      printf("  cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache);
+      printf("  cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache);
     }
     if (g_Dbs.db[i].dbCfg.days > 0) {
-      printf("  days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days);
+      printf("  days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days);
     }
     if (g_Dbs.db[i].dbCfg.keep > 0) {
-      printf("  keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep);
+      printf("  keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep);
     }
     if (g_Dbs.db[i].dbCfg.replica > 0) {
-      printf("  replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica);
+      printf("  replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica);
     }
     if (g_Dbs.db[i].dbCfg.update > 0) {
-      printf("  update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update);
+      printf("  update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update);
     }
     if (g_Dbs.db[i].dbCfg.minRows > 0) {
-      printf("  minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows);
+      printf("  minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows);
     }
     if (g_Dbs.db[i].dbCfg.maxRows > 0) {
-      printf("  maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows);
+      printf("  maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows);
     }
     if (g_Dbs.db[i].dbCfg.comp > 0) {
       printf("  comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
     }
     if (g_Dbs.db[i].dbCfg.walLevel > 0) {
-      printf("  walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel);
+      printf("  walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel);
     }
     if (g_Dbs.db[i].dbCfg.fsync > 0) {
-      printf("  fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync);
+      printf("  fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync);
     }
     if (g_Dbs.db[i].dbCfg.quorum > 0) {
-      printf("  quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum);
+      printf("  quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum);
     }
     if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
       if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
...
@@ -1543,21 +1584,26 @@ static void printfInsertMetaToFile(FILE* fp) {
     if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
       if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
               || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
-        fprintf(fp, "  precision: %s\n", g_Dbs.db[i].dbCfg.precision);
+        fprintf(fp, "  precision: %s\n", g_Dbs.db[i].dbCfg.precision);
       } else {
-        fprintf(fp, "  precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
+        fprintf(fp, "  precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
       }
     }
-    fprintf(fp, "  super table count: %" PRIu64 "\n", g_Dbs.db[i].superTblCount);
+    fprintf(fp, "  super table count: %" PRIu64 "\n", g_Dbs.db[i].superTblCount);
     for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
       fprintf(fp, "  super table[%d]:\n", j);
-      fprintf(fp, "      stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
+      fprintf(fp, "      stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
       if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
         fprintf(fp, "  autoCreateTable: %s\n", "no");
-      } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+      } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
         fprintf(fp, "  autoCreateTable: %s\n", "yes");
       } else {
         fprintf(fp, "  autoCreateTable: %s\n", "error");
...
@@ -1565,7 +1611,8 @@ static void printfInsertMetaToFile(FILE* fp) {
       if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
         fprintf(fp, "  childTblExists: %s\n", "no");
-      } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+      } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
         fprintf(fp, "  childTblExists: %s\n", "yes");
       } else {
         fprintf(fp, "  childTblExists: %s\n", "error");
...
@@ -1596,8 +1643,10 @@ static void printfInsertMetaToFile(FILE* fp) {
       */
       fprintf(fp, "  interlaceRows: %" PRIu64 "\n", g_Dbs.db[i].superTbls[j].interlaceRows);
-      fprintf(fp, "  disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
-      fprintf(fp, "  disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
+      fprintf(fp, "  disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
+      fprintf(fp, "  disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
       fprintf(fp, "  maxSqlLen: %" PRIu64 "\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
...
@@ -1605,23 +1654,29 @@ static void printfInsertMetaToFile(FILE* fp) {
           g_Dbs.db[i].superTbls[j].timeStampStep);
       fprintf(fp, "  startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp);
-      fprintf(fp, "  sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
-      fprintf(fp, "  sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
-      fprintf(fp, "  tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile);
+      fprintf(fp, "  sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
+      fprintf(fp, "  sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
+      fprintf(fp, "  tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile);
-      fprintf(fp, "  columnCount: %d\n", g_Dbs.db[i].superTbls[j].columnCount);
+      fprintf(fp, "  columnCount: %d\n", g_Dbs.db[i].superTbls[j].columnCount);
       for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
         //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
         if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", strlen("binary")))
-                || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+                || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
                     "nchar", strlen("nchar")))) {
           fprintf(fp, "column[%d]:%s(%d) ", k,
               g_Dbs.db[i].superTbls[j].columns[k].dataType,
               g_Dbs.db[i].superTbls[j].columns[k].dataLen);
         } else {
-          fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
+          fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
         }
       }
       fprintf(fp, "\n");
...
@@ -1634,7 +1689,8 @@ static void printfInsertMetaToFile(FILE* fp) {
             "binary", strlen("binary")))
                 || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
                     "nchar", strlen("nchar")))) {
-          fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
+          fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
               g_Dbs.db[i].superTbls[j].tags[k].dataLen);
         } else {
           fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
...
@@ -1670,7 +1726,7 @@ static void printfQueryMeta() {
   printf("query interval: \033[33m%" PRIu64 " ms\033[0m\n",
       g_queryInfo.specifiedQueryInfo.queryInterval);
   printf("top query times:\033[33m%" PRIu64 "\033[0m\n", g_args.query_times);
-  printf("concurrent: \033[33m%" PRIu64 "\033[0m\n",
+  printf("concurrent: \033[33m%d\033[0m\n",
       g_queryInfo.specifiedQueryInfo.concurrent);
   printf("mod: \033[33m%s\033[0m\n",
       (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
...
@@ -1967,13 +2023,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
   // show variables
   res = taos_query(taos, "show variables;");
-  //appendResultToFile(res, filename);
+  //fetchResult(res, filename);
   xDumpResultToFile(filename, res);

   // show dnodes
   res = taos_query(taos, "show dnodes;");
   xDumpResultToFile(filename, res);
-  //appendResultToFile(res, filename);
+  //fetchResult(res, filename);

   // show databases
   res = taos_query(taos, "show databases;");
...
@@ -2009,7 +2065,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
 }

 static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
-        char* sqlstr, char *resultFile)
+        char* sqlstr, threadInfo *pThreadInfo)
 {
     char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
...
@@ -2145,8 +2201,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
     response_buf[RESP_BUF_LEN - 1] = '\0';
     printf("Response:\n %s\n", response_buf);

-    if (resultFile) {
-        appendResultBufToFile(response_buf, resultFile);
+    if (strlen(pThreadInfo->filePath) > 0) {
+        appendResultBufToFile(response_buf, pThreadInfo);
     }

     free(request_buf);
...
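For reference, the req_fmt template in postProceSql() expands into an ordinary HTTP request against TDengine's RESTful interface. A rough, hypothetical illustration of the buffer it produces; the "/rest/sql" path, port 6041, and the base64 credentials below are illustrative placeholders, not values taken from this commit:

#include <stdio.h>
#include <string.h>

// Formats the same kind of request postProceSql() writes to the server socket.
int main(void) {
    const char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\n"
        "Authorization: Basic %s\r\nContent-Length: %d\r\n"
        "Content-Type: application/x-www-form-urlencoded\r\n\r\n%s";
    const char *sql = "select count(*) from test.meters;";
    char buf[1024];
    snprintf(buf, sizeof(buf), req_fmt, "/rest/sql", "127.0.0.1", 6041,
             "cm9vdDp0YW9zZGF0YQ==",   // base64("root:taosdata")
             (int)strlen(sql), sql);
    puts(buf);   // the raw request body, SQL statement included
    return 0;
}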
@@ -2638,8 +2694,6 @@ static int createSuperTable(
   snprintf(command, BUFFER_SIZE,
       "create table if not exists %s.%s (ts timestamp%s) tags %s",
       dbName, superTbl->sTblName, cols, tags);
-  verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command);
-
   if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
     errorPrint("create supertable %s failed!\n\n", superTbl->sTblName);
...
@@ -2662,7 +2716,6 @@ static int createDatabasesAndStables() {
   for (int i = 0; i < g_Dbs.dbCount; i++) {
     if (g_Dbs.db[i].drop) {
       sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
-      verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
       if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
         taos_close(taos);
         return -1;
...
@@ -2735,7 +2788,6 @@ static int createDatabasesAndStables() {
           " precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
     }

-    debugPrint("%s() %d command: %s\n", __func__, __LINE__, command);
     if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
       taos_close(taos);
       errorPrint("\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName);
...
@@ -2752,8 +2804,6 @@ static int createDatabasesAndStables() {
     for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
       sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
           g_Dbs.db[i].superTbls[j].sTblName);
-      verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
-
       ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);

       if ((ret != 0) || (g_Dbs.db[i].drop)) {
...
@@ -2857,7 +2907,6 @@ static void* createTable(void *sarg)
     }

     len = 0;
-    verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer);
     if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){
       errorPrint("queryDbExec() failed. buffer:\n%s\n", buffer);
       free(buffer);
...
@@ -2873,7 +2922,6 @@ static void* createTable(void *sarg)
   }

   if (0 != len) {
-    verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer);
     if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) {
       errorPrint("queryDbExec() failed. buffer:\n%s\n", buffer);
     }
...
@@ -2909,18 +2957,18 @@ static int startMultiThreadCreateChildTable(
   b = ntables % threads;

   for (int64_t i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    t_info->threadID = i;
-    tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
-    t_info->superTblInfo = superTblInfo;
+    threadInfo *pThreadInfo = infos + i;
+    pThreadInfo->threadID = i;
+    tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+    pThreadInfo->superTblInfo = superTblInfo;
     verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
-    t_info->taos = taos_connect(
+    pThreadInfo->taos = taos_connect(
         g_Dbs.host,
         g_Dbs.user,
         g_Dbs.password,
         db_name,
         g_Dbs.port);
-    if (t_info->taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
       errorPrint("%s() LN%d, Failed to connect to TDengine, reason:%s\n",
           __func__, __LINE__, taos_errstr(NULL));
       free(pids);
...
@@ -2928,14 +2976,14 @@ static int startMultiThreadCreateChildTable(
       return -1;
     }

-    t_info->start_table_from = startFrom;
-    t_info->ntables = i < b ? a + 1 : a;
-    t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-    startFrom = t_info->end_table_to + 1;
-    t_info->use_metric = true;
-    t_info->cols = cols;
-    t_info->minDelay = UINT64_MAX;
-    pthread_create(pids + i, NULL, createTable, t_info);
+    pThreadInfo->start_table_from = startFrom;
+    pThreadInfo->ntables = i < b ? a + 1 : a;
+    pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+    startFrom = pThreadInfo->end_table_to + 1;
+    pThreadInfo->use_metric = true;
+    pThreadInfo->cols = cols;
+    pThreadInfo->minDelay = UINT64_MAX;
+    pthread_create(pids + i, NULL, createTable, pThreadInfo);
   }

   for (int i = 0; i < threads; i++) {
...
@@ -2943,8 +2991,8 @@ static int startMultiThreadCreateChildTable(
   }

   for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infos + i;
-    taos_close(t_info->taos);
+    threadInfo *pThreadInfo = infos + i;
+    taos_close(pThreadInfo->taos);
   }

   free(pids);
...
@@ -3403,19 +3451,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     }
     g_args.interlace_rows = interlaceRows->valueint;
-
-    // rows per table need be less than insert batch
-    if (g_args.interlace_rows > g_args.num_of_RPR) {
-      printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
-          g_args.interlace_rows, g_args.num_of_RPR);
-      printf("        interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
-          g_args.num_of_RPR);
-      if (!g_args.answer_yes) {
-        printf("        press Enter key to continue or Ctrl-C to stop.");
-        (void)getchar();
-      }
-      g_args.interlace_rows = g_args.num_of_RPR;
-    }
   } else if (!interlaceRows) {
     g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
   } else {
...
@@ -3447,6 +3482,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         __func__, __LINE__);
     goto PARSE_OVER;
+  } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
+    printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n",
+        numRecPerReq->valueint, MAX_RECORDS_PER_REQ);
+    printf("        number of records per request value will be set to %d\n\n",
+        MAX_RECORDS_PER_REQ);
+    prompt();
+    numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
   }
   g_args.num_of_RPR = numRecPerReq->valueint;
...
@@ -3470,12 +3510,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       g_args.answer_yes = false;
     }
   } else if (!answerPrompt) {
-    g_args.answer_yes = false;
+    g_args.answer_yes = true;   // default is no, mean answer_yes.
   } else {
-    printf("ERROR: failed to read json, confirm_parameter_prompt not found\n");
+    errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n");
     goto PARSE_OVER;
   }

+  // rows per table need be less than insert batch
+  if (g_args.interlace_rows > g_args.num_of_RPR) {
+    printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
+        g_args.interlace_rows, g_args.num_of_RPR);
+    printf("        interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
+        g_args.num_of_RPR);
+    prompt();
+    g_args.interlace_rows = g_args.num_of_RPR;
+  }
+
   cJSON* dbs = cJSON_GetObjectItem(root, "databases");
   if (!dbs || dbs->type != cJSON_Array) {
     printf("ERROR: failed to read json, databases not found\n");
...
@@ -3932,10 +3982,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
             i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
         printf("        interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
             g_args.num_of_RPR);
-        if (!g_args.answer_yes) {
-          printf("        press Enter key to continue or Ctrl-C to stop.");
-          (void)getchar();
-        }
+        prompt();
         g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR;
       }
     } else if (!interlaceRows) {
...
@@ -4131,7 +4178,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
       "query_times");
   if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
     if (specifiedQueryTimes->valueint <= 0) {
-      errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+      errorPrint(
+          "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
           __func__, __LINE__, specifiedQueryTimes->valueint);
       goto PARSE_OVER;
...
@@ -4148,7 +4196,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
   if (concurrent && concurrent->type == cJSON_Number) {
     if (concurrent->valueint <= 0) {
-      errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+      errorPrint(
+          "%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n",
           __func__, __LINE__,
           g_queryInfo.specifiedQueryInfo.sqlCount,
           g_queryInfo.specifiedQueryInfo.concurrent);
...
@@ -4187,15 +4236,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart");
   if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
     if (0 == strcmp("yes", restart->valuestring)) {
-      g_queryInfo.specifiedQueryInfo.subscribeRestart = 1;
+      g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
     } else if (0 == strcmp("no", restart->valuestring)) {
-      g_queryInfo.specifiedQueryInfo.subscribeRestart = 0;
+      g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
     } else {
       printf("ERROR: failed to read json, subscribe restart error\n");
       goto PARSE_OVER;
     }
   } else {
-    g_queryInfo.specifiedQueryInfo.subscribeRestart = 1;
+    g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
   }

   cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress");
...
@@ -4215,24 +4264,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   }

   // sqls
-  cJSON* superSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
-  if (!superSqls) {
+  cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
+  if (!specifiedSqls) {
     g_queryInfo.specifiedQueryInfo.sqlCount = 0;
-  } else if (superSqls->type != cJSON_Array) {
+  } else if (specifiedSqls->type != cJSON_Array) {
     errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
         __func__, __LINE__);
     goto PARSE_OVER;
   } else {
-    int superSqlSize = cJSON_GetArraySize(superSqls);
-    if (superSqlSize > MAX_QUERY_SQL_COUNT) {
-      errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
-          __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+    int superSqlSize = cJSON_GetArraySize(specifiedSqls);
+    if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent > MAX_QUERY_SQL_COUNT) {
+      errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
+          __func__, __LINE__,
+          superSqlSize,
+          g_queryInfo.specifiedQueryInfo.concurrent,
+          MAX_QUERY_SQL_COUNT);
       goto PARSE_OVER;
     }

     g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize;
     for (int j = 0; j < superSqlSize; ++j) {
-      cJSON* sql = cJSON_GetArrayItem(superSqls, j);
+      cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j);
       if (sql == NULL) continue;

       cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
...
@@ -4240,13 +4293,29 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
         printf("ERROR: failed to read json, sql not found\n");
         goto PARSE_OVER;
       }
-      tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+      tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
+          sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
+
+      cJSON* resubAfterConsume =
+          cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
+      if (resubAfterConsume && resubAfterConsume->type == cJSON_Number) {
+        g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] =
+            resubAfterConsume->valueint;
+      } else if (!resubAfterConsume) {
+        //printf("failed to read json, subscribe interval no found\n");
+        //goto PARSE_OVER;
+        g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = 1;
+      }

       cJSON *result = cJSON_GetObjectItem(sql, "result");
-      if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
-        tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
+      if ((NULL != result) && (result->type == cJSON_String)
+              && (result->valuestring != NULL)) {
+        tstrncpy(g_queryInfo.specifiedQueryInfo.result[j],
+            result->valuestring, MAX_FILE_NAME_LEN);
       } else if (NULL == result) {
-        memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
+        memset(g_queryInfo.specifiedQueryInfo.result[j],
+            0, MAX_FILE_NAME_LEN);
       } else {
         printf("ERROR: failed to read json, super query result file not found\n");
         goto PARSE_OVER;
...
@@ -4353,43 +4422,55 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   if (subrestart && subrestart->type == cJSON_String
           && subrestart->valuestring != NULL) {
     if (0 == strcmp("yes", subrestart->valuestring)) {
-      g_queryInfo.superQueryInfo.subscribeRestart = 1;
+      g_queryInfo.superQueryInfo.subscribeRestart = true;
     } else if (0 == strcmp("no", subrestart->valuestring)) {
-      g_queryInfo.superQueryInfo.subscribeRestart = 0;
+      g_queryInfo.superQueryInfo.subscribeRestart = false;
     } else {
       printf("ERROR: failed to read json, subscribe restart error\n");
       goto PARSE_OVER;
     }
   } else {
-    g_queryInfo.superQueryInfo.subscribeRestart = 1;
+    g_queryInfo.superQueryInfo.subscribeRestart = true;
   }

-  cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
-  if (subkeepProgress &&
-          subkeepProgress->type == cJSON_String
-          && subkeepProgress->valuestring != NULL) {
-    if (0 == strcmp("yes", subkeepProgress->valuestring)) {
+  cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
+  if (superkeepProgress &&
+          superkeepProgress->type == cJSON_String
+          && superkeepProgress->valuestring != NULL) {
+    if (0 == strcmp("yes", superkeepProgress->valuestring)) {
       g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
-    } else if (0 == strcmp("no", subkeepProgress->valuestring)) {
+    } else if (0 == strcmp("no", superkeepProgress->valuestring)) {
       g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
     } else {
-      printf("ERROR: failed to read json, subscribe keepProgress error\n");
+      printf("ERROR: failed to read json, subscribe super table keepProgress error\n");
       goto PARSE_OVER;
     }
   } else {
     g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
   }

-  // sqls
-  cJSON* subsqls = cJSON_GetObjectItem(superQuery, "sqls");
-  if (!subsqls) {
+  cJSON* superResubAfterConsume =
+      cJSON_GetObjectItem(superQuery, "resubAfterConsume");
+  if (superResubAfterConsume && superResubAfterConsume->type == cJSON_Number) {
+    g_queryInfo.superQueryInfo.resubAfterConsume =
+        superResubAfterConsume->valueint;
+  } else if (!superResubAfterConsume) {
+    //printf("failed to read json, subscribe interval no found\n");
+    ////goto PARSE_OVER;
+    g_queryInfo.superQueryInfo.resubAfterConsume = 1;
+  }

+  // supert table sqls
+  cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
+  if (!superSqls) {
     g_queryInfo.superQueryInfo.sqlCount = 0;
-  } else if (subsqls->type != cJSON_Array) {
+  } else if (superSqls->type != cJSON_Array) {
     errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
         __func__, __LINE__);
     goto PARSE_OVER;
   } else {
-    int superSqlSize = cJSON_GetArraySize(subsqls);
+    int superSqlSize = cJSON_GetArraySize(superSqls);
     if (superSqlSize > MAX_QUERY_SQL_COUNT) {
       errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
           __func__, __LINE__, MAX_QUERY_SQL_COUNT);
...
@@ -4398,7 +4479,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
     g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
     for (int j = 0; j < superSqlSize; ++j) {
-      cJSON* sql = cJSON_GetArrayItem(subsqls, j);
+      cJSON* sql = cJSON_GetArrayItem(superSqls, j);
       if (sql == NULL) continue;

       cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
...
@@ -4746,10 +4827,12 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k)
   return affectedRows;
 }

-static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
+static void getTableName(char *pTblName,
+        threadInfo* pThreadInfo, uint64_t tableSeq)
 {
   SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
-  if (superTblInfo) {
+  if ((superTblInfo)
+          && (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable)) {
     if (superTblInfo->childTblLimit > 0) {
       snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
           superTblInfo->childTblName +
...
@@ -4788,6 +4871,14 @@ static int64_t generateDataTail(
   verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);

+  bool tsRand;
+  if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
+              "rand", strlen("rand")))) {
+    tsRand = true;
+  } else {
+    tsRand = false;
+  }
+
   uint64_t k = 0;
   for (k = 0; k < batch;) {
     char data[MAX_DATA_SIZE];
...
@@ -4796,71 +4887,47 @@ static int64_t generateDataTail(
     int64_t retLen = 0;

     if (superTblInfo) {
-      if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) {
+      if (tsRand) {
+        retLen = generateRowData(
+            data,
+            startTime + getTSRandTail(
+                superTblInfo->timeStampStep, k,
+                superTblInfo->disorderRatio,
+                superTblInfo->disorderRange),
+            superTblInfo);
+      } else {
         retLen = getRowDataFromSample(
             data,
             remainderBufLen,
             startTime + superTblInfo->timeStampStep * k,
             superTblInfo,
             pSamplePos);
-      } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) {
-        int64_t randTail = superTblInfo->timeStampStep * k;
-        if (superTblInfo->disorderRatio > 0) {
-          int rand_num = taosRandom() % 100;
-          if (rand_num < superTblInfo->disorderRatio) {
-            randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
-            debugPrint("rand data generated, back %"PRId64"\n", randTail);
-          }
-        }
-        int64_t d = startTime + randTail;
-        retLen = generateRowData(data, d, superTblInfo);
       }

       if (retLen > remainderBufLen) {
         break;
       }

       pstr += snprintf(pstr, retLen + 1, "%s", data);
       k++;
       len += retLen;
       remainderBufLen -= retLen;
     } else {
       char **data_type = g_args.datatype;
       int lenOfBinary = g_args.len_of_binary;
-      int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
-      if (g_args.disorderRatio != 0) {
-        int rand_num = taosRandom() % 100;
-        if (rand_num < g_args.disorderRatio) {
-          randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1);
-          debugPrint("rand data generated, back %"PRId64"\n", randTail);
-        }
-      } else {
-        randTail = DEFAULT_TIMESTAMP_STEP * k;
-      }
-      retLen = generateData(data, data_type, ncols_per_record,
-          startTime + randTail, lenOfBinary);
+      retLen = generateData(data, data_type,
+          ncols_per_record,
+          startTime + getTSRandTail(
+              DEFAULT_TIMESTAMP_STEP, k,
+              g_args.disorderRatio,
+              g_args.disorderRange),
+          lenOfBinary);

       if (len > remainderBufLen)
         break;

       pstr += sprintf(pstr, "%s", data);
       k++;
       len += retLen;
       remainderBufLen -= retLen;
     }

     verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64"\nbuffer=%s\n",
...
@@ -5003,6 +5070,22 @@ static int64_t generateInterlaceDataBuffer(
   return k;
 }

+static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
+        int disorderRatio, int disorderRange)
+{
+  int64_t randTail = timeStampStep * seq;
+  if (disorderRatio > 0) {
+    int rand_num = taosRandom() % 100;
+    if (rand_num < disorderRatio) {
+      randTail = (randTail + (taosRandom() % disorderRange + 1)) * (-1);
+      debugPrint("rand data generated, back %"PRId64"\n", randTail);
+    }
+  }
+
+  return randTail;
+}
+
 static int64_t generateProgressiveDataBuffer(
         char *tableName,
         int64_t tableSeq,
...
int64_t
start_time
=
pThreadInfo
->
start_time
;
int64_t
insertRows
=
(
superTblInfo
)
?
superTblInfo
->
insertRows
:
g_args
.
num_of_DPT
;
verbosePrint
(
"%s() LN%d insertRows=%"
PRId64
"
\n
"
,
__func__
,
__LINE__
,
insertRows
);
for
(
uint64_t
i
=
0
;
i
<
insertRows
;)
{
...
...
@@ -5508,7 +5592,8 @@ static void callBack(void *param, TAOS_RES *res, int code) {
int
rand_num
=
taosRandom
()
%
100
;
if
(
0
!=
pThreadInfo
->
superTblInfo
->
disorderRatio
&&
rand_num
<
pThreadInfo
->
superTblInfo
->
disorderRatio
)
{
int64_t
d
=
pThreadInfo
->
lastTs
-
(
taosRandom
()
%
pThreadInfo
->
superTblInfo
->
disorderRange
+
1
);
int64_t
d
=
pThreadInfo
->
lastTs
-
(
taosRandom
()
%
pThreadInfo
->
superTblInfo
->
disorderRange
+
1
);
generateRowData
(
data
,
d
,
pThreadInfo
->
superTblInfo
);
}
else
{
generateRowData
(
data
,
pThreadInfo
->
lastTs
+=
1000
,
pThreadInfo
->
superTblInfo
);
...
...
@@ -5691,19 +5776,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
&&
((
superTblInfo
->
childTblOffset
+
superTblInfo
->
childTblLimit
)
>
superTblInfo
->
childTblCount
))
{
printf
(
"WARNING: specified offset + limit > child table count!
\n
"
);
if
(
!
g_args
.
answer_yes
)
{
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
prompt
();
}
if
((
superTblInfo
->
childTblExists
!=
TBL_NO_EXISTS
)
&&
(
0
==
superTblInfo
->
childTblLimit
))
{
printf
(
"WARNING: specified limit = 0, which cannot find table name to insert or query!
\n
"
);
if
(
!
g_args
.
answer_yes
)
{
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
prompt
();
}
superTblInfo
->
childTblName
=
(
char
*
)
calloc
(
1
,
...
...
@@ -5746,49 +5825,49 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
t_i
nfo
->
threadID
=
i
;
tstrncpy
(
t_i
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
t_i
nfo
->
time_precision
=
timePrec
;
t_i
nfo
->
superTblInfo
=
superTblInfo
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
pThreadI
nfo
->
threadID
=
i
;
tstrncpy
(
pThreadI
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
pThreadI
nfo
->
time_precision
=
timePrec
;
pThreadI
nfo
->
superTblInfo
=
superTblInfo
;
t_i
nfo
->
start_time
=
start_time
;
t_i
nfo
->
minDelay
=
UINT64_MAX
;
pThreadI
nfo
->
start_time
=
start_time
;
pThreadI
nfo
->
minDelay
=
UINT64_MAX
;
if
((
NULL
==
superTblInfo
)
||
(
0
==
strncasecmp
(
superTblInfo
->
insertMode
,
"taosc"
,
5
)))
{
//
t_i
nfo->taos = taos;
t_i
nfo
->
taos
=
taos_connect
(
//
pThreadI
nfo->taos = taos;
pThreadI
nfo
->
taos
=
taos_connect
(
g_Dbs
.
host
,
g_Dbs
.
user
,
g_Dbs
.
password
,
db_name
,
g_Dbs
.
port
);
if
(
NULL
==
t_i
nfo
->
taos
)
{
if
(
NULL
==
pThreadI
nfo
->
taos
)
{
errorPrint
(
"connect to server fail from insert sub thread, reason: %s
\n
"
,
taos_errstr
(
NULL
));
exit
(
-
1
);
}
}
else
{
t_i
nfo
->
taos
=
NULL
;
pThreadI
nfo
->
taos
=
NULL
;
}
/* if ((NULL == superTblInfo)
|| (0 == superTblInfo->multiThreadWriteOneTbl)) {
*/
t_i
nfo
->
start_table_from
=
startFrom
;
t_i
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
t_i
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
t_i
nfo
->
end_table_to
+
1
;
pThreadI
nfo
->
start_table_from
=
startFrom
;
pThreadI
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
pThreadI
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
pThreadI
nfo
->
end_table_to
+
1
;
/* } else {
t_i
nfo->start_table_from = 0;
t_i
nfo->ntables = superTblInfo->childTblCount;
t_info->start_time = t_i
nfo->start_time + rand_int() % 10000 - rand_tinyint();
pThreadI
nfo->start_table_from = 0;
pThreadI
nfo->ntables = superTblInfo->childTblCount;
pThreadInfo->start_time = pThreadI
nfo->start_time + rand_int() % 10000 - rand_tinyint();
}
*/
tsem_init
(
&
(
t_i
nfo
->
lock_sem
),
0
,
0
);
tsem_init
(
&
(
pThreadI
nfo
->
lock_sem
),
0
,
0
);
if
(
ASYNC_MODE
==
g_Dbs
.
asyncMode
)
{
pthread_create
(
pids
+
i
,
NULL
,
asyncWrite
,
t_i
nfo
);
pthread_create
(
pids
+
i
,
NULL
,
asyncWrite
,
pThreadI
nfo
);
}
else
{
pthread_create
(
pids
+
i
,
NULL
,
syncWrite
,
t_i
nfo
);
pthread_create
(
pids
+
i
,
NULL
,
syncWrite
,
pThreadI
nfo
);
}
}
...
...
@@ -5803,27 +5882,27 @@ static void startMultiThreadInsertData(int threads, char* db_name,
double
avgDelay
=
0
;
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
tsem_destroy
(
&
(
t_i
nfo
->
lock_sem
));
taos_close
(
t_i
nfo
->
taos
);
tsem_destroy
(
&
(
pThreadI
nfo
->
lock_sem
));
taos_close
(
pThreadI
nfo
->
taos
);
debugPrint
(
"%s() LN%d, [%d] totalInsert=%"
PRIu64
" totalAffected=%"
PRIu64
"
\n
"
,
__func__
,
__LINE__
,
t_info
->
threadID
,
t_i
nfo
->
totalInsertRows
,
t_i
nfo
->
totalAffectedRows
);
pThreadInfo
->
threadID
,
pThreadI
nfo
->
totalInsertRows
,
pThreadI
nfo
->
totalAffectedRows
);
if
(
superTblInfo
)
{
superTblInfo
->
totalAffectedRows
+=
t_i
nfo
->
totalAffectedRows
;
superTblInfo
->
totalInsertRows
+=
t_i
nfo
->
totalInsertRows
;
superTblInfo
->
totalAffectedRows
+=
pThreadI
nfo
->
totalAffectedRows
;
superTblInfo
->
totalInsertRows
+=
pThreadI
nfo
->
totalInsertRows
;
}
else
{
g_args
.
totalAffectedRows
+=
t_i
nfo
->
totalAffectedRows
;
g_args
.
totalInsertRows
+=
t_i
nfo
->
totalInsertRows
;
g_args
.
totalAffectedRows
+=
pThreadI
nfo
->
totalAffectedRows
;
g_args
.
totalInsertRows
+=
pThreadI
nfo
->
totalInsertRows
;
}
totalDelay
+=
t_i
nfo
->
totalDelay
;
cntDelay
+=
t_i
nfo
->
cntDelay
;
if
(
t_info
->
maxDelay
>
maxDelay
)
maxDelay
=
t_i
nfo
->
maxDelay
;
if
(
t_info
->
minDelay
<
minDelay
)
minDelay
=
t_i
nfo
->
minDelay
;
totalDelay
+=
pThreadI
nfo
->
totalDelay
;
cntDelay
+=
pThreadI
nfo
->
cntDelay
;
if
(
pThreadInfo
->
maxDelay
>
maxDelay
)
maxDelay
=
pThreadI
nfo
->
maxDelay
;
if
(
pThreadInfo
->
minDelay
<
minDelay
)
minDelay
=
pThreadI
nfo
->
minDelay
;
}
cntDelay
-=
1
;
...
...
@@ -5879,26 +5958,26 @@ static void startMultiThreadInsertData(int threads, char* db_name,
static
void
*
readTable
(
void
*
sarg
)
{
#if 1
threadInfo
*
ri
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
ri
nfo
->
taos
;
threadInfo
*
pThreadI
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
pThreadI
nfo
->
taos
;
char
command
[
BUFFER_SIZE
]
=
"
\0
"
;
uint64_t
sTime
=
ri
nfo
->
start_time
;
char
*
tb_prefix
=
ri
nfo
->
tb_prefix
;
FILE
*
fp
=
fopen
(
rinfo
->
fp
,
"a"
);
uint64_t
sTime
=
pThreadI
nfo
->
start_time
;
char
*
tb_prefix
=
pThreadI
nfo
->
tb_prefix
;
FILE
*
fp
=
fopen
(
pThreadInfo
->
filePath
,
"a"
);
if
(
NULL
==
fp
)
{
errorPrint
(
"fopen %s fail, reason:%s.
\n
"
,
rinfo
->
fp
,
strerror
(
errno
));
errorPrint
(
"fopen %s fail, reason:%s.
\n
"
,
pThreadInfo
->
filePath
,
strerror
(
errno
));
return
NULL
;
}
int64_t
num_of_DPT
;
/* if (
ri
nfo->superTblInfo) {
num_of_DPT =
ri
nfo->superTblInfo->insertRows; // nrecords_per_table;
/* if (
pThreadI
nfo->superTblInfo) {
num_of_DPT =
pThreadI
nfo->superTblInfo->insertRows; // nrecords_per_table;
} else {
*/
num_of_DPT
=
g_args
.
num_of_DPT
;
// }
int64_t
num_of_tables
=
ri
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
num_of_tables
=
pThreadI
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
totalData
=
num_of_DPT
*
num_of_tables
;
bool
do_aggreFunc
=
g_Dbs
.
do_aggreFunc
;
...
...
@@ -5951,17 +6030,17 @@ static void *readTable(void *sarg) {
static
void
*
readMetric
(
void
*
sarg
)
{
#if 1
threadInfo
*
ri
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
ri
nfo
->
taos
;
threadInfo
*
pThreadI
nfo
=
(
threadInfo
*
)
sarg
;
TAOS
*
taos
=
pThreadI
nfo
->
taos
;
char
command
[
BUFFER_SIZE
]
=
"
\0
"
;
FILE
*
fp
=
fopen
(
rinfo
->
fp
,
"a"
);
FILE
*
fp
=
fopen
(
pThreadInfo
->
filePath
,
"a"
);
if
(
NULL
==
fp
)
{
printf
(
"fopen %s fail, reason:%s.
\n
"
,
rinfo
->
fp
,
strerror
(
errno
));
printf
(
"fopen %s fail, reason:%s.
\n
"
,
pThreadInfo
->
filePath
,
strerror
(
errno
));
return
NULL
;
}
int64_t
num_of_DPT
=
ri
nfo
->
superTblInfo
->
insertRows
;
int64_t
num_of_tables
=
ri
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
num_of_DPT
=
pThreadI
nfo
->
superTblInfo
->
insertRows
;
int64_t
num_of_tables
=
pThreadI
nfo
->
ntables
;
// rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t
totalData
=
num_of_DPT
*
num_of_tables
;
bool
do_aggreFunc
=
g_Dbs
.
do_aggreFunc
;
...
...
@@ -6022,6 +6101,13 @@ static void *readMetric(void *sarg) {
return
NULL
;
}
static
void
prompt
()
{
if
(
!
g_args
.
answer_yes
)
{
printf
(
" Press enter key to continue or Ctrl-C to stop
\n\n
"
);
(
void
)
getchar
();
}
}
static
int
insertTestProcess
()
{
...
...
@@ -6042,10 +6128,7 @@ static int insertTestProcess() {
if
(
g_fpOfInsertResult
)
printfInsertMetaToFile
(
g_fpOfInsertResult
);
if
(
!
g_args
.
answer_yes
)
{
printf
(
"Press enter key to continue
\n\n
"
);
(
void
)
getchar
();
}
prompt
();
init_rand_data
();
...
...
@@ -6156,23 +6239,22 @@ static void *specifiedTableQuery(void *sarg) {
   uint64_t lastPrintTime = taosGetTimestampMs();
   uint64_t startTs = taosGetTimestampMs();

+  if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
+    sprintf(pThreadInfo->filePath, "%s-%d",
+        g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+        pThreadInfo->threadID);
+  }
+
   while(queryTimes --) {
     if (g_queryInfo.specifiedQueryInfo.queryInterval &&
         (et - st) < (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) {
       taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms
     }

-    char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
-    if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-      sprintf(tmpFile, "%s-%d",
-          g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
-          pThreadInfo->threadID);
-    }
-
     st = taosGetTimestampMs();

-    selectAndGetResult(pThreadInfo,
-        g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile);
+    selectAndGetResult(pThreadInfo,
+        g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);

     et = taosGetTimestampMs();
     printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n",
...
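Note on the loop above: `queryInterval` (in ms) turns `specifiedTableQuery` into a rate-limited loop; if the previous round finished in less than the interval, the thread sleeps off the remainder before the next `selectAndGetResult()`. A standalone sketch of the same pattern follows; `now_ms()` and `work()` are hypothetical stand-ins for `taosGetTimestampMs()` and the query call.

#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical millisecond clock, standing in for taosGetTimestampMs(). */
static uint64_t now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Run `times` iterations of work(), at most one every `interval_ms` ms.
 * Mirrors the loop above: if the previous round ended early, sleep off the
 * remainder of the interval before issuing the next query. */
static void throttled_loop(int times, int64_t interval_ms, void (*work)(void)) {
    uint64_t st = 0, et = 0;
    while (times--) {
        if (interval_ms && (int64_t)(et - st) < interval_ms) {
            usleep((useconds_t)((interval_ms - (int64_t)(et - st)) * 1000));
        }
        st = now_ms();
        work();
        et = now_ms();
    }
}

As in the original, the very first iteration also sleeps a full interval, since `st` and `et` both start at zero.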
@@ -6258,13 +6340,12 @@ static void *superTableQuery(void *sarg) {
       for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
         memset(sqlstr, 0, sizeof(sqlstr));
         replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
-        char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
-        if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
-          sprintf(tmpFile, "%s-%d",
+        if (g_queryInfo.superQueryInfo.result[j] != NULL) {
+          sprintf(pThreadInfo->filePath, "%s-%d",
               g_queryInfo.superQueryInfo.result[j],
               pThreadInfo->threadID);
         }
-        selectAndGetResult(pThreadInfo, sqlstr, tmpFile);
+        selectAndGetResult(pThreadInfo, sqlstr);

         totalQueried++;
         g_queryInfo.superQueryInfo.totalQueried ++;
...
@@ -6317,10 +6398,7 @@ static int queryTestProcess() {
         &g_queryInfo.superQueryInfo.childTblCount);
   }

-  if (!g_args.answer_yes) {
-    printf("Press enter key to continue\n\n");
-    (void)getchar();
-  }
+  prompt();

   if (g_args.debug_print || g_args.verbose_print) {
     printfQuerySystemInfo(taos);
...
@@ -6336,7 +6414,7 @@ static int queryTestProcess() {
   threadInfo *infos = NULL;
   //==== create sub threads for query from specify table
   int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
-  int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
+  uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;

   uint64_t startTs = taosGetTimestampMs();
...
@@ -6350,32 +6428,32 @@ static int queryTestProcess() {
       ERROR_EXIT("memory allocation failed for create threads\n");
     }

-    for (int i = 0; i < nConcurrent; i++) {
-      for (int j = 0; j < nSqlCount; j++) {
-        threadInfo *t_info = infos + i * nSqlCount + j;
-        t_info->threadID = i * nSqlCount + j;
-        t_info->querySeq = j;
-
-        if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
-
-          char sqlStr[MAX_TB_NAME_SIZE*2];
-          sprintf(sqlStr, "use %s", g_queryInfo.dbName);
-          verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
-          if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
-            taos_close(taos);
-            free(infos);
-            free(pids);
-            errorPrint("use database %s failed!\n\n", g_queryInfo.dbName);
-            return -1;
-          }
-        }
+    for (uint64_t i = 0; i < nSqlCount; i++) {
+      for (int j = 0; j < nConcurrent; j++) {
+        uint64_t seq = i * nConcurrent + j;
+        threadInfo *pThreadInfo = infos + seq;
+        pThreadInfo->threadID = seq;
+        pThreadInfo->querySeq = i;
+
+        if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+
+          char sqlStr[MAX_TB_NAME_SIZE*2];
+          sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+          if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+            taos_close(taos);
+            free(infos);
+            free(pids);
+            errorPrint("use database %s failed!\n\n", g_queryInfo.dbName);
+            return -1;
+          }
+        }

-        t_info->taos = NULL;  // TODO: workaround to use separate taos connection;
+        pThreadInfo->taos = NULL;  // TODO: workaround to use separate taos connection;

-        pthread_create(pids + i * nSqlCount + j, NULL, specifiedTableQuery,
-            t_info);
+        pthread_create(pids + seq, NULL, specifiedTableQuery,
+            pThreadInfo);
       }
     }
   } else {
     g_queryInfo.specifiedQueryInfo.concurrent = 0;
...
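The rewritten loops above flatten a (sql, worker) pair into a single slot via `seq = i * nConcurrent + j`, so each specified SQL statement gets its own group of `nConcurrent` dedicated threads. The indexing in isolation:

#include <stdio.h>

/* Flatten a (sql, worker) pair into one slot index, as the diff does with
 * seq = i * nConcurrent + j; every sql gets nConcurrent dedicated workers. */
int main(void) {
    int nSqlCount = 3, nConcurrent = 2;
    for (int i = 0; i < nSqlCount; i++) {
        for (int j = 0; j < nConcurrent; j++) {
            int seq = i * nConcurrent + j;
            printf("sql %d, worker %d -> thread slot %d\n", i, j, seq);
        }
    }
    return 0;  /* slots 0..5, one per (sql, worker) pair */
}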
@@ -6414,15 +6492,15 @@ static int queryTestProcess() {
   uint64_t startFrom = 0;
   for (int i = 0; i < threads; i++) {
-    threadInfo *t_info = infosOfSub + i;
-    t_info->threadID = i;
+    threadInfo *pThreadInfo = infosOfSub + i;
+    pThreadInfo->threadID = i;

-    t_info->start_table_from = startFrom;
-    t_info->ntables = i < b ? a + 1 : a;
-    t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-    startFrom = t_info->end_table_to + 1;
-    t_info->taos = NULL; // TODO: workaround to use separate taos connection;
-    pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info);
+    pThreadInfo->start_table_from = startFrom;
+    pThreadInfo->ntables = i < b ? a + 1 : a;
+    pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+    startFrom = pThreadInfo->end_table_to + 1;
+    pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+    pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
   }

   g_queryInfo.superQueryInfo.threadCnt = threads;
...
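For reference, the `a`/`b` arithmetic used here (and in the other thread-setup loops) splits `ntables` consecutive tables as evenly as possible over `threads` workers: every worker gets `a = ntables / threads` tables and the first `b = ntables % threads` workers get one extra. A runnable sketch:

#include <stdint.h>
#include <stdio.h>

/* Split `ntables` consecutive tables across `threads` workers as evenly as
 * possible: the first (ntables % threads) workers get one extra table. */
int main(void) {
    int64_t ntables = 10;
    int     threads = 4;

    int64_t a = ntables / threads;
    if (a < 1) { threads = (int)ntables; a = 1; }
    int64_t b = (threads != 0) ? ntables % threads : 0;

    uint64_t startFrom = 0;
    for (int i = 0; i < threads; i++) {
        int64_t  cnt   = (i < b) ? a + 1 : a;
        uint64_t endTo = (i < b) ? startFrom + a : startFrom + a - 1;
        printf("thread %d: tables [%llu, %llu] (%lld tables)\n", i,
               (unsigned long long)startFrom, (unsigned long long)endTo,
               (long long)cnt);
        startFrom = endTo + 1;
    }
    return 0;  /* 10 tables over 4 threads -> 3,3,2,2 */
}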
@@ -6460,7 +6538,21 @@ static int queryTestProcess() {
   return 0;
 }

-static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+static void stable_sub_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+  if (res == NULL || taos_errno(res) != 0) {
+    errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+           __func__, __LINE__, code, taos_errstr(res));
+    return;
+  }
+
+  if (param)
+    fetchResult(res, (threadInfo *)param);
+  // tao_unscribe() will free result.
+}
+
+static void specified_sub_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
   if (res == NULL || taos_errno(res) != 0) {
     errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
            __func__, __LINE__, code, taos_errstr(res));
...
@@ -6468,23 +6560,36 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c
   }

   if (param)
-    appendResultToFile(res, (char*)param);
+    fetchResult(res, (threadInfo *)param);
   // tao_unscribe() will free result.
 }

-static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) {
+static TAOS_SUB* subscribeImpl(
+        QUERY_CLASS class,
+        threadInfo *pThreadInfo,
+        char *sql, char* topic,
+        bool restart, uint64_t interval)
+{
   TAOS_SUB* tsub = NULL;

-  if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
-    tsub = taos_subscribe(taos,
-        g_queryInfo.specifiedQueryInfo.subscribeRestart,
-        topic, sql, subscribe_callback, (void*)resultFileName,
+  if ((SPECIFIED_CLASS == class)
+          && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) {
+    tsub = taos_subscribe(
+        pThreadInfo->taos,
+        restart,
+        topic, sql, specified_sub_callback, (void*)pThreadInfo,
         g_queryInfo.specifiedQueryInfo.subscribeInterval);
+  } else if ((STABLE_CLASS == class)
+          && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) {
+    tsub = taos_subscribe(
+        pThreadInfo->taos,
+        restart,
+        topic, sql, stable_sub_callback, (void*)pThreadInfo,
+        g_queryInfo.superQueryInfo.subscribeInterval);
   } else {
-    tsub = taos_subscribe(taos,
-        g_queryInfo.specifiedQueryInfo.subscribeRestart,
-        topic, sql, NULL, NULL, 0);
+    tsub = taos_subscribe(
+        pThreadInfo->taos,
+        restart,
+        topic, sql, NULL, NULL, interval);
   }

   if (tsub == NULL) {
...
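Background for `subscribeImpl()`: `taos_subscribe()` takes a restart flag, a topic name, the SQL to watch, an optional async callback plus user parameter, and a polling interval in ms; a NULL callback selects synchronous mode, where new rows are pulled with `taos_consume()`. A minimal synchronous sketch; the connection parameters and the `meters` table are placeholders, not values from this commit:

#include <stdio.h>
#include <unistd.h>
#include <taos.h>   /* TDengine client: taos_subscribe()/taos_consume() */

int main(void) {
    TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
    if (taos == NULL) return 1;

    /* NULL callback selects synchronous mode; a non-NULL callback (the
     * ASYNC_MODE branches above) is invoked every `interval` ms instead. */
    TAOS_SUB *tsub = taos_subscribe(taos, 1 /* restart */, "demo-topic",
                                    "select * from meters", NULL, NULL, 0);
    if (tsub == NULL) { taos_close(taos); return 1; }

    for (int i = 0; i < 10; i++) {
        TAOS_RES *res = taos_consume(tsub);  /* rows added since last call */
        if (res) {
            TAOS_ROW row;
            while ((row = taos_fetch_row(res)) != NULL) {
                /* process one new row; do NOT free res here --
                 * taos_unsubscribe() releases it, per the comment above */
            }
        }
        sleep(1);
    }

    taos_unsubscribe(tsub, 0 /* discard saved progress */);
    taos_close(taos);
    return 0;
}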
@@ -6499,23 +6604,25 @@ static void *superSubscribe(void *sarg) {
   threadInfo *pThreadInfo = (threadInfo *)sarg;
   char subSqlstr[MAX_QUERY_SQL_LENGTH];
   TAOS_SUB*    tsub[MAX_QUERY_SQL_COUNT] = {0};
+  uint64_t tsubSeq;

-  if (g_queryInfo.superQueryInfo.sqlCount == 0)
-    return NULL;
+  if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
+    errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
+        pThreadInfo->ntables, MAX_QUERY_SQL_COUNT);
+    exit(-1);
+  }

   if (pThreadInfo->taos == NULL) {
-    TAOS * taos = NULL;
-    taos = taos_connect(g_queryInfo.host,
+    pThreadInfo->taos = taos_connect(g_queryInfo.host,
           g_queryInfo.user,
           g_queryInfo.password,
           g_queryInfo.dbName,
           g_queryInfo.port);
-    if (taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
       errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
             pThreadInfo->threadID, taos_errstr(NULL));
       return NULL;
-    } else {
-      pThreadInfo->taos = taos;
     }
   }
...
@@ -6531,48 +6638,96 @@ static void *superSubscribe(void *sarg) {
   char topic[32] = {0};
   for (uint64_t i = pThreadInfo->start_table_from;
           i <= pThreadInfo->end_table_to; i++) {
-    for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
-      sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%d", i, j);
-      memset(subSqlstr, 0, sizeof(subSqlstr));
-      replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], subSqlstr, i);
-      char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
-      if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
-        sprintf(tmpFile, "%s-%d",
-                g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID);
-      }
-
-      uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
-      debugPrint("%s() LN%d, subSeq=%"PRIu64" subSqlstr: %s\n",
-              __func__, __LINE__, subSeq, subSqlstr);
-      tsub[subSeq] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile);
-      if (NULL == tsub[subSeq]) {
-        taos_close(pThreadInfo->taos);
-        return NULL;
-      }
+    tsubSeq = i - pThreadInfo->start_table_from;
+    verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
+            __func__, __LINE__,
+            pThreadInfo->threadID,
+            pThreadInfo->start_table_from,
+            pThreadInfo->end_table_to, i);
+    sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
+            i, pThreadInfo->querySeq);
+    memset(subSqlstr, 0, sizeof(subSqlstr));
+    replaceChildTblName(g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
+            subSqlstr, i);
+    if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+      sprintf(pThreadInfo->filePath, "%s-%d",
+              g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+              pThreadInfo->threadID);
+    }
+
+    debugPrint("%s() LN%d, [%d] subSqlstr: %s\n",
+            __func__, __LINE__, pThreadInfo->threadID, subSqlstr);
+    tsub[tsubSeq] = subscribeImpl(
+            STABLE_CLASS,
+            pThreadInfo, subSqlstr, topic,
+            g_queryInfo.superQueryInfo.subscribeRestart,
+            g_queryInfo.superQueryInfo.subscribeInterval);
+    if (NULL == tsub[tsubSeq]) {
+      taos_close(pThreadInfo->taos);
+      return NULL;
     }
   }

   // start loop to consume result
   int consumed[MAX_QUERY_SQL_COUNT];
   for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) {
     consumed[i] = 0;
   }
   TAOS_RES* res = NULL;

+  uint64_t st = 0, et = 0;
   while(1) {
-    for (uint64_t i = pThreadInfo->start_table_from;
-            i <= pThreadInfo->end_table_to; i++) {
-      for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
-        if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
-          continue;
-        }
-
-        uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
-        taosMsleep(100); // ms
-        res = taos_consume(tsub[subSeq]);
-        if (res) {
-          char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
-          if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
-            sprintf(tmpFile, "%s-%d",
-                    g_queryInfo.superQueryInfo.result[j],
-                    pThreadInfo->threadID);
-            appendResultToFile(res, tmpFile);
-          }
-        }
-      }
-    }
+    for (uint64_t i = pThreadInfo->start_table_from;
+            i <= pThreadInfo->end_table_to; i++) {
+      tsubSeq = i - pThreadInfo->start_table_from;
+      if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
+        continue;
+      }
+
+      st = taosGetTimestampMs();
+      performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et));
+      res = taos_consume(tsub[tsubSeq]);
+      et = taosGetTimestampMs();
+      performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
+      if (res) {
+        if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+          sprintf(pThreadInfo->filePath, "%s-%d",
+                  g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+                  pThreadInfo->threadID);
+          fetchResult(res, pThreadInfo);
+        }
+
+        consumed[tsubSeq] ++;
+        if ((g_queryInfo.superQueryInfo.subscribeKeepProgress)
+                && (consumed[tsubSeq] >=
+                    g_queryInfo.superQueryInfo.resubAfterConsume)) {
+          printf("keepProgress:%d, resub super table query: %"PRIu64"\n",
+                  g_queryInfo.superQueryInfo.subscribeKeepProgress,
+                  pThreadInfo->querySeq);
+          taos_unsubscribe(tsub[tsubSeq],
+                  g_queryInfo.superQueryInfo.subscribeKeepProgress);
+          consumed[tsubSeq] = 0;
+          tsub[tsubSeq] = subscribeImpl(
+                  STABLE_CLASS,
+                  pThreadInfo, subSqlstr, topic,
+                  g_queryInfo.superQueryInfo.subscribeRestart,
+                  g_queryInfo.superQueryInfo.subscribeInterval);
+          if (NULL == tsub[tsubSeq]) {
+            taos_close(pThreadInfo->taos);
+            return NULL;
+          }
+        }
+      }
+    }
+  }
...
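The `consumed[]`/`resubAfterConsume` logic above periodically tears down and re-creates a subscription; with a non-zero `keepProgress`, `taos_unsubscribe()` persists the consumption offset, so the fresh subscription resumes where the old one stopped. The pattern in isolation; `make_sub()` is a hypothetical factory standing in for `subscribeImpl()`:

#include <stddef.h>
#include <taos.h>  /* TDengine client API: taos_consume(), taos_unsubscribe() */

/* Hypothetical factory that re-creates the subscription (parameters omitted). */
extern TAOS_SUB *make_sub(void);

/* Consume forever; after every `resubAfterConsume` non-empty results,
 * unsubscribe with keepProgress != 0 so the offset is persisted, then
 * re-subscribe and resume from the saved offset. */
void consume_loop(TAOS_SUB *tsub, int keepProgress, int resubAfterConsume) {
    int consumed = 0;
    while (1) {
        TAOS_RES *res = taos_consume(tsub);
        if (res == NULL) continue;
        /* ... fetch and process rows from `res` here ... */
        if (keepProgress && ++consumed >= resubAfterConsume) {
            taos_unsubscribe(tsub, keepProgress);  /* saves progress */
            consumed = 0;
            tsub = make_sub();
            if (tsub == NULL) return;
        }
    }
}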
@@ -6580,11 +6735,8 @@ static void *superSubscribe(void *sarg) {
   for (uint64_t i = pThreadInfo->start_table_from;
           i <= pThreadInfo->end_table_to; i++) {
-    for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
-      uint64_t subSeq = i * g_queryInfo.superQueryInfo.sqlCount + j;
-      taos_unsubscribe(tsub[subSeq],
-          g_queryInfo.superQueryInfo.subscribeKeepProgress);
-    }
+    tsubSeq = i - pThreadInfo->start_table_from;
+    taos_unsubscribe(tsub[tsubSeq], 0);
   }

   taos_close(pThreadInfo->taos);
...
@@ -6593,78 +6745,94 @@ static void *superSubscribe(void *sarg) {

 static void *specifiedSubscribe(void *sarg) {
   threadInfo *pThreadInfo = (threadInfo *)sarg;
-  TAOS_SUB*    tsub[MAX_QUERY_SQL_COUNT] = {0};
-
-  if (g_queryInfo.specifiedQueryInfo.sqlCount == 0)
-    return NULL;
+//  TAOS_SUB*  tsub = NULL;

   if (pThreadInfo->taos == NULL) {
-    TAOS * taos = NULL;
-    taos = taos_connect(g_queryInfo.host,
+    pThreadInfo->taos = taos_connect(g_queryInfo.host,
           g_queryInfo.user,
           g_queryInfo.password,
           g_queryInfo.dbName,
           g_queryInfo.port);
-    if (taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
       errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
             pThreadInfo->threadID, taos_errstr(NULL));
       return NULL;
-    } else {
-      pThreadInfo->taos = taos;
     }
   }

   char sqlStr[MAX_TB_NAME_SIZE*2];
   sprintf(sqlStr, "use %s", g_queryInfo.dbName);
   debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
   if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
     taos_close(pThreadInfo->taos);
     return NULL;
   }

-  char topic[32] = {0};
-  for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
-    sprintf(topic, "taosdemo-subscribe-%d", i);
-    char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
-    if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
-      sprintf(tmpFile, "%s-%d",
-          g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
-    }
-    tsub[i] = subscribeImpl(pThreadInfo->taos,
-        g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
-    if (NULL == tsub[i]) {
-      taos_close(pThreadInfo->taos);
-      return NULL;
-    }
+  sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+          "taosdemo-subscribe-%"PRIu64"-%d",
+          pThreadInfo->querySeq,
+          pThreadInfo->threadID);
+  if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
+    sprintf(pThreadInfo->filePath, "%s-%d",
+        g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+        pThreadInfo->threadID);
+  }
+  g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
+          SPECIFIED_CLASS, pThreadInfo,
+          g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+          g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+          g_queryInfo.specifiedQueryInfo.subscribeRestart,
+          g_queryInfo.specifiedQueryInfo.subscribeInterval);
+  if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
+    taos_close(pThreadInfo->taos);
+    return NULL;
   }

   // start loop to consume result
   TAOS_RES* res = NULL;

+  g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
   while(1) {
-    for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
-      if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
-        continue;
-      }
-
-      taosMsleep(1000); // ms
-      res = taos_consume(tsub[i]);
-      if (res) {
-        char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
-        if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) {
-          sprintf(tmpFile, "%s-%d",
-              g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID);
-          appendResultToFile(res, tmpFile);
-        }
-      }
-    }
-  }
+    if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
+      continue;
+    }
+
+    g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
+            g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
+    if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
+      if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+        sprintf(pThreadInfo->filePath, "%s-%d",
+                g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+                pThreadInfo->threadID);
+        fetchResult(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], pThreadInfo);
+      }
+
+      g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID]++;
+      if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress)
+              && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
+                  g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
+        printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
+                g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
+                pThreadInfo->querySeq);
+        g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
+        taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
+                g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+        g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
+                SPECIFIED_CLASS, pThreadInfo,
+                g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+                g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+                g_queryInfo.specifiedQueryInfo.subscribeRestart,
+                g_queryInfo.specifiedQueryInfo.subscribeInterval);
+        if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
+          taos_close(pThreadInfo->taos);
+          return NULL;
+        }
+      }
+    }
+  }
-  taos_free_result(res);
-  for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
-    taos_unsubscribe(tsub[i], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
-  }
+  taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
+  taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
   taos_close(pThreadInfo->taos);

   return NULL;
 }
...
@@ -6673,10 +6841,7 @@ static int subscribeTestProcess() {
   printfQueryMeta();
   resetAfterAnsiEscape();

-  if (!g_args.answer_yes) {
-    printf("Press enter key to continue\n\n");
-    (void)getchar();
-  }
+  prompt();

   TAOS * taos = NULL;
   taos = taos_connect(g_queryInfo.host,
...
@@ -6702,9 +6867,13 @@ static int subscribeTestProcess() {
   pthread_t  *pids = NULL;
   threadInfo *infos = NULL;
-  //==== create sub threads for query for specified table
+
+  pthread_t  *pidsOfStable  = NULL;
+  threadInfo *infosOfStable = NULL;
+
+  //==== create threads for query for specified table
   if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
-    printf("%s() LN%d, sepcified query sqlCount %"PRIu64".\n",
+    debugPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n",
           __func__, __LINE__,
           g_queryInfo.specifiedQueryInfo.sqlCount);
   } else {
...
@@ -6715,80 +6884,109 @@ static int subscribeTestProcess() {
       exit(-1);
     }

-    pids  = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t));
-    infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo));
+    pids  = malloc(
+            g_queryInfo.specifiedQueryInfo.sqlCount *
+            g_queryInfo.specifiedQueryInfo.concurrent *
+            sizeof(pthread_t));
+    infos = malloc(
+            g_queryInfo.specifiedQueryInfo.sqlCount *
+            g_queryInfo.specifiedQueryInfo.concurrent *
+            sizeof(threadInfo));
     if ((NULL == pids) || (NULL == infos)) {
       errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
       exit(-1);
     }

-    for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
-      threadInfo *t_info = infos + i;
-      t_info->threadID = i;
-      t_info->taos = NULL;  // TODO: workaround to use separate taos connection;
-      pthread_create(pids + i, NULL, specifiedSubscribe, t_info);
+    for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+      for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+        uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+        threadInfo *pThreadInfo = infos + seq;
+        pThreadInfo->threadID = seq;
+        pThreadInfo->querySeq = i;
+        pThreadInfo->taos = NULL;  // TODO: workaround to use separate taos connection;
+        pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
+      }
     }
   }

-  //==== create sub threads for super table query
-  pthread_t  *pidsOfSub  = NULL;
-  threadInfo *infosOfSub = NULL;
-  if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+  //==== create threads for super table query
+  if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
+    debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n",
+          __func__, __LINE__,
+          g_queryInfo.superQueryInfo.sqlCount);
+  } else {
+    if ((g_queryInfo.superQueryInfo.sqlCount > 0)
       && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
-    pidsOfSub  = malloc(g_queryInfo.superQueryInfo.threadCnt *
+      pidsOfStable  = malloc(
+              g_queryInfo.superQueryInfo.sqlCount *
+              g_queryInfo.superQueryInfo.threadCnt *
             sizeof(pthread_t));
-    infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt *
+      infosOfStable = malloc(
+              g_queryInfo.superQueryInfo.sqlCount *
+              g_queryInfo.superQueryInfo.threadCnt *
             sizeof(threadInfo));
-    if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
-      errorPrint("%s() LN%d, malloc failed for create threads\n",
+      if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
+        errorPrint("%s() LN%d, malloc failed for create threads\n",
             __func__, __LINE__);
-      // taos_close(taos);
-      exit(-1);
-    }
+        // taos_close(taos);
+        exit(-1);
+      }

-    int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
-    int threads = g_queryInfo.superQueryInfo.threadCnt;
+      int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+      int threads = g_queryInfo.superQueryInfo.threadCnt;

-    int64_t a = ntables / threads;
-    if (a < 1) {
-      threads = ntables;
-      a = 1;
-    }
+      int64_t a = ntables / threads;
+      if (a < 1) {
+        threads = ntables;
+        a = 1;
+      }

-    int64_t b = 0;
-    if (threads != 0) {
-      b = ntables % threads;
-    }
+      int64_t b = 0;
+      if (threads != 0) {
+        b = ntables % threads;
+      }

-    uint64_t startFrom = 0;
-    for (int i = 0; i < threads; i++) {
-      threadInfo *t_info = infosOfSub + i;
-      t_info->threadID = i;
-
-      t_info->start_table_from = startFrom;
-      t_info->ntables = i < b ? a + 1 : a;
-      t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-      startFrom = t_info->end_table_to + 1;
-      t_info->taos = NULL; // TODO: workaround to use separate taos connection;
-      pthread_create(pidsOfSub + i, NULL, superSubscribe, t_info);
-    }
+      for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+        uint64_t startFrom = 0;
+        for (int j = 0; j < threads; j++) {
+          uint64_t seq = i * threads + j;
+          threadInfo *pThreadInfo = infosOfStable + seq;
+          pThreadInfo->threadID = seq;
+          pThreadInfo->querySeq = i;
+          pThreadInfo->start_table_from = startFrom;
+          pThreadInfo->ntables = j < b ? a + 1 : a;
+          pThreadInfo->end_table_to = j < b ? startFrom + a : startFrom + a - 1;
+          startFrom = pThreadInfo->end_table_to + 1;
+          pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+          pthread_create(pidsOfStable + seq, NULL, superSubscribe, pThreadInfo);
+        }
+      }

       g_queryInfo.superQueryInfo.threadCnt = threads;

-    for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) {
-      pthread_join(pidsOfSub[i], NULL);
+      for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+        for (int j = 0; j < threads; j++) {
+          uint64_t seq = i * threads + j;
+          pthread_join(pidsOfStable[seq], NULL);
+        }
+      }
     }
   }

-  for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) {
-    pthread_join(pids[i], NULL);
+  for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+    for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+      uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+      pthread_join(pids[seq], NULL);
+    }
   }

   tmfree((char*)pids);
   tmfree((char*)infos);

-  tmfree((char*)pidsOfSub);
-  tmfree((char*)infosOfSub);
+  tmfree((char*)pidsOfStable);
+  tmfree((char*)infosOfStable);

   //   taos_close(taos);
   return 0;
 }
...
@@ -6901,17 +7099,21 @@ static void setParaFromArg(){
     if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
       g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
     } else {
-      for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) {
-        tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
+      for (int i = g_Dbs.db[0].superTbls[0].columnCount;
+              i < g_args.num_of_CPR; i++) {
+        tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
+                "INT", MAX_TB_NAME_SIZE);
         g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
         g_Dbs.db[0].superTbls[0].columnCount++;
       }
     }

-    tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
+    tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
+            "INT", MAX_TB_NAME_SIZE);
     g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;

-    tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
+    tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
+            "BINARY", MAX_TB_NAME_SIZE);
     g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
     g_Dbs.db[0].superTbls[0].tagCount = 2;
   } else {
...
@@ -6987,7 +7189,6 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
     }

     memcpy(cmd + cmd_len, line, read_len);
-    verbosePrint("%s() LN%d cmd: %s\n", __func__, __LINE__, cmd);
     if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
       errorPrint("%s() LN%d, queryDbExec %s failed!\n",
             __func__, __LINE__, cmd);
...
@@ -7037,47 +7238,47 @@ static void queryResult() {
   // query data

   pthread_t read_id;
-  threadInfo *rInfo = malloc(sizeof(threadInfo));
-  assert(rInfo);
-  rInfo->start_time = 1500000000000;  // 2017-07-14 10:40:00.000
-  rInfo->start_table_from = 0;
+  threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
+  assert(pThreadInfo);
+  pThreadInfo->start_time = 1500000000000;  // 2017-07-14 10:40:00.000
+  pThreadInfo->start_table_from = 0;

-  //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+  //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
   if (g_args.use_metric) {
-    rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
-    rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
-    rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
-    tstrncpy(rInfo->tb_prefix,
+    pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
+    pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+    pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+    tstrncpy(pThreadInfo->tb_prefix,
           g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
   } else {
-    rInfo->ntables = g_args.num_of_tables;
-    rInfo->end_table_to = g_args.num_of_tables - 1;
-    tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+    pThreadInfo->ntables = g_args.num_of_tables;
+    pThreadInfo->end_table_to = g_args.num_of_tables - 1;
+    tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
   }

-  rInfo->taos = taos_connect(
+  pThreadInfo->taos = taos_connect(
           g_Dbs.host,
           g_Dbs.user,
           g_Dbs.password,
           g_Dbs.db[0].dbName,
           g_Dbs.port);
-  if (rInfo->taos == NULL) {
+  if (pThreadInfo->taos == NULL) {
     errorPrint("Failed to connect to TDengine, reason:%s\n",
           taos_errstr(NULL));
-    free(rInfo);
+    free(pThreadInfo);
     exit(-1);
   }

-  tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
+  tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);

   if (!g_Dbs.use_metric) {
-    pthread_create(&read_id, NULL, readTable, rInfo);
+    pthread_create(&read_id, NULL, readTable, pThreadInfo);
   } else {
-    pthread_create(&read_id, NULL, readMetric, rInfo);
+    pthread_create(&read_id, NULL, readMetric, pThreadInfo);
   }
   pthread_join(read_id, NULL);
-  taos_close(rInfo->taos);
-  free(rInfo);
+  taos_close(pThreadInfo->taos);
+  free(pThreadInfo);
 }

 static void testCmdLine() {
...
@@ -7131,6 +7332,9 @@ int main(int argc, char *argv[]) {
     } else {
       testCmdLine();
     }
+
+    if (g_dupstr)
+      free(g_dupstr);
   }

   return 0;
...
src/os/src/detail/osSignal.c
...
@@ -23,14 +23,14 @@
 typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context);

 void taosSetSignal(int32_t signum, FSignalHandler sigfp) {
-  struct sigaction act = {{0}};
+  struct sigaction act;
+  memset(&act, 0, sizeof(act));
 #if 1
   act.sa_flags = SA_SIGINFO;
   act.sa_sigaction = (FLinuxSignalHandler)sigfp;
 #else
   act.sa_handler = sigfp;
 #endif
   sigaction(signum, &act, NULL);
 }

 void taosIgnSignal(int32_t signum) {
...
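The `{{0}}` aggregate initializer replaced above is not portable for `struct sigaction`: the struct's first member differs across platforms, so some compilers reject or warn on that brace pattern, while `memset` zero-fill always works. A self-contained example of installing a `SA_SIGINFO` handler the same way:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_sig(int signum, siginfo_t *info, void *context) {
    (void)signum; (void)info; (void)context;
    /* only async-signal-safe calls here; write() is safe, printf() is not */
    const char msg[] = "caught signal\n";
    write(STDERR_FILENO, msg, sizeof(msg) - 1);
}

int main(void) {
    struct sigaction act;
    memset(&act, 0, sizeof(act));   /* portable zero-init, unlike {{0}} */
    act.sa_flags = SA_SIGINFO;      /* deliver siginfo_t to the handler */
    act.sa_sigaction = on_sig;
    sigaction(SIGUSR1, &act, NULL);

    raise(SIGUSR1);
    return 0;
}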
src/sync/src/syncMain.c
...
@@ -709,7 +709,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
 }

 static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
-  int32_t onlineNum = 0;
+  int32_t onlineNum = 0, arbOnlineNum = 0;
   int32_t masterIndex = -1;
   int32_t replica = pNode->replica;
...
@@ -723,13 +723,15 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
   SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
   if (pArb && pArb->role != TAOS_SYNC_ROLE_OFFLINE) {
     onlineNum++;
+    ++arbOnlineNum;
     replica = pNode->replica + 1;
   }

   if (onlineNum <= replica * 0.5) {
     if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
-      if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && onlineNum >= 1) {
+      if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 &&
+          ((replica > 2 && onlineNum - arbOnlineNum > 1) || pNode->replica < 3)) {
         sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica);
         masterIndex = pNode->selfIndex;
       } else {
         nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
         sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
...
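The new condition keeps a master only when a real data-node majority remains: at the exact 50% online boundary, an arbiter no longer counts as the deciding vote once the replica group has three or more data nodes. A sketch of the decision, grounded in the diff (names shortened; `replica` already includes the arbiter when one is online):

#include <stdbool.h>
#include <stdio.h>

/* Decide whether a current master may keep its role after peers go offline. */
static bool keep_master(int onlineNum, int arbOnlineNum, int replica,
                        int dataReplica) {
    if (onlineNum > replica * 0.5) return true;   /* clear majority */
    /* Exactly at the 50% boundary: allow it only if, besides the arbiter,
     * more than one data node is online, or the group is smaller than 3. */
    return onlineNum == replica * 0.5 &&
           ((replica > 2 && onlineNum - arbOnlineNum > 1) || dataReplica < 3);
}

int main(void) {
    /* 2 data replicas + arbiter, master + arbiter online => keep master */
    printf("%d\n", keep_master(2, 1, 3, 2));  /* 1 */
    /* 3 data replicas + arbiter, only master + arbiter online => step down */
    printf("%d\n", keep_master(2, 1, 4, 3));  /* 0 */
    return 0;
}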
@@ -1002,6 +1004,7 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
   if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
     // nodeVersion = pHead->version;
     code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
+    syncConfirmForward(pNode->rid, pHead->version, code, false);
   } else {
     if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
       code = syncSaveIntoBuffer(pPeer, pHead);
...
@@ -1404,7 +1407,7 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
       pthread_mutex_lock(&pNode->mutex);
       for (int32_t i = 0; i < pSyncFwds->fwds; ++i) {
         SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS;
-        if (ABS(time - pFwdInfo->time) < 2000) break;
+        if (ABS(time - pFwdInfo->time) < 10000) break;

         sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64,
                pNode->vgId, pFwdInfo->version, time, pFwdInfo->time);
...
src/util/src/tcache.c
...
@@ -613,7 +613,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
   // todo memory leak if there are object with refcount greater than 0 in hash table?
   taosHashCleanup(pCacheObj->pHashTable);

-  taosTrashcanEmpty(pCacheObj, true);
+  taosTrashcanEmpty(pCacheObj, false);

   __cache_lock_destroy(pCacheObj);
...
src/util/src/tcrc32c.c
...
@@ -17,7 +17,7 @@
   misrepresented as being the original software.
   3. This notice may not be removed or altered from any source distribution.
 */
-#ifndef _TD_ARM_
+#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
 #include <nmmintrin.h>
 #endif
...
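`<nmmintrin.h>` exists only on x86 targets with SSE4.2, so the include guard has to exclude every non-x86 architecture; this fix adds MIPS alongside ARM. The same idea, sketched with a software fallback (`_mm_crc32_u8` is the SSE4.2 intrinsic; compile the hardware path with `-msse4.2`):

#include <stdint.h>
#include <stdio.h>

#if (defined(__x86_64__) || defined(__i386__)) && \
    !defined(__arm__) && !defined(__aarch64__) && !defined(__mips__)
#include <nmmintrin.h>   /* SSE4.2 CRC32 intrinsics, x86 only */
#define HAVE_SSE42_CRC 1
#endif

/* One CRC32C byte step: hardware intrinsic where available, bitwise otherwise. */
static uint32_t crc32c_byte(uint32_t crc, uint8_t b) {
#ifdef HAVE_SSE42_CRC
    return _mm_crc32_u8(crc, b);
#else
    crc ^= b;
    for (int i = 0; i < 8; i++)
        crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));  /* reflected poly */
    return crc;
#endif
}

int main(void) {
    uint32_t crc = 0xFFFFFFFFu;
    for (const char *p = "123456789"; *p; ++p)
        crc = crc32c_byte(crc, (uint8_t)*p);
    printf("%08x\n", crc ^ 0xFFFFFFFFu);  /* CRC32C check value: e3069283 */
    return 0;
}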
src/vnode/src/vnodeMain.c
...
@@ -419,7 +419,11 @@ void vnodeDestroy(SVnodeObj *pVnode) {
   }

   if (pVnode->tsdb) {
-    code = tsdbCloseRepo(pVnode->tsdb, 1);
+    // the deleted vnode does not need to commit, so as to speed up the deletion
+    int toCommit = 1;
+    if (pVnode->dropped) toCommit = 0;
+
+    code = tsdbCloseRepo(pVnode->tsdb, toCommit);
     pVnode->tsdb = NULL;
   }
...
src/vnode/src/vnodeWrite.c
...
@@ -340,8 +340,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
       if (pWrite->processedCount >= 100) {
         vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code),
                pWrite->processedCount);
         pWrite->processedCount = 1;
-        dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code);
+        void *handle = pWrite->rpcMsg.handle;
+        taosFreeQitem(pWrite);
+        vnodeRelease(pVnode);
+        SRpcMsg rpcRsp = {.handle = handle, .code = code};
+        rpcSendResponse(&rpcRsp);
       } else {
         code = vnodePerformFlowCtrl(pWrite);
         if (code == 0) {
...
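The change above caps flow-control retries at 100 attempts and then answers the RPC directly: the handle is saved first, the queued item and the vnode reference are released, and only then is the response sent, instead of routing through `dnodeSendRpcVWriteRsp()`. A compilable sketch of that release-then-respond ordering; the types and helpers are stand-ins, not the real dnode API:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in types for the dnode/vnode structures touched by the diff. */
typedef struct { void *handle; } RpcMsgT;
typedef struct { int processedCount; RpcMsgT rpcMsg; } WriteReqT;

static void rpcRespond(void *handle, int code) {
    printf("respond to rpc handle %p with code %d\n", handle, code);
}
static void freeItem(WriteReqT *w)     { free(w); }
static void releaseVnode(void *vnode)  { (void)vnode; }
static void requeue(WriteReqT *w)      { w->processedCount++; }

/* Bounded retry: after 100 attempts, save the RPC handle, release everything
 * the request owns, then answer the client directly instead of re-queuing. */
static void flowCtrlRetry(void *vnode, WriteReqT *w, int code) {
    if (w->processedCount >= 100) {
        w->processedCount = 1;
        void *handle = w->rpcMsg.handle;  /* must be saved before freeItem() */
        freeItem(w);
        releaseVnode(vnode);
        rpcRespond(handle, code);
    } else {
        requeue(w);
    }
}

int main(void) {
    WriteReqT *w = calloc(1, sizeof(*w));
    w->processedCount = 100;
    flowCtrlRetry(NULL, w, -1);
    return 0;
}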
tests/Jenkinsfile
...
@@ -37,7 +37,7 @@ pipeline {
     stage('Parallel test stage') {
       parallel {
         stage('pytest') {
-          agent{label '184'}
+          agent{label 'slad1'}
           steps {
             pre_test()
             sh '''
...
@@ -62,7 +62,7 @@ pipeline {
         }
         stage('test_crash_gen') {
-          agent{label "185"}
+          agent{label "slad2"}
           steps {
             pre_test()
             sh '''
...
@@ -149,7 +149,7 @@ pipeline {
         }
         stage('test_valgrind') {
-          agent{label "186"}
+          agent{label "slad3"}
           steps {
             pre_test()
...
tests/mas/Jenkinsfile  0 → 100644
def pre_test(){
    sh '''
    sudo rmtaos||echo 'no taosd installed'
    '''
    sh '''
    cd ${WKC}
    git reset --hard
    git checkout $BRANCH_NAME
    git pull
    git submodule update
    cd ${WK}
    git reset --hard
    git checkout $BRANCH_NAME
    git pull
    export TZ=Asia/Harbin
    date
    rm -rf ${WK}/debug
    mkdir debug
    cd debug
    cmake .. > /dev/null
    make > /dev/null
    make install > /dev/null
    pip3 install ${WKC}/src/connector/python/linux/python3/
    '''
    return 1
}
pipeline {
  agent none
  environment{
      WK = '/var/lib/jenkins/workspace/TDinternal'
      WKC= '/var/lib/jenkins/workspace/TDinternal/community'
  }
  stages {
      stage('Parallel test stage') {
        parallel {
          stage('pytest') {
            agent{label 'slam1'}
            steps {
              pre_test()
              sh '''
              cd ${WKC}/tests
              find pytest -name '*'sql|xargs rm -rf
              ./test-all.sh pytest
              date'''
            }
          }
          stage('test_b1') {
            agent{label 'slam2'}
            steps {
              pre_test()
              sh '''
              cd ${WKC}/tests
              ./test-all.sh b1
              date'''
            }
          }
          stage('test_crash_gen') {
            agent{label "slam3"}
            steps {
              pre_test()
              sh '''
              cd ${WKC}/tests/pytest
              '''
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/pytest
                  ./crash_gen.sh -a -p -t 4 -s 2000
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/pytest
                  rm -rf /var/lib/taos/*
                  rm -rf /var/log/taos/*
                  ./handle_crash_gen_val_log.sh
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/pytest
                  rm -rf /var/lib/taos/*
                  rm -rf /var/log/taos/*
                  ./handle_taosd_val_log.sh
                  '''
              }
              sh '''
              systemctl start taosd
              sleep 10
              '''
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/gotest
                  bash batchtest.sh
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
                  python3 PythonChecker.py
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/examples/JDBC/JDBCDemo/
                  mvn clean package assembly:single -DskipTests >/dev/null
                  java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/src/connector/jdbc
                  mvn clean package -Dmaven.test.skip=true >/dev/null
                  cd ${WKC}/tests/examples/JDBC/JDBCDemo/
                  java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
                  cd ${JENKINS_HOME}/workspace/nodejs
                  node nodejsChecker.js host=localhost
                  '''
              }
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
                  dotnet run
                  '''
              }
              sh '''
              systemctl stop taosd
              cd ${WKC}/tests
              ./test-all.sh b2
              date
              '''
              sh '''
              cd ${WKC}/tests
              ./test-all.sh full unit
              date'''
            }
          }
          stage('test_valgrind') {
            agent{label "slam4"}
            steps {
              pre_test()
              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                  sh '''
                  cd ${WKC}/tests/pytest
                  nohup taosd >/dev/null &
                  sleep 10
                  python3 concurrent_inquiry.py -c 1
                  '''
              }
              sh '''
              cd ${WKC}/tests
              ./test-all.sh full jdbc
              date'''
              sh '''
              cd ${WKC}/tests/pytest
              ./valgrind-test.sh 2>&1 > mem-error-out.log
              ./handle_val_log.sh
              date
              cd ${WKC}/tests
              ./test-all.sh b3
              date'''
              sh '''
              date
              cd ${WKC}/tests
              ./test-all.sh full example
              date'''
            }
          }
          stage('arm64_build'){
            agent{label 'arm64'}
            steps{
                sh '''
                cd ${WK}
                git fetch
                git checkout develop
                git pull
                cd ${WKC}
                git fetch
                git checkout develop
                git pull
                git submodule update
                cd ${WKC}/packaging
                ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
                '''
            }
          }
          stage('arm32_build'){
            agent{label 'arm32'}
            steps{
                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
                    sh '''
                    cd ${WK}
                    git fetch
                    git checkout develop
                    git pull
                    cd ${WKC}
                    git fetch
                    git checkout develop
                    git pull
                    git submodule update
                    cd ${WKC}/packaging
                    ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
                    '''
                }
            }
          }
        }
      }
  }
  post {
    success {
      emailext (
        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        </head>
        <body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
            <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
                <tr>
                    <td><br />
                        <b><font color="#0B610B"><font size="6">构建信息</font></font></b>
                        <hr size="2" width="100%" align="center" /></td>
                </tr>
                <tr>
                    <td>
                        <ul>
                        <div style="font-size:18px">
                            <li>构建名称>>分支:${PROJECT_NAME}</li>
                            <li>构建结果:<span style="color:green"> Successful </span></li>
                            <li>构建编号:${BUILD_NUMBER}</li>
                            <li>触发用户:${CAUSE}</li>
                            <li>变更概要:${CHANGES}</li>
                            <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
                            <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
                            <li>变更集:${JELLY_SCRIPT}</li>
                        </div>
                        </ul>
                    </td>
                </tr>
            </table></font>
        </body>
        </html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
    failure {
      emailext (
        subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        </head>
        <body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
            <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
                <tr>
                    <td><br />
                        <b><font color="#0B610B"><font size="6">构建信息</font></font></b>
                        <hr size="2" width="100%" align="center" /></td>
                </tr>
                <tr>
                    <td>
                        <ul>
                        <div style="font-size:18px">
                            <li>构建名称>>分支:${PROJECT_NAME}</li>
                            <li>构建结果:<span style="color:green"> Successful </span></li>
                            <li>构建编号:${BUILD_NUMBER}</li>
                            <li>触发用户:${CAUSE}</li>
                            <li>变更概要:${CHANGES}</li>
                            <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
                            <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
                            <li>变更集:${JELLY_SCRIPT}</li>
                        </div>
                        </ul>
                    </td>
                </tr>
            </table></font>
        </body>
        </html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
  }
}
\ No newline at end of file
tests/perftest-scripts/perftest-query.sh
...
@@ -64,18 +64,25 @@ function runQueryPerfTest {
   [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT
   nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 &
   echoInfo "Wait TDengine to start"
-  sleep 300
+  sleep 60
   echoInfo "Run Performance Test"
   cd $WORK_DIR/TDengine/tests/pytest

   python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT

+  mkdir -p /var/lib/perf/
+  mkdir -p /var/log/perf/
+  rm -rf /var/lib/perf/*
+  rm -rf /var/log/perf/*
+  nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 &
+  echoInfo "Wait TDengine to start"
+  sleep 10
+  echoInfo "Run Performance Test"
+  cd $WORK_DIR/TDengine/tests/pytest
+
   python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT

   python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
+
+  python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
 }
...
tests/pytest/crash_gen/service_manager.py
...
@@ -22,7 +22,7 @@ from queue import Queue, Empty
 from .shared.config import Config
 from .shared.db import DbTarget, DbConn
 from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
-from .shared.types import DirPath
+from .shared.types import DirPath, IpcStream

 # from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
 # from crash_gen.db import DbConn, DbTarget
...
@@ -177,13 +177,12 @@ quorum 2
         return "127.0.0.1"

     def getServiceCmdLine(self): # to start the instance
-        if Config.getConfig().track_memory_leaks:
-            Logging.info("Invoking VALGRIND on service...")
-            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
-        else:
-            # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
-            return ["exec " + self.getExecFile(), '-c', self.getCfgDir()]  # used in subproce.Popen()
+        cmdLine = []
+        if Config.getConfig().track_memory_leaks:
+            Logging.info("Invoking VALGRIND on service...")
+            cmdLine = ['valgrind', '--leak-check=yes']
+        # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
+        cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()]  # used in subproce.Popen()
+        return cmdLine

     def _getDnodes(self, dbc):
         dbc.query("show dnodes")
...
@@ -281,16 +280,16 @@ class TdeSubProcess:
         return '[TdeSubProc: pid = {}, status = {}]'.format(
             self.getPid(), self.getStatus() )

-    def getStdOut(self) -> BinaryIO :
+    def getIpcStdOut(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDOUT IPC")
         # Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
-        return typing.cast(BinaryIO, self._popen.stdout)
+        return typing.cast(IpcStream, self._popen.stdout)

-    def getStdErr(self) -> BinaryIO :
+    def getIpcStdErr(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDERR IPC")
-        return typing.cast(BinaryIO, self._popen.stderr)
+        return typing.cast(IpcStream, self._popen.stderr)

     # Now it's always running, since we matched the life cycle
     # def isRunning(self):
...
@@ -301,11 +300,6 @@ class TdeSubProcess:
     def _start(self, cmdLine) -> Popen :
         ON_POSIX = 'posix' in sys.builtin_module_names

-        # Sanity check
-        # if self.subProcess:  # already there
-        #     raise RuntimeError("Corrupt process state")
-
         # Prepare environment variables for coverage information
         # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
...
@@ -314,9 +308,8 @@ class TdeSubProcess:
         # print(myEnv)
         # print("Starting TDengine with env: ", myEnv.items())
-        # print("Starting TDengine via Shell: {}".format(cmdLineStr))
+        print("Starting TDengine: {}".format(cmdLine))

-        # useShell = True  # Needed to pass environments into it
         return Popen(
             ' '.join(cmdLine),  # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True,  # Always use shell, since we need to pass ENV vars
...
@@ -732,19 +725,19 @@ class ServiceManagerThread:
         self._ipcQueue = Queue() # type: Queue
         self._thread = threading.Thread( # First thread captures server OUTPUT
             target=self.svcOutputReader,
-            args=(subProc.getStdOut(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdOut(), self._ipcQueue, logDir))
         self._thread.daemon = True # thread dies with the program
         self._thread.start()
         time.sleep(0.01)
         if not self._thread.is_alive(): # What happened?
-            Logging.info("Failed to started process to monitor STDOUT")
+            Logging.info("Failed to start process to monitor STDOUT")
             self.stop()
             raise CrashGenError("Failed to start thread to monitor STDOUT")
         Logging.info("Successfully started process to monitor STDOUT")

         self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
             target=self.svcErrorReader,
-            args=(subProc.getStdErr(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdErr(), self._ipcQueue, logDir))
         self._thread2.daemon = True # thread dies with the program
         self._thread2.start()
         time.sleep(0.01)
...
@@ -887,14 +880,19 @@ class ServiceManagerThread:
             print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
             return None

-    def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str
+    def _textChunkGenerator(self, streamIn: IpcStream, logDir: str, logFile: str
             ) -> Generator[TextChunk, None, None]:
         '''
-        Take an input stream with binary data, produced a generator of decoded
-        "text chunks", and also save the original binary data in a log file.
+        Take an input stream with binary data (likely from Popen), produced a generator of decoded
+        "text chunks".
+
+        Side effect: it also save the original binary data in a log file.
         '''
         os.makedirs(logDir, exist_ok=True)
         logF = open(os.path.join(logDir, logFile), 'wb')
+        if logF is None:
+            Logging.error("Failed to open log file (binary write): {}/{}".format(logDir, logFile))
+            return
         for bChunk in iter(streamIn.readline, b''):
             logF.write(bChunk) # Write to log file immediately
             tChunk = self._decodeBinaryChunk(bChunk) # decode
...
@@ -902,14 +900,14 @@ class ServiceManagerThread:
                 yield tChunk # TODO: split into actual text lines

         # At the end...
-        streamIn.close() # Close the stream
-        logF.close() # Close the output file
+        streamIn.close() # Close the incoming stream
+        logF.close() # Close the log file

-    def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str):
+    def svcOutputReader(self, ipcStdOut: IpcStream, queue, logDir: str):
         '''
         The infinite routine that processes the STDOUT stream for the sub process being managed.

-        :param stdOut: the IO stream object used to fetch the data from
+        :param ipcStdOut: the IO stream object used to fetch the data from
         :param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
         :param logDir: where we should dump a verbatim output file
         '''
...
@@ -917,7 +915,7 @@ class ServiceManagerThread:
         # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
         # print("This is the svcOutput Reader...")
         # stdOut.readline() # Skip the first output? TODO: remove?
-        for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') :
+        for tChunk in self._textChunkGenerator(ipcStdOut, logDir, 'stdout.log') :
             queue.put(tChunk) # tChunk garanteed not to be None
             self._printProgress("_i")
...
@@ -940,12 +938,12 @@ class ServiceManagerThread:
             Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
             self.setStatus(Status.STATUS_STOPPED)

-    def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str):
+    def svcErrorReader(self, ipcStdErr: IpcStream, queue, logDir: str):
         # os.makedirs(logDir, exist_ok=True)
         # logFile = os.path.join(logDir,'stderr.log')
         # fErr = open(logFile, 'wb')
         # for line in iter(err.readline, b''):
-        for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') :
+        for tChunk in self._textChunkGenerator(ipcStdErr, logDir, 'stderr.log') :
             queue.put(tChunk) # tChunk garanteed not to be None
             # fErr.write(line)
             Logging.info("TDengine STDERR: {}".format(tChunk))
...
tests/pytest/crash_gen/shared/types.py
-from typing import Any, List, Dict, NewType
+from typing import Any, BinaryIO, List, Dict, NewType
 from enum import Enum

 DirPath = NewType('DirPath', str)
...
@@ -26,3 +26,5 @@ class TdDataType(Enum):
 TdColumns = Dict[str, TdDataType]
 TdTags = Dict[str, TdDataType]
+
+IpcStream = NewType('IpcStream', BinaryIO)
\ No newline at end of file
tests/pytest/fulltest.sh
...
@@ -183,7 +183,7 @@ python3 ./test.py -f stable/query_after_reset.py
 # perfbenchmark
 python3 ./test.py -f perfbenchmark/bug3433.py
 #python3 ./test.py -f perfbenchmark/bug3589.py
+python3 ./test.py -f perfbenchmark/taosdemoInsert.py

 #query
 python3 ./test.py -f query/filter.py
...
tests/pytest/insert/insertFromCSVPerformance.py
...
@@ -31,7 +31,7 @@ class insertFromCSVPerformace:
         self.host = "127.0.0.1"
         self.user = "root"
         self.password = "taosdata"
-        self.config = "/etc/taosperf"
+        self.config = "/etc/perf"
         self.conn = taos.connect(
             self.host,
             self.user,
...
tests/pytest/perfbenchmark/taosdemoInsert.py  0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import
taos
import
sys
import
os
import
json
import
argparse
import
subprocess
import
datetime
import
re
from
multiprocessing
import
cpu_count
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
*
from
util.dnodes
import
TDDnode
class
Taosdemo
:
def
__init__
(
self
,
clearCache
,
dbName
,
keep
):
self
.
clearCache
=
clearCache
self
.
dbname
=
dbName
self
.
drop
=
"yes"
self
.
keep
=
keep
self
.
host
=
"127.0.0.1"
self
.
user
=
"root"
self
.
password
=
"taosdata"
# self.config = "/etc/taosperf"
# self.conn = taos.connect(
# self.host,
# self.user,
# self.password,
# self.config)
# env config
def
getBuildPath
(
self
)
->
str
:
selfPath
=
os
.
path
.
dirname
(
os
.
path
.
realpath
(
__file__
))
if
(
"community"
in
selfPath
):
projPath
=
selfPath
[:
selfPath
.
find
(
"community"
)]
else
:
projPath
=
selfPath
[:
selfPath
.
find
(
"tests"
)]
for
root
,
dirs
,
files
in
os
.
walk
(
projPath
):
if
(
"taosd"
in
files
):
rootRealPath
=
os
.
path
.
dirname
(
os
.
path
.
realpath
(
root
))
if
(
"packaging"
not
in
rootRealPath
):
buildPath
=
root
[:
len
(
root
)
-
len
(
"/debug/build/bin"
)]
break
return
buildPath
def
getExeToolsDir
(
self
)
->
str
:
self
.
debugdir
=
self
.
getBuildPath
()
+
"/debug/build/bin"
return
self
.
debugdir
def
getCfgDir
(
self
)
->
str
:
self
.
config
=
self
.
getBuildPath
()
+
"/sim/dnode1/cfg"
return
self
.
config
# taodemo insert file config
def
dbinfocfg
(
self
)
->
dict
:
return
{
"name"
:
self
.
dbname
,
"drop"
:
self
.
drop
,
"replica"
:
1
,
"days"
:
10
,
"cache"
:
16
,
"blocks"
:
8
,
"precision"
:
"ms"
,
"keep"
:
self
.
keep
,
"minRows"
:
100
,
"maxRows"
:
4096
,
"comp"
:
2
,
"walLevel"
:
1
,
"cachelast"
:
0
,
"quorum"
:
1
,
"fsync"
:
3000
,
"update"
:
0
}
def
type_check
(
func
):
def
wrapper
(
self
,
**
kwargs
):
num_types
=
[
"int"
,
"float"
,
"bigint"
,
"tinyint"
,
"smallint"
,
"double"
]
str_types
=
[
"binary"
,
"nchar"
]
for
k
,
v
in
kwargs
.
items
():
if
k
.
lower
()
not
in
num_types
and
k
.
lower
()
not
in
str_types
:
return
f
"args
{
k
}
type error, not allowed"
elif
not
isinstance
(
v
,
(
int
,
list
,
tuple
)):
return
f
"value
{
v
}
type error, not allowed"
elif
k
.
lower
()
in
num_types
and
not
isinstance
(
v
,
int
):
return
f
"arg
{
v
}
takes 1 positional argument must be type int "
elif
isinstance
(
v
,
(
list
,
tuple
))
and
len
(
v
)
>
2
:
return
f
"arg
{
v
}
takes from 1 to 2 positional arguments but more than 2 were given "
elif
isinstance
(
v
,(
list
,
tuple
))
and
[
False
for
_
in
v
if
not
isinstance
(
_
,
int
)
]:
return
f
"arg
{
v
}
takes from 1 to 2 positional arguments must be type int "
else
:
pass
return
func
(
self
,
**
kwargs
)
return
wrapper
@
type_check
def
column_tag_count
(
self
,
**
column_tag
)
->
list
:
init_column_tag
=
[]
for
k
,
v
in
column_tag
.
items
():
if
re
.
search
(
k
,
"int, float, bigint, tinyint, smallint, double"
,
re
.
IGNORECASE
):
init_column_tag
.
append
({
"type"
:
k
,
"count"
:
v
})
elif
re
.
search
(
k
,
"binary, nchar"
,
re
.
IGNORECASE
):
if
isinstance
(
v
,
int
):
init_column_tag
.
append
({
"type"
:
k
,
"count"
:
v
,
"len"
:
8
})
elif
len
(
v
)
==
1
:
init_column_tag
.
append
({
"type"
:
k
,
"count"
:
v
[
0
],
"len"
:
8
})
else
:
init_column_tag
.
append
({
"type"
:
k
,
"count"
:
v
[
0
],
"len"
:
v
[
1
]})
return
init_column_tag
def
stbcfg
(
self
,
stb
:
str
,
child_tab_count
:
int
,
rows
:
int
,
prechildtab
:
str
,
columns
:
dict
,
tags
:
dict
)
->
dict
:
return
{
"name"
:
stb
,
"child_table_exists"
:
"no"
,
"childtable_count"
:
child_tab_count
,
"childtable_prefix"
:
prechildtab
,
"auto_create_table"
:
"no"
,
"batch_create_tbl_num"
:
10
,
"data_source"
:
"rand"
,
"insert_mode"
:
"taosc"
,
"insert_rows"
:
rows
,
"childtable_limit"
:
0
,
"childtable_offset"
:
0
,
"rows_per_tbl"
:
1
,
"max_sql_len"
:
65480
,
"disorder_ratio"
:
0
,
"disorder_range"
:
1000
,
"timestamp_step"
:
10
,
"start_timestamp"
:
f
"
{
datetime
.
datetime
.
now
():
%
F
%
X
}
"
,
"sample_format"
:
"csv"
,
"sample_file"
:
"./sample.csv"
,
"tags_file"
:
""
,
"columns"
:
self
.
column_tag_count
(
**
columns
),
"tags"
:
self
.
column_tag_count
(
**
tags
)
}
def
schemecfg
(
self
,
intcount
=
1
,
floatcount
=
0
,
bcount
=
0
,
tcount
=
0
,
scount
=
0
,
doublecount
=
0
,
binarycount
=
0
,
ncharcount
=
0
):
return
{
"INT"
:
intcount
,
"FLOAT"
:
floatcount
,
"BIGINT"
:
bcount
,
"TINYINT"
:
tcount
,
"SMALLINT"
:
scount
,
"DOUBLE"
:
doublecount
,
"BINARY"
:
binarycount
,
"NCHAR"
:
ncharcount
}
def
insertcfg
(
self
,
db
:
dict
,
stbs
:
list
)
->
dict
:
return
{
"filetype"
:
"insert"
,
"cfgdir"
:
self
.
config
,
"host"
:
self
.
host
,
"port"
:
6030
,
"user"
:
self
.
user
,
"password"
:
self
.
password
,
"thread_count"
:
cpu_count
(),
"thread_count_create_tbl"
:
cpu_count
(),
"result_file"
:
"/tmp/insert_res.txt"
,
"confirm_parameter_prompt"
:
"no"
,
"insert_interval"
:
0
,
"num_of_records_per_req"
:
100
,
"max_sql_len"
:
1024000
,
"databases"
:
[{
"dbinfo"
:
db
,
"super_tables"
:
stbs
}]
}
def
createinsertfile
(
self
,
db
:
dict
,
stbs
:
list
)
->
str
:
date
=
datetime
.
datetime
.
now
()
file_create_table
=
f
"/tmp/insert_
{
date
:
%
F
-%
H
%
M
}
.json"
with
open
(
file_create_table
,
'w'
)
as
f
:
json
.
dump
(
self
.
insertcfg
(
db
,
stbs
),
f
)
return
file_create_table
# taosdemo query file config
def
querysqls
(
self
,
sql
:
str
)
->
list
:
return
[{
"sql"
:
sql
,
"result"
:
""
}]
def
querycfg
(
self
,
sql
:
str
)
->
dict
:
return
{
"filetype"
:
"query"
,
"cfgdir"
:
self
.
config
,
"host"
:
self
.
host
,
"port"
:
6030
,
"user"
:
self
.
user
,
"password"
:
self
.
password
,
"confirm_parameter_prompt"
:
"yes"
,
"query_times"
:
10
,
"query_mode"
:
"taosc"
,
"databases"
:
self
.
dbname
,
"specified_table_query"
:
{
"query_interval"
:
0
,
"concurrent"
:
cpu_count
(),
"sqls"
:
self
.
querysqls
(
sql
)
}
}
def
createqueryfile
(
self
,
sql
:
str
):
date
=
datetime
.
datetime
.
now
()
file_query_table
=
f
"/tmp/query_
{
date
:
%
F
-%
H
%
M
}
.json"
with
open
(
file_query_table
,
"w"
)
as
f
:
json
.
dump
(
self
.
querycfg
(
sql
),
f
)
return
file_query_table
# Execute taosdemo, and delete temporary files when finished
def
taosdemotable
(
self
,
filepath
:
str
,
resultfile
=
"/dev/null"
):
taosdemopath
=
self
.
getBuildPath
()
+
"/debug/build/bin"
with
open
(
filepath
,
"r"
)
as
f
:
filetype
=
json
.
load
(
f
)[
"filetype"
]
if
filetype
==
"insert"
:
taosdemo_table_cmd
=
f
"
{
taosdemopath
}
/taosdemo -f
{
filepath
}
>
{
resultfile
}
2>&1"
else
:
taosdemo_table_cmd
=
f
"yes |
{
taosdemopath
}
/taosdemo -f
{
filepath
}
>
{
resultfile
}
2>&1"
try
:
_
=
subprocess
.
check_output
(
taosdemo_table_cmd
,
shell
=
True
).
decode
(
"utf-8"
)
except
subprocess
.
CalledProcessError
as
e
:
_
=
e
.
output
    def droptmpfile(self, filepath: str):
        drop_file_cmd = f"[ -f {filepath} ] && rm -f {filepath}"
        try:
            _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output
    # TODO: complete the TD-4153 data-insert and client-side query performance tests.
    def td4153insert(self):
        tdLog.printNoPrefix("========== start to create table and insert data ==========")
        self.dbname = "td4153"
        db = self.dbinfocfg()
        stblist = []

        columntype = self.schemecfg(intcount=1, ncharcount=100)
        tagtype = self.schemecfg(intcount=1)
        stbname = "stb1"
        prechild = "t1"
        stable = self.stbcfg(
            stb=stbname,
            prechildtab=prechild,
            child_tab_count=2,
            rows=10000,
            columns=columntype,
            tags=tagtype
        )
        stblist.append(stable)
        insertfile = self.createinsertfile(db=db, stbs=stblist)

        nmon_file = f"/tmp/insert_{datetime.datetime.now():%F-%H%M}.nmon"
        cmd = f"nmon -s5 -F {nmon_file} -m /tmp/"
        try:
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

        self.taosdemotable(insertfile)
        self.droptmpfile(insertfile)
        self.droptmpfile("/tmp/insert_res.txt")

        # To keep the number of performance files down, the nmon file is deleted;
        # this delete can be commented out during an actual test run.
        self.droptmpfile(nmon_file)

        # look up the nmon pid (the output is currently unused)
        cmd = f"ps -ef|grep -w nmon| grep -v grep | awk '{{print $2}}'"
        try:
            time.sleep(10)
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except BaseException as e:
            raise e
    def td4153query(self):
        tdLog.printNoPrefix("========== start to query operation ==========")

        sqls = {
            "select_all": "select * from stb1",
            "select_join": "select * from t10, t11 where t10.ts=t11.ts"
        }

        for type, sql in sqls.items():
            result_file = f"/tmp/queryResult_{type}.log"
            query_file = self.createqueryfile(sql)
            try:
                self.taosdemotable(query_file, resultfile=result_file)
            except subprocess.CalledProcessError as e:
                out_put = e.output
            if result_file:
                print(f"execute rows {type.split('_')[1]} sql, the sql is: {sql}")
                max_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{max=$2;next}}{{max=max>$2?max:$2}}END{{print "Max=",max,"s"}}'
                '''
                max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {max_sql_time}")

                min_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{min=$2;next}}{{min=min<$2?min:$2}}END{{print "Min=",min,"s"}}'
                '''
                min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {min_sql_time}")

                avg_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk '{{sum+=$2}}END{{print "Average=",sum/NR,"s"}}'
                '''
                avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {avg_sql_time}")

            self.droptmpfile(query_file)
            self.droptmpfile(result_file)

        drop_query_tmt_file_cmd = " find ./ -name 'querySystemInfo-*' -type f -exec rm {} \; "
        try:
            _ = subprocess.check_output(drop_query_tmt_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output
        pass
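    # Illustration (not part of the original file): the three awk pipelines above
    # all aggregate the second field of the "Spent ... s" lines that taosdemo
    # writes to the result file. A pure-Python sketch of the same max/min/average
    # computation (`spent_seconds` is a hypothetical helper; requires `import re`):
    #
    #   def spent_seconds(result_file):
    #       with open(result_file) as fp:
    #           matches = (re.search(r"Spent\s+([0-9.]+)", line) for line in fp)
    #           times = [float(m.group(1)) for m in matches if m]
    #       return max(times), min(times), sum(times) / len(times)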
    def td4153(self):
        self.td4153insert()
        self.td4153query()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-r',
        '--remove-cache',
        action='store_true',
        default=False,
        help='clear cache before query (default: False)')
    parser.add_argument(
        '-d',
        '--database-name',
        action='store',
        default='db',
        type=str,
        help='Database name to be created (default: db)')
    parser.add_argument(
        '-k',
        '--keep-time',
        action='store',
        default=3650,
        type=int,
        help='Database keep parameter (default: 3650)')
    args = parser.parse_args()

    taosdemo = Taosdemo(args.remove_cache, args.database_name, args.keep_time)
    # taosdemo.conn = taos.connect(
    #     taosdemo.host,
    #     taosdemo.user,
    #     taosdemo.password,
    #     taosdemo.config
    # )

    debugdir = taosdemo.getExeToolsDir()
    cfgdir = taosdemo.getCfgDir()
    cmd = f"{debugdir}/taosd -c {cfgdir} >/dev/null 2>&1 &"
    try:
        _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
    except subprocess.CalledProcessError as e:
        _ = e.output

    if taosdemo.clearCache:
        # clearing the OS page cache requires root permission
        subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches", shell=True).decode("utf-8")

    taosdemo.td4153()
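# Example invocation (illustration, not part of the original file; -r requires
# root because it writes to /proc/sys/vm/drop_caches):
#   python3 taosdemoInsert.py -r -d td4153 -k 3650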
tests/pytest/tools/taosdemoPerformance.py @ 162c0124
@@ -24,7 +24,7 @@ class taosdemoPerformace:
         self.host = "127.0.0.1"
         self.user = "root"
         self.password = "taosdata"
-        self.config = "/etc/taosperf"
+        self.config = "/etc/perf"
         self.conn = taos.connect(
             self.host,
             self.user,
@@ -77,7 +77,7 @@ class taosdemoPerformace:
         insert_data = {
             "filetype": "insert",
-            "cfgdir": "/etc/taosperf",
+            "cfgdir": "/etc/perf",
             "host": "127.0.0.1",
             "port": 6030,
             "user": "root",
@@ -104,7 +104,7 @@ class taosdemoPerformace:
         return output

     def insertData(self):
-        os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson())
+        os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
         self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
         self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
         self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
tests/pytest/tools/taosdemoTestWithJson.py @ 162c0124
@@ -23,32 +23,32 @@ class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)

     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))

-        if ("community" in selfPath):
-            projPath = selfPath[:selfPath.find("community")]
+        if "community" in selfPath:
+            projPath = selfPath[: selfPath.find("community")]
         else:
-            projPath = selfPath[:selfPath.find("tests")]
+            projPath = selfPath[: selfPath.find("tests")]

         for root, dirs, files in os.walk(projPath):
-            if ("taosd" in files):
+            if "taosd" in files:
                 rootRealPath = os.path.dirname(os.path.realpath(root))
-                if ("packaging" not in rootRealPath):
-                    buildPath = root[:len(root) - len("/build/bin")]
+                if "packaging" not in rootRealPath:
+                    buildPath = root[: len(root) - len("/build/bin")]
                     break
         return buildPath

     def run(self):
         tdSql.prepare()
         buildPath = self.getBuildPath()
-        if (buildPath == ""):
+        if buildPath == "":
             tdLog.exit("taosd not found!")
         else:
             tdLog.info("taosd found in %s" % buildPath)
-        binPath = buildPath + "/build/bin/"
-        os.system("yes | %staosdemo -f tools/insert.json" % binPath)
+        binPath = buildPath + "/build/bin/"
+        os.system("%staosdemo -f tools/insert.json -y" % binPath)

         tdSql.execute("use db01")
         tdSql.query("select count(*) from stb01")
...
tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
浏览文件 @
162c0124
...
...
@@ -158,7 +158,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus !=
master
then
if $dnode3Vtatus !=
unsynced
then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
...
...