Commit 9286f77e, authored on Apr 27, 2021 by Haojun Liao
[td-225]merge develop
Parents: e40c1ffc, 4df6967d
Showing 68 changed files with 3,976 additions and 241 deletions (+3976 / -241)
packaging/cfg/taos.cfg  (+1 / -1)
src/client/src/tscAsync.c  (+4 / -4)
src/client/src/tscLocal.c  (+1 / -1)
src/client/src/tscLocalMerge.c  (+9 / -9)
src/client/src/tscParseInsert.c  (+3 / -3)
src/client/src/tscProfile.c  (+1 / -1)
src/client/src/tscSQLParser.c  (+1 / -1)
src/client/src/tscServer.c  (+30 / -30)
src/client/src/tscSql.c  (+6 / -7)
src/client/src/tscStream.c  (+12 / -4)
src/client/src/tscSub.c  (+2 / -2)
src/client/src/tscSubquery.c  (+67 / -63)
src/client/src/tscUtil.c  (+7 / -7)
src/common/src/tglobal.c  (+1 / -1)
src/kit/taosdemo/taosdemo.c  (+99 / -74)
src/mnode/inc/mnodeDb.h  (+1 / -0)
src/mnode/src/mnodeDb.c  (+18 / -0)
src/mnode/src/mnodeDnode.c  (+9 / -0)
src/vnode/src/vnodeWrite.c  (+4 / -2)
tests/perftest-scripts/perftest-taosdemo-compare.sh  (+147 / -0)
tests/pytest/client/thousandsofClient.py  (+55 / -0)
tests/pytest/cluster/TD-3693/how-to-use  (+9 / -0)
tests/pytest/cluster/TD-3693/insert1Data.json  (+88 / -0)
tests/pytest/cluster/TD-3693/insert2Data.json  (+88 / -0)
tests/pytest/cluster/TD-3693/multClient.py  (+74 / -0)
tests/pytest/cluster/TD-3693/multQuery.py  (+72 / -0)
tests/pytest/cluster/TD-3693/queryCount.json  (+15 / -0)
tests/pytest/fulltest.sh  (+7 / -0)
tests/pytest/functions/function_count_last_stab.py  (+70 / -0)
tests/pytest/functions/function_operations.py  (+2 / -2)
tests/pytest/query/queryFilterTswithDateUnit.py  (+25 / -22)
tests/pytest/query/queryTscomputWithNow.py  (+177 / -0)
tests/pytest/stream/cqSupportBefore1970.py  (+93 / -0)
tests/pytest/stream/showStreamExecTimeisNull.py  (+97 / -0)
tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json  (+62 / -0)
tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py  (+89 / -0)
tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json  (+20 / -0)
tests/pytest/tools/taosdemoAllTest/convertResFile.py  (+35 / -0)
tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-disorder.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json  (+62 / -0)
tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-newdb.json  (+166 / -0)
tests/pytest/tools/taosdemoAllTest/insert-newtable.json  (+166 / -0)
tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json  (+62 / -0)
tests/pytest/tools/taosdemoAllTest/insert-offset.json  (+166 / -0)
tests/pytest/tools/taosdemoAllTest/insert-renewdb.json  (+166 / -0)
tests/pytest/tools/taosdemoAllTest/insert-sample.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/insert-timestep.json  (+88 / -0)
tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py  (+72 / -0)
tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json  (+62 / -0)
tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json  (+62 / -0)
tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json  (+62 / -0)
tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json  (+61 / -0)
tests/pytest/tools/taosdemoAllTest/sample.csv  (+3 / -0)
tests/pytest/tools/taosdemoAllTest/speciQuery.json  (+36 / -0)
tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json  (+86 / -0)
tests/pytest/tools/taosdemoAllTest/tags.csv  (+2 / -0)
tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py  (+229 / -0)
tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py  (+91 / -0)
tests/script/unique/cluster/balance2.sim  (+19 / -4)
tests/script/unique/dnode/remove1.sim  (+2 / -2)
tests/script/unique/dnode/remove2.sim  (+20 / -1)
packaging/cfg/taos.cfg
@@ -64,7 +64,7 @@
 # monitorInterval          30

 # number of seconds allowed for a dnode to be offline, for cluster only
-# offlineThreshold         8640000
+# offlineThreshold         864000

 # RPC re-try timer, millisecond
 # rpcTimer                 300
src/client/src/tscAsync.c
@@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
   pSql->sqlstr = calloc(1, sqlLen + 1);
   if (pSql->sqlstr == NULL) {
-    tscError("%p failed to malloc sql string buffer", pSql);
+    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
     pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     tscAsyncResultOnError(pSql);
     return;
@@ -81,7 +81,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
 TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
   STscObj *pObj = (STscObj *)taos;
   if (pObj == NULL || pObj->signature != pObj) {
-    tscError("bug!!! pObj:%p", pObj);
+    tscError("pObj:%p is NULL or freed", pObj);
     terrno = TSDB_CODE_TSC_DISCONNECTED;
     tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
     return NULL;
@@ -288,7 +288,7 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) {
   }

   assert(pSql->res.code != TSDB_CODE_SUCCESS);
-  tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code));
+  tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code));

   SSqlRes *pRes = &pSql->res;
   if (pSql->fp == NULL || pSql->fetchFp == NULL){
@@ -368,7 +368,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
   SSqlObj* sub = (SSqlObj*) res;
   const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
   if (code != TSDB_CODE_SUCCESS) {
-    tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
+    tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
     goto _error;
   }
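Nearly every client-side hunk in this commit follows the same pattern: log calls that used to identify the request by the raw SSqlObj pointer with %p now print what appears to be a 64-bit object id, pSql->self, with the 0x%"PRIx64" format. The short standalone sketch below illustrates only that formatting change; the struct and the fprintf-based logger are hypothetical stand-ins, not TDengine's actual SSqlObj or tscError macro.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the client's SSqlObj: only the field used here. */
typedef struct {
  uint64_t self;  /* 64-bit object id; unlike a pointer value it is not reused after free */
} DemoSqlObj;

/* Old style: identify the request by its pointer value. */
static void log_old(const DemoSqlObj *pSql) {
  fprintf(stderr, "%p failed to malloc sql string buffer\n", (const void *)pSql);
}

/* New style: identify the request by its id, using PRIx64 from <inttypes.h>
 * so the format specifier is correct for a 64-bit value on every platform. */
static void log_new(const DemoSqlObj *pSql) {
  fprintf(stderr, "0x%" PRIx64 " failed to malloc sql string buffer\n", pSql->self);
}

int main(void) {
  DemoSqlObj obj = { .self = 0x12abcdef34ULL };
  log_old(&obj);
  log_new(&obj);
  return 0;
}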
src/client/src/tscLocal.c
@@ -926,7 +926,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
     pRes->code = tscProcessServStatus(pSql);
   } else {
     pRes->code = TSDB_CODE_TSC_INVALID_SQL;
-    tscError("%p not support command:%d", pSql, pCmd->command);
+    tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
   }

   // keep the code in local variable in order to avoid invalid read in case of async query
src/client/src/tscLocalMerge.c
@@ -113,14 +113,14 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
   if (pMemBuffer == NULL) {
     tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
-    tscError("%p pMemBuffer is NULL", pMemBuffer);
+    tscError("pMemBuffer:%p is NULL", pMemBuffer);
     pRes->code = TSDB_CODE_TSC_APP_ERROR;
     return;
   }

   if (pDesc->pColumnModel == NULL) {
     tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
-    tscError("%p no local buffer or intermediate result format model", pSql);
+    tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self);
     pRes->code = TSDB_CODE_TSC_APP_ERROR;
     return;
   }
@@ -144,7 +144,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
   }

   if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
-    tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
+    tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity,
              pMemBuffer[0]->pageSize);
     tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
@@ -154,9 +154,9 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
   size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;

-  SLocalMerger *pMerger = (SLocalMerger *) calloc(1, size);
-  if (pMerger == NULL) {
-    tscError("%p failed to create local merge structure, out of memory", pSql);
+  SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
+  if (pReducer == NULL) {
+    tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self);
     tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -180,7 +180,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
   for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
     SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
     if (ds == NULL) {
-      tscError("%p failed to create merge structure", pSql);
+      tscError("0x%"PRIx64" failed to create merge structure", pSql->self);
       pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
       tfree(pMerger);
       return;
@@ -539,7 +539,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
   (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
   if (*pMemBuffer == NULL) {
-    tscError("%p failed to allocate memory", pSql);
+    tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return pRes->code;
   }
@@ -548,7 +548,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
   pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
   if (pSchema == NULL) {
-    tscError("%p failed to allocate memory", pSql);
+    tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return pRes->code;
   }
src/client/src/tscParseInsert.c
@@ -1147,7 +1147,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
       return code;
     }

-    tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
+    tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
     pCmd->curSql = NULL;
     goto _clean;
   }
@@ -1415,7 +1415,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
     assert(pSql->res.numOfRows == 0);
     int32_t ret = fseek(fp, 0, SEEK_SET);
     if (ret < 0) {
-      tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
+      tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno));
       code = TAOS_SYSTEM_ERROR(errno);
       goto _error;
     }
@@ -1536,7 +1536,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
   FILE *fp = fopen(pCmd->payload, "rb");
   if (fp == NULL) {
     pSql->res.code = TAOS_SYSTEM_ERROR(errno);
-    tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
+    tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code));
     tfree(pSupporter);
     taos_free_result(pNew);
src/client/src/tscProfile.c
@@ -104,7 +104,7 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
   char *sql = malloc(sqlSize);
   if (sql == NULL) {
-    tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
+    tscError("0x%"PRIx64" failed to allocate memory to sent slow query to dnode", pSql->self);
     return;
   }
src/client/src/tscSQLParser.c
@@ -5222,7 +5222,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
     if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
-      tscError("%p failed to malloc for alter table msg", pSql);
+      tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
       return TSDB_CODE_TSC_OUT_OF_MEMORY;
     }
src/client/src/tscServer.c
@@ -222,7 +222,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
     assert(online <= total);
     if (online < total) {
-      tscError("HB:%p, total dnode:%d, online dnode:%d", pSql, total, online);
+      tscError("0x%"PRIx64", HB, total dnode:%d, online dnode:%d", pSql->self, total, online);
       pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
     }
@@ -274,7 +274,7 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
   taosReleaseRef(tscObjRef, pObj->hbrid);

   if (code != TSDB_CODE_SUCCESS) {
-    tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code));
+    tscError("0x%"PRIx64" failed to sent HB to server, reason:%s", pHB->self, tstrerror(code));
   }

   taosReleaseRef(tscRefId, rid);
@@ -286,7 +286,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
   char *pMsg = rpcMallocCont(pCmd->payloadLen);
   if (NULL == pMsg) {
-    tscError("%p msg:%s malloc failed", pSql, taosMsg[pSql->cmd.msgType]);
+    tscError("0x%"PRIx64" msg:%s malloc failed", pSql->self, taosMsg[pSql->cmd.msgType]);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -370,11 +370,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
         rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
       pSql->retry++;
-      tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), pSql->retry);
+      tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
       pSql->res.code = rpcMsg->code;  // keep the previous error code
       if (pSql->retry > pSql->maxRetry) {
-        tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
+        tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry);
       } else {
         // wait for a little bit moment and then retry
         // todo do not sleep in rpc callback thread, add this process into queueu to process
@@ -666,8 +666,8 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
       assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
       pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
     } else {
-      tscError("%p No vgroup info found", pSql);
+      tscError("0x%"PRIx64" No vgroup info found", pSql->self);
       *succeed = 0;
       return pMsg;
     }
@@ -767,7 +767,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
   // the queried table has been removed and a new table with the same name has already been created already
   // return error msg
   if (pExpr->uid != pTableMeta->id.uid) {
-    tscError("%p table has already been destroyed", addr);
+    tscError("0x%"PRIx64" table has already been destroyed", addr->self);
     return TSDB_CODE_TSC_INVALID_TABLE_NAME;
   }
@@ -823,7 +823,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
     tscError("%p failed to malloc for query msg", pSql);
     return TSDB_CODE_TSC_INVALID_SQL;  // todo add test for this
   }

   SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(pCmd);
   SQueryAttr query = {{0}};
@@ -1063,7 +1063,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   SSqlCmd *pCmd = &pSql->cmd;
   pCmd->payloadLen = sizeof(SCreateDnodeMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1081,7 +1081,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   SSqlCmd *pCmd = &pSql->cmd;
   pCmd->payloadLen = sizeof(SCreateAcctMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1127,7 +1127,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SCreateUserMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1166,7 +1166,7 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SDropDbMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1188,7 +1188,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SCMDropTableMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1209,7 +1209,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SDropDnodeMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1230,7 +1230,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1245,7 +1245,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SUseDbMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1262,7 +1262,7 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SSyncDbMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1281,7 +1281,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SShowMsg) + 100;
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1367,7 +1367,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   // Reallocate the payload size
   size = tscEstimateCreateTableMsgLength(pSql, pInfo);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
-    tscError("%p failed to malloc for create table msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for create table msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1466,7 +1466,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
   int size = tscEstimateAlterTableMsgLength(pCmd);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
-    tscError("%p failed to malloc for alter table msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1540,7 +1540,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SRetrieveTableMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1662,7 +1662,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pCmd->payloadLen = sizeof(SConnectMsg);
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
-    tscError("%p failed to malloc for query msg", pSql);
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1802,7 +1802,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100;
   if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
     pthread_mutex_unlock(&pObj->mutex);
-    tscError("%p failed to create heartbeat msg", pSql);
+    tscError("0x%"PRIx64" failed to create heartbeat msg", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -1878,7 +1878,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
   STableMeta * pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
   if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
-    tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
+    tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
     return TSDB_CODE_TSC_INVALID_VALUE;
   }
@@ -2072,7 +2072,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
     pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
     if (pInfo->vgroupList->numOfVgroups <= 0) {
       //tfree(pInfo->vgroupList);
-      tscError("%p empty vgroup info", pSql);
+      tscError("0x%"PRIx64" empty vgroup info", pSql->self);
     } else {
       for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
         // just init, no need to lock
@@ -2388,7 +2388,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
 static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
   SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
   if (NULL == pNew) {
-    tscError("%p malloc failed for new sqlobj to get table meta", pSql);
+    tscError("0x%"PRIx64" malloc failed for new sqlobj to get table meta", pSql->self);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -2402,7 +2402,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
   pNew->cmd.autoCreated = pSql->cmd.autoCreated;  // create table if not exists
   if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
-    tscError("%p malloc failed for payload to get table meta", pSql);
+    tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
     tscFreeSqlObj(pNew);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -2415,7 +2415,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
   if (pSql->cmd.autoCreated) {
     int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
     if (code != TSDB_CODE_SUCCESS) {
-      tscError("%p malloc failed for new tag data to get table meta", pSql);
+      tscError("0x%"PRIx64" malloc failed for new tag data to get table meta", pSql->self);
       tscFreeSqlObj(pNew);
       return TSDB_CODE_TSC_OUT_OF_MEMORY;
     }
@@ -2493,7 +2493,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
   char name[TSDB_TABLE_FNAME_LEN] = {0};
   int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
   if (code != TSDB_CODE_SUCCESS) {
-    tscError("%p failed to generate the table full name", pSql);
+    tscError("0x%"PRIx64" failed to generate the table full name", pSql->self);
     return TSDB_CODE_TSC_INVALID_SQL;
   }
src/client/src/tscSql.c
@@ -588,7 +588,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
 void taos_free_result(TAOS_RES *res) {
   SSqlObj* pSql = (SSqlObj*) res;
   if (pSql == NULL || pSql->signature != pSql) {
-    tscError("%p already released sqlObj", res);
+    tscError("0x%"PRIx64" already released sqlObj", pSql ? pSql->self : -1);
     return;
   }
@@ -881,15 +881,14 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
   int32_t sqlLen = (int32_t)strlen(sql);
   if (sqlLen > tsMaxSQLStringLen) {
-    tscError("%p sql too long", pSql);
+    tscError("0x%"PRIx64" sql too long", pSql->self);
     tfree(pSql);
     return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
   }

   pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
   if (pSql->sqlstr == NULL) {
-    tscError("%p failed to malloc sql string buffer", pSql);
-    tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
+    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
     tfree(pSql);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
@@ -914,7 +913,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
   }

   if (code != TSDB_CODE_SUCCESS) {
-    tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
+    tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
   }

   taos_free_result(pSql);
@@ -1031,14 +1030,14 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
   int32_t tblListLen = (int32_t)strlen(tableNameList);
   if (tblListLen > MAX_TABLE_NAME_LENGTH) {
-    tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
+    tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
     tscFreeSqlObj(pSql);
     return TSDB_CODE_TSC_INVALID_SQL;
   }

   char *str = calloc(1, tblListLen + 1);
   if (str == NULL) {
-    tscError("%p failed to malloc sql string buffer", pSql);
+    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
     tscFreeSqlObj(pSql);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
src/client/src/tscStream.c
@@ -203,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
     tNameExtractFullName(&pTableMetaInfo->name, name);
     taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+    tfree(pTableMetaInfo->pTableMeta);
+
+    tscFreeSqlResult(pStream->pSql);
+    tscFreeSubobj(pStream->pSql);
+    tfree(pStream->pSql->pSubs);
+    pStream->pSql->subState.numOfSub = 0;
+
     pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
     tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@@ -468,8 +476,8 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
   }

   if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
     tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64,
             pSql->self, pStream, pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
     pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
   }
@@ -601,7 +609,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
   if (pStream == NULL) {
-    tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
+    tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:0x%08x", pSql->self, sqlstr, pCmd->payload, pRes->code);
     tscFreeSqlObj(pSql);
     return NULL;
   }
@@ -617,7 +625,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
   pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
   if (pSql->sqlstr == NULL) {
-    tscError("%p failed to malloc sql string buffer", pSql);
+    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
     tscFreeSqlObj(pSql);
     return NULL;
   }
src/client/src/tscSub.c
@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
   SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
   if (pNew == NULL) {
-    tscError("failed to retrieve table id: cannot create new sql object.");
+    tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
     return NULL;
   } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
-    tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
+    tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
     return NULL;
   }
src/client/src/tscSubquery.c
(diff collapsed in the original view; not expanded here)
src/client/src/tscUtil.c
@@ -1262,7 +1262,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
     int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
                                 INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
     if (ret != TSDB_CODE_SUCCESS) {
-      tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
+      tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
       taosHashCleanup(pVnodeDataBlockHashList);
       tscDestroyBlockArrayList(pVnodeDataBlockList);
       return ret;
@@ -1281,7 +1281,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
         dataBuf->pData = tmp;
         memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
       } else {  // failed to allocate memory, free already allocated memory and return error code
-        tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize);
+        tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
         taosHashCleanup(pVnodeDataBlockHashList);
         tscDestroyBlockArrayList(pVnodeDataBlockList);
@@ -2489,7 +2489,7 @@ void registerSqlObj(SSqlObj* pSql) {
 SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
   SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
   if (pNew == NULL) {
-    tscError("%p new subquery failed, tableIndex:%d", pSql, 0);
+    tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, 0);
     return NULL;
   }
@@ -2503,7 +2503,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
   int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
   if (code != TSDB_CODE_SUCCESS) {
-    tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0);
+    tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
     free(pNew);
     return NULL;
   }
@@ -2579,7 +2579,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
   SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
   if (pNew == NULL) {
-    tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex);
+    tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, tableIndex);
     terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return NULL;
   }
@@ -2670,7 +2670,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
   }

   if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
-    tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
+    tscError("0x%"PRIx64" new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql->self, tableIndex, pTableMetaInfo->vgroupIndex);
     terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
     goto _error;
   }
@@ -2721,7 +2721,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
   // this case cannot be happened
   if (pFinalInfo->pTableMeta == NULL) {
-    tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
+    tscError("0x%"PRIx64" new subquery failed since no tableMeta, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
     if (pPrevSql != NULL) { // pass the previous error to client
       assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
src/common/src/tglobal.c
@@ -139,7 +139,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
 int8_t  tsEnableBalance = 1;
 int8_t  tsAlternativeRole = 0;
 int32_t tsBalanceInterval = 300;           // seconds
-int32_t tsOfflineThreshold = 86400 * 100;  // seconds 100 days
+int32_t tsOfflineThreshold = 86400 * 10;   // seconds of 10 days
 int32_t tsMnodeEqualVnodeNum = 4;
 int8_t  tsEnableFlowCtrl = 1;
 int8_t  tsEnableSlaveQuery = 1;
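The new tglobal.c default and the commented value in packaging/cfg/taos.cfg above describe the same threshold in the same unit (seconds); a trivial cross-check, not part of the commit, is below.

#include <stdio.h>

int main(void) {
  /* tsOfflineThreshold: 86400 seconds per day * 10 days = 864000 seconds,
     matching the taos.cfg comment "# offlineThreshold 864000". */
  const int secondsPerDay = 86400;
  const int days = 10;
  printf("%d\n", secondsPerDay * days);  /* prints 864000 */
  return 0;
}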
src/kit/taosdemo/taosdemo.c
浏览文件 @
9286f77e
...
@@ -81,7 +81,7 @@ enum QUERY_MODE {
...
@@ -81,7 +81,7 @@ enum QUERY_MODE {
#define MAX_DB_NAME_SIZE 64
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE
16000
#define MAX_DATA_SIZE
(16*1024)
#define MAX_NUM_DATATYPE 10
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1
/* –abort */
#define OPT_ABORT 1
/* –abort */
#define STRING_LEN 60000
#define STRING_LEN 60000
...
@@ -252,8 +252,8 @@ typedef struct SSuperTable_S {
...
@@ -252,8 +252,8 @@ typedef struct SSuperTable_S {
int
maxSqlLen
;
//
int
maxSqlLen
;
//
int
insertInterval
;
// insert interval, will override global insert interval
int
insertInterval
;
// insert interval, will override global insert interval
int64_t
insertRows
;
// 0: no limit
int64_t
insertRows
;
int
timeStampStep
;
int
64_t
timeStampStep
;
char
startTimestamp
[
MAX_TB_NAME_SIZE
];
char
startTimestamp
[
MAX_TB_NAME_SIZE
];
char
sampleFormat
[
MAX_TB_NAME_SIZE
];
// csv, json
char
sampleFormat
[
MAX_TB_NAME_SIZE
];
// csv, json
char
sampleFile
[
MAX_FILE_NAME_LEN
+
1
];
char
sampleFile
[
MAX_FILE_NAME_LEN
+
1
];
...
@@ -530,50 +530,50 @@ char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
     "max(col0)", "min(col0)", "first(col0)", "last(col0)"};

 SArguments g_args = {
     NULL,            // metaFile
     0,               // test_mode
     "127.0.0.1",     // host
     6030,            // port
     "root",          // user
 #ifdef _TD_POWER_
     "powerdb",       // password
 #else
     "taosdata",      // password
 #endif
     "test",          // database
     1,               // replica
     "t",             // tb_prefix
     NULL,            // sqlFile
     true,            // use_metric
     true,            // drop_database
     true,            // insert_only
     false,           // debug_print
     false,           // verbose_print
     false,           // performance statistic print
     false,           // answer_yes
     "./output.txt",  // output_file
     0,               // mode : sync or async
     {
     "INT",           // datatype
     "INT",           // datatype
     "INT",           // datatype
     "INT",           // datatype
     },
     16,              // len_of_binary
     4,               // num_of_CPR
     10,              // num_of_connections/thread
     0,               // insert_interval
     1,               // query_times
     0,               // interlace_rows
     30000,           // num_of_RPR
     1024000,         // max_sql_len
     10000,           // num_of_tables
     10000,           // num_of_DPT
     0,               // abort
     0,               // disorderRatio
     1000,            // disorderRange
     1,               // method_of_delete
     NULL             // arg_list
 };
...
@@ -1368,9 +1368,9 @@ static int printfInsertMeta() {
       printf("      disorderRatio:     \033[33m%d\033[0m\n",
              g_Dbs.db[i].superTbls[j].disorderRatio);
       printf("      maxSqlLen:         \033[33m%d\033[0m\n",
              g_Dbs.db[i].superTbls[j].maxSqlLen);
-      printf("      timeStampStep:     \033[33m%d\033[0m\n",
+      printf("      timeStampStep:     \033[33m%"PRId64"\033[0m\n",
              g_Dbs.db[i].superTbls[j].timeStampStep);
       printf("      startTimestamp:    \033[33m%s\033[0m\n",
              g_Dbs.db[i].superTbls[j].startTimestamp);
       printf("      sampleFormat:      \033[33m%s\033[0m\n",
...
@@ -1541,7 +1541,7 @@ static void printfInsertMetaToFile(FILE* fp) {
       fprintf(fp, "      disorderRatio:     %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
       fprintf(fp, "      maxSqlLen:         %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
-      fprintf(fp, "      timeStampStep:     %d\n", g_Dbs.db[i].superTbls[j].timeStampStep);
+      fprintf(fp, "      timeStampStep:     %"PRId64"\n", g_Dbs.db[i].superTbls[j].timeStampStep);
       fprintf(fp, "      startTimestamp:    %s\n", g_Dbs.db[i].superTbls[j].startTimestamp);
       fprintf(fp, "      sampleFormat:      %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
       fprintf(fp, "      sampleFile:        %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
...
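The format-string changes in the two hunks above go hand in hand with the int64_t widening: "%d" expects an int, so passing the now 64-bit timeStampStep through it is undefined behaviour on LP64 platforms. A minimal, self-contained example of the portable form:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void) {
    int64_t timeStampStep = 86400000;   /* one day in milliseconds */

    /* PRId64 from <inttypes.h> expands to the right conversion specifier
     * for int64_t on every platform, unlike a hard-coded "%d" or "%ld". */
    printf("timeStampStep: %" PRId64 "\n", timeStampStep);
    return 0;
}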
@@ -3657,7 +3657,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       goto PARSE_OVER;
     }
+/*
     cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
     if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
       g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
@@ -3667,7 +3666,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
       goto PARSE_OVER;
     }
+*/
     cJSON* childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists");  // yes, no
     if (childTblExists
...
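The commented-out block shows the usual cJSON pattern for an optional numeric field. A minimal sketch of the same pattern with a default fallback instead of a hard error; the helper name and defaultBatchNum parameter are illustrative, only the cJSON calls mirror the code above (assuming the cJSON header is available, as it is in taosdemo):

#include "cJSON.h"

/* Read "batch_create_tbl_num" if present and numeric; otherwise return the
 * caller-supplied default instead of rejecting the whole JSON file. */
static int readBatchCreateTblNum(cJSON *stbInfo, int defaultBatchNum) {
    cJSON *batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
    if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
        return (int)batchCreateTbl->valueint;
    }
    return defaultBatchNum;   /* field missing or wrong type: use the default */
}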
@@ -4646,9 +4644,9 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq)
 }

 static int generateDataTail(
         SSuperTable* superTblInfo,
         int batch, char* buffer, int remainderBufLen, int64_t insertRows,
-        int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) {
+        int64_t startFrom, int64_t startTime, int *pSamplePos, int *dataLen) {
   int len = 0;
   int ncols_per_record = 1; // count first col ts
...
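Switching startTime from uint64_t to int64_t matters for rows dated before 1970, which several of the new tests below rely on. A short sketch of why the sign matters (the 1940 value matches the one used in the test files):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void) {
    /* Epoch milliseconds before 1970-01-01 are negative, so a start time must
     * be signed; the same bit pattern read as unsigned becomes a huge positive
     * value and every ordering comparison goes the wrong way. */
    int64_t  startTime  = -946800000000LL;   /* 1940-01-01 00:00:00 at UTC+8 */
    uint64_t asUnsigned = (uint64_t)startTime;

    printf("signed   : %" PRId64 "\n", startTime);
    printf("unsigned : %" PRIu64 "\n", asUnsigned);   /* 18445797273709551616 */
    printf("before epoch? %s\n", startTime < 0 ? "yes" : "no");
    return 0;
}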
@@ -4868,6 +4866,8 @@ static int generateInterlaceDataBuffer(
     pstr += dataLen;
     *pRemainderBufLen -= dataLen;
   } else {
+    debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %d\n",
+            __func__, __LINE__, k, batchPerTbl);
     pstr -= headLen;
     pstr[0] = '\0';
     k = 0;
...
@@ -4925,10 +4925,24 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   debugPrint("[%d] %s() LN%d: ### interlace write\n",
          pThreadInfo->threadID, __func__, __LINE__);

+  int64_t insertRows;
+  int interlaceRows;
+
   SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
-  int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
-  int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
+
+  if (superTblInfo) {
+    insertRows = superTblInfo->insertRows;
+
+    if ((superTblInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) {
+      interlaceRows = g_args.interlace_rows;
+    } else {
+      interlaceRows = superTblInfo->interlaceRows;
+    }
+  } else {
+    insertRows = g_args.num_of_DPT;
+    interlaceRows = g_args.interlace_rows;
+  }

   if (interlaceRows > insertRows)
     interlaceRows = insertRows;
...
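The hunk above (and the similar one in syncWrite() further down) replaces the plain ternary with an explicit fallback rule: a per-super-table interlaceRows of 0 means "not set", so the global command-line value wins, while any non-zero per-table value overrides the global one. A minimal sketch of that rule in isolation, with illustrative names:

#include <stdio.h>

static int resolveInterlaceRows(int hasSuperTable, int tableValue, int globalValue) {
    if (!hasSuperTable) return globalValue;                 /* no per-table config      */
    if (tableValue == 0 && globalValue > 0) return globalValue;  /* 0 means "not set"   */
    return tableValue;                                      /* explicit per-table value */
}

int main(void) {
    printf("%d\n", resolveInterlaceRows(1, 0, 20));   /* 20: falls back to global */
    printf("%d\n", resolveInterlaceRows(1, 5, 20));   /* 5: per-table value wins  */
    printf("%d\n", resolveInterlaceRows(0, 0, 20));   /* 20: no super table       */
    return 0;
}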
@@ -4960,7 +4974,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
   pThreadInfo->totalInsertRows = 0;
   pThreadInfo->totalAffectedRows = 0;

-  int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
+  int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;

   int insert_interval =
       superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
...
...
@@ -5059,18 +5073,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTime
=
pThreadInfo
->
start_time
startTime
=
pThreadInfo
->
start_time
+
generatedRecPerTbl
*
nTimeStampStep
;
+
generatedRecPerTbl
*
nTimeStampStep
;
flagSleep
=
true
;
flagSleep
=
true
;
if
(
generatedRecPerTbl
>=
insertRows
)
if
(
generatedRecPerTbl
>=
insertRows
)
break
;
break
;
if
(
pThreadInfo
->
ntables
*
batchPerTbl
<
g_args
.
num_of_RPR
)
int
remainRows
=
insertRows
-
generatedRecPerTbl
;
break
;
if
((
remainRows
>
0
)
&&
(
batchPerTbl
>
remainRows
))
}
batchPerTbl
=
remainRows
;
}
int
remainRows
=
insertRows
-
generatedRecPerTbl
;
if
(
pThreadInfo
->
ntables
*
batchPerTbl
<
g_args
.
num_of_RPR
)
if
((
remainRows
>
0
)
&&
(
batchPerTbl
>
remainRows
))
break
;
batchPerTbl
=
remainRows
;
}
}
verbosePrint
(
"[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"
PRId64
"
\n
"
,
verbosePrint
(
"[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"
PRId64
"
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
...
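The remainRows lines added above clamp the per-table batch so the final round never generates more rows than are still owed. The same rule in isolation, as a small self-contained helper (names are illustrative):

#include <stdio.h>
#include <stdint.h>

static int clampBatch(int batchPerTbl, int64_t insertRows, int64_t generated) {
    int64_t remainRows = insertRows - generated;
    if (remainRows > 0 && (int64_t)batchPerTbl > remainRows) {
        batchPerTbl = (int)remainRows;      /* last round: emit only what is left */
    }
    return batchPerTbl;
}

int main(void) {
    printf("%d\n", clampBatch(100, 1000, 950));   /* 50: only 50 rows remain      */
    printf("%d\n", clampBatch(100, 1000, 500));   /* 100: a full batch still fits */
    return 0;
}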
@@ -5169,7 +5183,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
   int64_t startTs = taosGetTimestampMs();
   int64_t endTs;

-  int timeStampStep =
+  int64_t timeStampStep =
       superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
/*  int insert_interval =
       superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
...
@@ -5294,7 +5308,18 @@ static void* syncWrite(void *sarg) {
   threadInfo *pThreadInfo = (threadInfo *)sarg;
   SSuperTable* superTblInfo = pThreadInfo->superTblInfo;

-  int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows;
+  int interlaceRows;
+
+  if (superTblInfo) {
+    if ((superTblInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) {
+      interlaceRows = g_args.interlace_rows;
+    } else {
+      interlaceRows = superTblInfo->interlaceRows;
+    }
+  } else {
+    interlaceRows = g_args.interlace_rows;
+  }

   if (interlaceRows > 0) {
     // interlace mode
...
@@ -5993,8 +6018,8 @@ static void *specifiedTableQuery(void *sarg) {
             pThreadInfo->threadID,
             totalQueried,
             (double)(totalQueried/((endTs-startTs)/1000.0)));
        lastPrintTime = currentPrintTime;
      }
    }
  return NULL;
 }
...
@@ -6079,8 +6104,8 @@ static void *superTableQuery(void *sarg) {
             pThreadInfo->threadID,
             totalQueried,
             (double)(totalQueried/((endTs-startTs)/1000.0)));
          lastPrintTime = currentPrintTime;
        }
      }
    }
    et = taosGetTimestampMs();
...
@@ -6424,7 +6449,7 @@ static void *specifiedSubscribe(void *sarg) {
       }
       tsub[i] = subscribeImpl(pThreadInfo->taos,
           g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
-      if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) {
+      if (NULL == tsub[i]) {
         taos_close(pThreadInfo->taos);
         return NULL;
       }
...
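The subscribe fix above checks the handle it just created (the local tsub[i]) instead of a possibly stale slot in the global query-info structure. The same pattern in miniature, with a hypothetical stand-in for subscribeImpl():

#include <stddef.h>
#include <stdio.h>

/* Hypothetical subscription call: fails for every odd index. */
static void *subscribe_hypothetical(int i) {
    static int dummy;
    return (i % 2 == 0) ? &dummy : NULL;
}

int main(void) {
    void *local[4];
    for (int i = 0; i < 4; i++) {
        local[i] = subscribe_hypothetical(i);
        if (local[i] == NULL) {      /* test the value just stored, not another copy */
            printf("subscription %d failed, cleaning up\n", i);
            return 1;
        }
    }
    return 0;
}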
src/mnode/inc/mnodeDb.h    View file @ 9286f77e
@@ -31,6 +31,7 @@ enum _TSDB_DB_STATUS {
 int32_t mnodeInitDbs();
 void    mnodeCleanupDbs();
 int64_t mnodeGetDbNum();
+int32_t mnodeGetDbMaxReplica();
 SDbObj *mnodeGetDb(char *db);
 SDbObj *mnodeGetDbByTableName(char *db);
 void *  mnodeGetNextDb(void *pIter, SDbObj **pDb);
...
src/mnode/src/mnodeDb.c    View file @ 9286f77e
@@ -74,6 +74,24 @@ int64_t mnodeGetDbNum() {
   return sdbGetNumOfRows(tsDbSdb);
 }

+int32_t mnodeGetDbMaxReplica() {
+  int32_t maxReplica = 0;
+  SDbObj *pDb = NULL;
+  void *pIter = NULL;
+
+  while (1) {
+    pIter = mnodeGetNextDb(pIter, &pDb);
+    if (pDb == NULL) break;
+
+    if (pDb->cfg.replications > maxReplica)
+      maxReplica = pDb->cfg.replications;
+
+    mnodeDecDbRef(pDb);
+  }
+
+  return maxReplica;
+}
+
 static int32_t mnodeDbActionInsert(SSdbRow *pRow) {
   SDbObj *pDb = pRow->pObj;
   SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
...
src/mnode/src/mnodeDnode.c    View file @ 9286f77e
@@ -29,6 +29,7 @@
 #include "mnodeDef.h"
 #include "mnodeInt.h"
 #include "mnodeDnode.h"
+#include "mnodeDb.h"
 #include "mnodeMnode.h"
 #include "mnodeSdb.h"
 #include "mnodeShow.h"
@@ -745,6 +746,14 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
     return TSDB_CODE_MND_NO_REMOVE_MASTER;
   }

+  int32_t maxReplica = mnodeGetDbMaxReplica();
+  int32_t dnodesNum = mnodeGetDnodesNum();
+  if (dnodesNum <= maxReplica) {
+    mError("dnode:%d, can't drop dnode:%s, #dnodes: %d, replia: %d",
+           pDnode->dnodeId, ep, dnodesNum, maxReplica);
+    mnodeDecDnodeRef(pDnode);
+    return TSDB_CODE_MND_NO_ENOUGH_DNODES;
+  }
+
   mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
   int32_t code = bnDropDnode(pDnode);
...
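Together, the two mnode changes refuse to drop a dnode while the cluster would be left with fewer dnodes than the largest replica count of any database, since such a drop would leave some vgroup impossible to place. A pure-function sketch of the decision (names are illustrative, only the rule mirrors the guard above):

#include <stdio.h>
#include <stdint.h>

static int canDropDnode(int32_t dnodesNum, int32_t maxReplica) {
    return dnodesNum > maxReplica;
}

int main(void) {
    printf("%d\n", canDropDnode(3, 3));  /* 0: dropping would leave too few dnodes */
    printf("%d\n", canDropDnode(4, 3));  /* 1: safe to drop                        */
    return 0;
}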
src/vnode/src/vnodeWrite.c    View file @ 9286f77e
@@ -347,9 +347,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
       vDebug("vgId:%d, msg:%p, write into vwqueue after flowctrl, retry:%d", pVnode->vgId, pWrite,
              pWrite->processedCount);
       pWrite->processedCount = 0;
+      void *handle = pWrite->rpcMsg.handle;
       code = vnodeWriteToWQueueImp(pWrite);
-      if (code != 0) {
-        dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code);
+      if (code != TSDB_CODE_SUCCESS) {
+        SRpcMsg rpcRsp = {.handle = handle, .code = code};
+        rpcSendResponse(&rpcRsp);
       }
     }
   }
 }
...
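The pattern here is to copy the RPC handle out of the write message before handing the message to a call that may consume or free it, then answer the client through the saved handle on failure. A simplified, self-contained sketch of that flow; the types and the enqueue stand-in below are illustrative, not the real TDengine API:

#include <stdio.h>

typedef struct { void *handle; int code; } RpcRsp;
typedef struct { void *rpcHandle; } WriteMsg;

static void sendResponse(RpcRsp *rsp) {            /* stand-in for rpcSendResponse() */
    printf("reply on handle %p with code %d\n", rsp->handle, rsp->code);
}

static int enqueueWrite(WriteMsg *msg) {           /* stand-in that "consumes" msg   */
    (void)msg;
    return -1;                                     /* pretend the queue rejected it  */
}

static void requeueAfterFlowCtrl(WriteMsg *msg) {
    void *handle = msg->rpcHandle;                 /* save before msg may be freed   */
    int code = enqueueWrite(msg);
    if (code != 0) {
        RpcRsp rsp = { .handle = handle, .code = code };
        sendResponse(&rsp);                        /* reply via the saved handle     */
    }
}

int main(void) {
    WriteMsg msg = { .rpcHandle = (void *)0x1 };
    requeueAfterFlowCtrl(&msg);
    return 0;
}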
tests/perftest-scripts/perftest-taosdemo-compare.sh    0 → 100755    View file @ 9286f77e

#!/bin/bash

WORK_DIR=/home/ubuntu/pxiao
TDENGINE_DIR=/home/ubuntu/pxiao/TDengine
NUM_OF_VERSIONS=5
CURRENT_VERSION=0
today=`date +"%Y%m%d"`
TAOSDEMO_COMPARE_TEST_REPORT=$TDENGINE_DIR/tests/taosdemo-compare-test-report-$today.log

# Coloured Echoes
function red_echo      { echo -e "\033[31m$@\033[0m";   }
function green_echo    { echo -e "\033[32m$@\033[0m";   }
function yellow_echo   { echo -e "\033[33m$@\033[0m";   }
function white_echo    { echo -e "\033[1;37m$@\033[0m"; }
# Coloured Printfs
function red_printf    { printf "\033[31m$@\033[0m";    }
function green_printf  { printf "\033[32m$@\033[0m";    }
function yellow_printf { printf "\033[33m$@\033[0m";    }
function white_printf  { printf "\033[1;37m$@\033[0m";  }
# Debugging Outputs
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; }
function echoInfo   { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; }
function echoWarn   { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; }
function echoError  { local args="$@"; echo "$(white_brackets "$(red_printf  "ERROR")" && echo " ${args}";)" 1>&2; }

function getCurrentVersion {
    echoInfo "Build TDengine"
    cd $WORK_DIR/TDengine

    git remote update > /dev/null
    git reset --hard HEAD
    git checkout master
    REMOTE_COMMIT=`git rev-parse --short remotes/origin/master`
    LOCAL_COMMIT=`git rev-parse --short @`

    echo " LOCAL: $LOCAL_COMMIT"
    echo "REMOTE: $REMOTE_COMMIT"
    if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
        echo "repo up-to-date"
    else
        echo "repo need to pull"
        git pull > /dev/null 2>&1
    fi
    cd debug
    rm -rf *
    cmake .. > /dev/null 2>&1
    make > /dev/null 2>&1
    make install > /dev/null 2>&1

    rm -rf $WORK_DIR/taosdemo
    cp -r $TDENGINE_DIR/src/kit/taosdemo $WORK_DIR
    CURRENT_VERSION=`taosd -V | grep version | awk '{print $3}' | awk -F . '{print $3}'`
}

function buildTDengineByVersion() {
    echoInfo "build TDengine on branch: $1"
    git reset --hard HEAD
    git checkout $1
    git pull > /dev/null

    rm -rf $TDENGINE_DIR/src/kit/taosdemo
    cp -r $WORK_DIR/taosdemo $TDENGINE_DIR/src/kit

    cd $TDENGINE_DIR/debug
    rm -rf *
    cmake .. > /dev/null 2>&1
    make > /dev/null 2>&1
    make install > /dev/null 2>&1
}

function stopTaosd {
    echo "Stop taosd"
    systemctl stop taosd
    PID=`ps -ef | grep -w taosd | grep -v grep | awk '{print $2}'`
    while [ -n "$PID" ]
    do
        pkill -TERM -x taosd
        sleep 1
        PID=`ps -ef | grep -w taosd | grep -v grep | awk '{print $2}'`
    done
}

function startTaosd {
    echo "Start taosd"
    rm -rf /var/lib/perf/*
    rm -rf /var/log/perf/*
    nohup taosd -c /etc/perf/ > /dev/null 2>&1 &
    sleep 10
}

function runTaosdemoCompare {
    echoInfo "Stop Taosd"
    stopTaosd
    getCurrentVersion
    release="master"

    [ -f $TAOSDEMO_COMPARE_TEST_REPORT ] && rm $TAOSDEMO_COMPARE_TEST_REPORT

    for((i=0;i<$NUM_OF_VERSIONS;i++))
    do
        startTaosd
        taos -s "drop database if exists demodb;"
        taosdemo -y -d demodb > taosdemoperf.txt

        echo "==================== taosdemo performance for $release ====================" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        CREATE_TABLE_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'`
        INSERT_RECORDS_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'`
        RECORDS_PER_SECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'`
        AVG_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $4}' | awk -Fm '{print $1}'`
        MAX_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $6}' | awk -Fm '{print $1}'`
        MIN_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $8}' | awk -Fm '{print $1}'`

        echo "create table time: $CREATE_TABLE_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        echo "insert records time: $INSERT_RECORDS_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        echo "records per second: $RECORDS_PER_SECOND records/second" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        echo "avg delay: $AVG_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        echo "max delay: $MAX_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        echo "min delay: $MIN_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
        [ -f taosdemoperf.txt ] && rm taosdemoperf.txt

        stopTaosd
        version=`expr $CURRENT_VERSION - $i`
        release="release/s1$version"
        buildTDengineByVersion $release
    done
}

function sendReport {
    echo "send report"
    receiver="develop@taosdata.com"
    mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"

    cd $TDENGINE_DIR

    sed -i 's/\x1b\[[0-9;]*m//g' $TAOSDEMO_COMPARE_TEST_REPORT
    BODY_CONTENT=`cat $TAOSDEMO_COMPARE_TEST_REPORT`
    echo -e "to: ${receiver}\nsubject: taosdemo performance compare test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
    (cat - && uuencode $TAOSDEMO_COMPARE_TEST_REPORT taosdemo-compare-test-report-$today.log) | \
    ssmtp "${receiver}" && echo "Report Sent!"
}

runTaosdemoCompare
sendReport

echoInfo "End of Taosdemo Compare Test" | tee -a $WORK_DIR/cron.log
\ No newline at end of file
tests/pytest/client/thousandsofClient.py    0 → 100644    View file @ 9286f77e

###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading


class TwoClients:
    def initConnection(self):
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/home/chr/taosdata/TDengine/sim/dnode1/cfg "

    def newCloseCon(times):
        newConList = []
        for times in range(0, times):
            newConList.append(taos.connect(self.host, self.user, self.password, self.config))
        for times in range(0, times):
            newConList[times].close()

    def run(self):
        tdDnodes.init("")
        tdDnodes.setTestCluster(False)
        tdDnodes.setValgrind(False)
        tdDnodes.stopAll()
        tdDnodes.deploy(1)
        tdDnodes.start(1)

        # repeatedly open and close connections from many threads
        for m in range(1, 101):
            t = threading.Thread(target=newCloseCon, args=(10,))
            t.start()


clients = TwoClients()
clients.initConnection()
clients.run()
\ No newline at end of file
tests/pytest/cluster/TD-3693/how-to-use    0 → 100644    View file @ 9286f77e

execute:
cd TDengine/tests/pytest && python3 ./test.py -f cluster/TD-3693/multClient.py && python3 cluster/TD-3693/multQuery.py

1. Use the test cluster with its three nodes: fc1, fct2 and fct4.
2. Use taosdemo to create two databases, db1 and db2, with replica 1, and insert some data.
3. db1 lives on the mnode master (fct2); db2 lives on the mnode slave (fct4).
4. A modified multi-threaded taosdemo (named taosdemoMul) keeps querying the data in db2 over many concurrent connections.
5. With the queries from step 4 running in the background, run create-table, insert and query operations against db2 again; repeat the query loop 10 times with a 91 s interval between rounds.
6. Finally, check the taosd log to confirm that the "send auth msg to mnodes" problem no longer appears.
\ No newline at end of file
tests/pytest/cluster/TD-3693/insert1Data.json    0 → 100644    View file @ 9286f77e

{
  "filetype": "insert",
  "cfgdir": "/etc/taos",
  "host": "192.168.1.104",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 4,
  "thread_count_create_tbl": 4,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "interlace_rows": 10,
  "num_of_records_per_req": 1000,
  "max_sql_len": 1024000,
  "databases": [{
    "dbinfo": {
      "name": "db1", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
      "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, "comp": 2,
      "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
    },
    "super_tables": [{
      "name": "stb0", "child_table_exists": "no", "childtable_count": 10,
      "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 10,
      "data_source": "rand", "insert_mode": "taosc", "insert_rows": 10000,
      "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
      "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000,
      "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 1,
      "start_timestamp": "2020-10-01 00:00:00.000",
      "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "",
      "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10},
                  {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
      "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
    },
    {
      "name": "stb1", "child_table_exists": "no", "childtable_count": 20,
      "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 10,
      "data_source": "rand", "insert_mode": "taosc", "insert_rows": 20000,
      "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
      "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000,
      "disorder_ratio": 0, "disorder_range": 1000, "timestamp_step": 1,
      "start_timestamp": "2020-10-01 00:00:00.000",
      "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "",
      "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10},
                  {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
      "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
    }]
  }]
}
tests/pytest/cluster/TD-3693/insert2Data.json    0 → 100644    View file @ 9286f77e

The same insert configuration as insert1Data.json above (same host, thread counts, super tables stb0/stb1 with 10 and 20 child tables and 10000/20000 rows each, same column and tag schema); the only difference is the target database name:

{
  "filetype": "insert",
  ...
  "databases": [{
    "dbinfo": {
      "name": "db2", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
      "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, "comp": 2,
      "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
    },
    "super_tables": [ ...identical to insert1Data.json... ]
  }]
}
tests/pytest/cluster/TD-3693/multClient.py    0 → 100644    View file @ 9286f77e

# (standard TAOS copyright header, as in thousandsofClient.py above)
# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.rowNum = 100000
        self.ts = 1537146000000

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert data into the cluster's databases
        os.system("%staosdemo -f cluster/TD-3693/insert1Data.json -y " % binPath)
        # open and close many connections while querying the data
        os.system("%staosdemo -f cluster/TD-3693/insert2Data.json -y " % binPath)
        os.system("nohup %staosdemoMul -f cluster/TD-3693/queryCount.json -y & " % binPath)

        # delete useless files
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf ./querySystemInfo*")
        os.system("rm -rf cluster/TD-3693/multClient.py.sql")
        os.system("rm -rf ./querySystemInfo*")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/cluster/TD-3693/multQuery.py    0 → 100644    View file @ 9286f77e

# (standard TAOS copyright header)
# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading


class TwoClients:
    def initConnection(self):
        self.host = "fct4"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos/"
        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        # query data from the cluster's databases
        conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        cur = conn.cursor()
        tdSql.init(cur, True)
        tdSql.execute("use db2")
        cur.execute("select count (tbname) from stb0")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 20)
        tdSql.query("select count(*) from stb00_0")
        tdSql.checkData(0, 0, 10000)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 100000)
        tdSql.query("select count(*) from stb01_0")
        tdSql.checkData(0, 0, 20000)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 400000)

        tdSql.execute("drop table if exists squerytest")
        tdSql.execute("drop table if exists querytest")
        tdSql.execute('''create stable squerytest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute("create table querytest using squerytest tags('beijing')")
        tdSql.execute("insert into querytest(ts) values(%d)" % (self.ts - 1))
        for i in range(self.rowNum):
            tdSql.execute("insert into querytest values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

        for j in range(10):
            tdSql.execute("use db2")
            tdSql.query("select count(*),last(*) from querytest group by col1")
            tdSql.checkRows(10)
            tdSql.checkData(0, 0, 1)
            tdSql.checkData(1, 2, 2)
            tdSql.checkData(1, 3, 1)
            sleep(88)

        tdSql.execute("drop table if exists squerytest")
        tdSql.execute("drop table if exists querytest")


clients = TwoClients()
clients.initConnection()
clients.run()
\ No newline at end of file
tests/pytest/cluster/TD-3693/queryCount.json    0 → 100644    View file @ 9286f77e

{
  "filetype": "query",
  "cfgdir": "/etc/taos",
  "host": "192.168.1.104",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "confirm_parameter_prompt": "no",
  "databases": "db2",
  "query_times": 1000000,
  "specified_table_query": {
    "query_interval": 1,
    "concurrent": 100,
    "sqls": [{ "sql": "select count(*) from db.stb0", "result": "" }]
  }
}
\ No newline at end of file
tests/pytest/fulltest.sh    View file @ 9286f77e
@@ -151,6 +151,9 @@ python3 test.py -f tools/taosdemoTestTblAlt.py
 python3 test.py -f tools/taosdemoTestSampleData.py
 python3 test.py -f tools/taosdemoTestInterlace.py
 python3 test.py -f tools/taosdemoTestQuery.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+
 # update
 python3 ./test.py -f update/allow_update.py
@@ -221,6 +224,7 @@ python3 ./test.py -f query/queryJoin10tables.py
 python3 ./test.py -f query/queryStddevWithGroupby.py
 python3 ./test.py -f query/querySecondtscolumnTowherenow.py
 python3 ./test.py -f query/queryFilterTswithDateUnit.py
+python3 ./test.py -f query/queryTscomputWithNow.py
@@ -235,6 +239,8 @@ python3 ./test.py -f stream/history.py
 python3 ./test.py -f stream/sys.py
 python3 ./test.py -f stream/table_1.py
 python3 ./test.py -f stream/table_n.py
+python3 ./test.py -f stream/showStreamExecTimeisNull.py
+python3 ./test.py -f stream/cqSupportBefore1970.py
 #alter table
 python3 ./test.py -f alter/alter_table_crash.py
@@ -280,6 +286,7 @@ python3 ./test.py -f functions/all_null_value.py
 python3 ./test.py -f functions/function_avg.py -r 1
 python3 ./test.py -f functions/function_bottom.py -r 1
 python3 ./test.py -f functions/function_count.py -r 1
+python3 ./test.py -f functions/function_count_last_stab.py
 python3 ./test.py -f functions/function_diff.py -r 1
 python3 ./test.py -f functions/function_first.py -r 1
 python3 ./test.py -f functions/function_last.py -r 1
...
tests/pytest/functions/function_count_last_stab.py    0 → 100644    View file @ 9286f77e

# (standard TAOS copyright header)
# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        tdSql.execute('''create stable stest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute("create table test1 using stest tags('beijing')")
        tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))

        # last verification
        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

        tdSql.query("select count(*),last(*) from stest group by col1")
        tdSql.checkRows(10)
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(1, 2, 2)
        tdSql.checkData(1, 3, 1)

        tdSql.query("select count(*),last(*) from stest group by col2")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 10)
        tdSql.checkData(0, 2, 10)
        tdSql.checkData(0, 3, 1)

        tdSql.query("select count(*),last(ts,stest.*) from stest group by col1")
        tdSql.checkRows(10)
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(0, 2, "2018-09-17 09:00:00")
        tdSql.checkData(1, 4, 1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/functions/function_operations.py    View file @ 9286f77e
@@ -82,14 +82,14 @@ class TDTestCase:
         self.ts = self.ts + self.rowNum + 10

-        tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % (self.ts + self.rowNum + 1))
+        tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 254, 65534, 4294967294, 18446744073709551614)" % (self.ts + self.rowNum + 1))
         tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % (self.ts + self.rowNum + 2))
         tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % (self.ts + self.rowNum + 3))
         tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % (self.ts + self.rowNum + 4))
         tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % (self.ts + self.rowNum + 5))
         self.rowNum = self.rowNum + 5

-        col_list = ['col1','col2','col3','col4','col5','col6','col7','col8','col9','col11','col12','col13','col14','1','1.1','NULL']
+        col_list = ['col1','col2','col3','col4','col5','col6','col7','col8','col9','col11','col12','col13','col14','1','1.1','NULL','18446744073709551614']
         op_list = ['+','-','*','/','%']
         err_list = ['col7','col8','col9','NULL']
         order_lsit = [' order by ts ', ' order by ts desc ', ' order by ts asc ']
...
tests/pytest/query/queryFilterTswithDateUnit.py    View file @ 9286f77e
@@ -47,53 +47,53 @@ class TDTestCase:
     for col in cols:
         tdSql.error(f" select * from tts1 where {col} = 1d ")
-        tdSql.error(f" select * from tts1 where {col} < 1d ")
+        tdSql.error(f" select * from tts1 where {col} < -1d ")
         tdSql.error(f" select * from tts1 where {col} > 1d ")
-        tdSql.error(f" select * from tts1 where {col} >= 1d ")
+        tdSql.error(f" select * from tts1 where {col} >= -1d ")
         tdSql.error(f" select * from tts1 where {col} <= 1d ")
         tdSql.error(f" select * from tts1 where {col} <> 1d ")

-        tdSql.error(f" select * from tts1 where {col} = 1m ")
+        tdSql.error(f" select * from tts1 where {col} = -1m ")
         (the same six comparisons are repeated for the units 1m, 1s, 1a, 1h, 1w and 1u;
          in the new version the "=" and ">=" checks for each of these units use negative
          durations such as -1m, -1s, -1a, -1h, -1w and -1u, the other operators keep the
          positive value, and the final "u" check changes from "<> 1u" to "<> u")

         tdSql.error(f" select * from tts1 where {col} = 0d ")
         tdSql.error(f" select * from tts1 where {col} < 0s ")
@@ -125,6 +125,12 @@ class TDTestCase:
         tdSql.error(f" select * from tts1 where {col} <> 0/1d ")
         tdSql.error(f" select * from tts1 where {col} <> 1w+'2010-01-01 00:00:00' ")
+        tdSql.error(f" select * from tts1 where {col} = 1-1h ")
+        tdSql.error(f" select * from tts1 where {col} < 1w-d ")
+        tdSql.error(f" select * from tts1 where {col} > 0/u ")
+        tdSql.error(f" select * from tts1 where {col} >= d/s ")
+        tdSql.error(f" select * from tts1 where {col} <= 1/a ")
+        tdSql.error(f" select * from tts1 where {col} <> d/1 ")

     def run(self):
         tdSql.execute("drop database if exists dbms")
@@ -148,19 +154,16 @@ class TDTestCase:
         # create databases precision is us
         tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ")
         tdSql.execute("use dbus")
-        tsp2 = -28800000 * 1000
-        tsp3 = -946800000000 * 1000
+        tsp2 = tsp2 * 1000
+        tsp3 = tsp3 * 1000

         self.insertnow(tsp1, tsp2, tsp3)
         self.querynow()

     def stop(self):
         tdSql.close()
         tdLog.success(f"{__file__} successfully executed")

 tdCases.addWindows(__file__, TDTestCase())
 tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tests/pytest/query/queryTscomputWithNow.py    0 → 100644    View file @ 9286f77e

# (standard TAOS copyright header)
# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), logSql)

    def inertnow(self):
        tsp1 = 0
        tsp2 = -28800000
        tsp3 = -946800000000

        tdSql.execute("create table stbts (ts timestamp, ts1 timestamp, c1 int, ts2 timestamp) TAGS(t1 int)")
        tdSql.execute("create table tts1 using stbts tags(1)")

        tdSql.execute("insert into tts1 values (now+1d, now+1d, 6, now+1d)")
        tdSql.execute("insert into tts1 values (now, now, 5, now)")
        tdSql.execute("insert into tts1 values (now-1d, now-1d, 4, now-1d)")
        tdSql.execute(f"insert into tts1 values ({tsp1}, {tsp1}, 3, {tsp1})")
        tdSql.execute(f"insert into tts1 values ({tsp2}, {tsp2}, 2, {tsp2})")
        tdSql.execute(f"insert into tts1 values ({tsp3}, {tsp3}, 1, {tsp3})")

    def querynow(self):
        interval_day1 = (datetime.date.today() - datetime.date(1970, 1, 1)).days
        interval_day2 = (datetime.date.today() - datetime.date(1940, 1, 1)).days

        tdLog.printNoPrefix("==========step query: execute query operation")
        time.sleep(1)
        tdSql.execute(" select * from tts1 where ts > now+1d ")
        ts_len1 = len(tdSql.cursor.fetchall())
        tdSql.execute(" select * from tts1 where ts < now+1d ")
        ts_len2 = len(tdSql.cursor.fetchall())
        tdSql.execute(" select * from tts1 where ts > now-1d ")
        ts_len3 = len(tdSql.cursor.fetchall())
        tdSql.execute(" select * from tts1 where ts < now-1d ")
        ts_len4 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts > now-{interval_day1+1}d ")
        ts_len5 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts < now-{interval_day1+1}d ")
        ts_len6 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts > now-{interval_day1-1}d ")
        ts_len7 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts < now-{interval_day1-1}d ")
        ts_len8 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts > now-{interval_day2+1}d ")
        ts_len9 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts < now-{interval_day2+1}d ")
        ts_len10 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts > now-{interval_day2-1}d ")
        ts_len11 = len(tdSql.cursor.fetchall())
        tdSql.execute(f" select * from tts1 where ts < now-{interval_day2-1}d ")
        ts_len12 = len(tdSql.cursor.fetchall())

        tdSql.query(" select * from tts1 where ts1 > now+1d ")
        tdSql.checkRows(ts_len1)
        tdSql.query(" select * from tts1 where ts2 > now+1440m ")
        tdSql.checkRows(ts_len1)
        tdSql.query(" select * from tts1 where ts1 < now+1d ")
        tdSql.checkRows(ts_len2)
        tdSql.query(" select * from tts1 where ts2 < now+1440m ")
        tdSql.checkRows(ts_len2)
        # ... the same pattern continues for ts_len3 through ts_len12: every "ts1 ... Nd"
        #     filter is paired with the equivalent "ts2 ... N*1440m" filter and both must
        #     return the row count measured on the primary ts column above

    def run(self):
        tdSql.execute("drop database if exists dbms")
        tdSql.execute("drop database if exists dbus")

        # timestamp list:
        # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
        # -631180800000 -> "1950-01-01 00:00:00"

        tdLog.printNoPrefix("==========step1:create table precision ms && insert data && query")
        tdSql.execute("create database if not exists dbms keep 36500")
        tdSql.execute("use dbms")
        self.inertnow()
        self.querynow()

        tdLog.printNoPrefix("==========step2:create table precision us && insert data && query")
        tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ")
        tdSql.execute("use dbus")
        self.inertnow()
        self.querynow()

        tdSql.query("show dnodes")
        index = tdSql.getData(0, 0)
        tdDnodes.stop(index)
        tdDnodes.start(index)

        tdLog.printNoPrefix("==========step3:after wal, query table precision ms")
        tdSql.execute("use dbus")
        self.querynow()

        tdLog.printNoPrefix("==========step4: query table precision us")
        tdSql.execute("use dbus")
        self.querynow()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
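This test and the two stream tests that follow all rely on the same pre-1970 epoch values. A small C check of how those constants relate, assuming the UTC+8 session the test comments describe:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void) {
    /* Epoch milliseconds as used by the tests, interpreted at UTC+8:
     *            0  ->  1970-01-01 08:00:00
     *    -28800000  ->  1970-01-01 00:00:00   (8 hours before the epoch)
     * -946800000000 ->  1940-01-01 00:00:00                             */
    int64_t tsp2 = -28800000LL;
    int64_t tsp3 = -946800000000LL;

    printf("days between 1940-01-01 and 1970-01-01: %" PRId64 "\n",
           (tsp2 - tsp3) / (86400LL * 1000));   /* 10958 = 30 years incl. 8 leap days */
    return 0;
}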
tests/pytest/stream/cqSupportBefore1970.py    0 → 100644    View file @ 9286f77e

# (standard TAOS copyright header)
# -*- coding: utf-8 -*-

import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), logSql)

    def insertnow(self):
        # timestamp list:
        # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
        # -631180800000 -> "1950-01-01 00:00:00"
        tsp1 = 0
        tsp2 = -28800000
        tsp3 = -946800000000
        tsp4 = "1969-01-01 00:00:00.000"

        tdSql.execute("insert into tcq1 values (now-11d, 5)")
        tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
        tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
        tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
        tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")

    def waitedQuery(self, sql, expectRows, timeout):
        tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
        try:
            for i in range(timeout):
                tdSql.cursor.execute(sql)
                self.queryResult = tdSql.cursor.fetchall()
                self.queryRows = len(self.queryResult)
                self.queryCols = len(tdSql.cursor.description)
                # tdLog.info("sql: %s, try to retrieve %d rows, get %d rows" % (sql, expectRows, self.queryRows))
                if self.queryRows >= expectRows:
                    return (self.queryRows, i)
                time.sleep(1)
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
            raise Exception(repr(e))
        return (self.queryRows, timeout)

    def cq(self):
        tdSql.execute(
            "create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)"
        )
        self.waitedQuery("select * from cq1", 1, 120)

    def querycq(self):
        tdSql.query("select * from cq1")
        tdSql.checkData(0, 1, 1.0)
        tdSql.checkData(10, 1, 2.0)

    def run(self):
        tdSql.execute("drop database if exists dbcq")
        tdSql.execute("create database if not exists dbcq keep 36500")
        tdSql.execute("use dbcq")

        tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
        tdSql.execute("create table tcq1 using stbcq tags(1)")

        self.insertnow()
        self.cq()
        self.querycq()

        # after wal and sync, check again
        tdSql.query("show dnodes")
        index = tdSql.getData(0, 0)
        tdDnodes.stop(index)
        tdDnodes.start(index)

        self.querycq()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tests/pytest/stream/showStreamExecTimeisNull.py
0 → 100644
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import time     # added: showstream()/waitedQuery() rely on time.sleep
import inspect  # added: waitedQuery() uses inspect for error reporting
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), logSql)

    def insertnow(self):
        # timestamp list:
        #   0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
        #   -631180800000 -> "1950-01-01 00:00:00"
        tsp1 = 0
        tsp2 = -28800000
        tsp3 = -946800000000
        tsp4 = "1969-01-01 00:00:00.000"

        tdSql.execute("insert into tcq1 values (now-11d, 5)")
        tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
        tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
        tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
        tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")

    def waitedQuery(self, sql, expectRows, timeout):
        tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
        try:
            for i in range(timeout):
                tdSql.cursor.execute(sql)
                self.queryResult = tdSql.cursor.fetchall()
                self.queryRows = len(self.queryResult)
                self.queryCols = len(tdSql.cursor.description)
                # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
                if self.queryRows >= expectRows:
                    return (self.queryRows, i)
                time.sleep(1)
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
            raise Exception(repr(e))
        return (self.queryRows, timeout)

    def showstream(self):
        tdSql.execute("create table cq1 as select avg(c1) from tcq1 interval(10d) sliding(1d)")
        sql = "show streams"
        timeout = 30
        exception = "ValueError('year -292275055 is out of range')"
        try:
            for i in range(timeout):
                tdSql.cursor.execute(sql)
                self.queryResult = tdSql.cursor.fetchall()
                self.queryRows = len(self.queryResult)
                self.queryCols = len(tdSql.cursor.description)
                # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
                if self.queryRows >= timeout:
                    return (self.queryRows, i)
                time.sleep(1)
        except Exception as e:
            tdLog.info(f"sql: {sql} expect raise {exception}, actually raise {repr(e)}")
        else:
            tdLog.exit(f"sql: {sql} expect raise {exception}, actually not")

    def run(self):
        tdSql.execute("drop database if exists dbcq")
        tdSql.execute("create database if not exists dbcq keep 36500")
        tdSql.execute("use dbcq")

        tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
        tdSql.execute("create table tcq1 using stbcq tags(1)")

        self.insertnow()
        self.showstream()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 100, "num_of_records_per_req": 1000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 60, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 100000, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 1000, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import subprocess
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        tdSql.prepare()
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert 6,000,000 rows into stb0 (60 child tables x 100000 rows each, per query-interrupt.json)
        os.system("%staosdemo -f tools/taosdemoAllTest/TD-3453/query-interrupt.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 60)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 6000000)

        # launch a full-table query in the background, then kill it if taosd CPU load climbs above 10%
        os.system('%staosdemo -f tools/taosdemoAllTest/TD-3453/queryall.json -y & ' % binPath)
        time.sleep(2)
        query_pid = int(subprocess.getstatusoutput('ps aux|grep "TD-3453/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1])
        taosd_cpu_load_1 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1])
        if taosd_cpu_load_1 > 10.0:
            os.system("kill -9 %d" % query_pid)
            time.sleep(5)
            taosd_cpu_load_2 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1])
            if taosd_cpu_load_2 < 10.0:
                suc_kill = 60
            else:
                suc_kill = 10
                print("taosd_cpu_load is higher than 10%")
        else:
            suc_kill = 20
            print("taosd_cpu_load is still less than 10%")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, "%d" % suc_kill)
        os.system("rm -rf querySystemInfo*")
        os.system("rm -rf insert_res.txt")
        os.system("rm -rf insert_res.txt")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json
0 → 100644
{
    "filetype": "query",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "confirm_parameter_prompt": "no",
    "databases": "db",
    "specified_table_query": {
        "query_interval": 1,
        "concurrent": 1,
        "sqls": [
            {"sql": "select * from stb0", "result": ""}
        ]
    }
}
\ No newline at end of file
tests/pytest/tools/taosdemoAllTest/convertResFile.py
0 → 100644
from datetime import datetime
import time
import os

os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt")

with open('./new_query_res0.txt', 'r+') as f0:
    contents = f0.readlines()

if os.path.exists('./test_query_res0.txt'):
    os.system("rm -rf ./test_query_res0.txt")

for i in range(len(contents)):
    content = contents[i].rstrip('\n')
    stimestamp = content.split(',')[0]
    timestamp = int(stimestamp)
    d = datetime.fromtimestamp(timestamp / 1000)
    str0 = d.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
    ts = "'" + str0 + "'"
    str1 = "'" + content.split(',')[1] + "'"
    str2 = "'" + content.split(',')[2] + "'"
    content = ts + "," + str1 + "," + str2 + "," + content.split(',', 3)[3]
    contents[i] = content + "\n"
    with open('./test_query_res0.txt', 'a') as fi:
        fi.write(contents[i])

os.system("rm -rf ./new_query_res0.txt")

# timestamp = 1604160000099
# d = datetime.fromtimestamp(timestamp/1000)
# str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f")
# print(str1[:-3])
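
For reference, a minimal standalone sketch of the timestamp conversion performed inside the loop above; the sample value comes from the commented-out lines, and the printed string depends on the local timezone, so the result shown is only illustrative:

from datetime import datetime

timestamp = 1604160000099                        # epoch milliseconds, same sample value as the comment above
d = datetime.fromtimestamp(timestamp / 1000)     # local-time conversion, exactly as the script does
print(d.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3])   # e.g. "2020-11-01 00:00:00.099" when the local timezone is UTC+8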
tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 1, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 1000, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 1, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 100, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 1000, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 200, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 4}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 1000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 10, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 20, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-disorder.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 1000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 1, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 1, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 10, "disorder_range": 100,
            "timestamp_step": 1000, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 1, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 1, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 100, "disorder_range": 1,
            "timestamp_step": 1000, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 100, "max_sql_len": 10240000000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 10, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 1000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 0}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 7}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 20, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 2000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 100, "max_sql_len": 10240000000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 10, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 1000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 1024}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 7}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 20, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 2000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 1004}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 7}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 100, "max_sql_len": 10240000000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 10, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 1000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 1005}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 7}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 20, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 2000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 100, "max_sql_len": 10240000000,
    "databases": [{
        "dbinfo": {
            "name": "db1", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 10, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 1000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 1}, {"type": "BIGINT", "count": 1}, {"type": "float", "count": 1}, {"type": "double", "count": 1}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 127}, {"type": "BINARY", "len": 16, "count": 2}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 20, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 2000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 100, "num_of_records_per_req": 1000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 100, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 150, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 151, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 100, "interlace_rows": 0, "num_of_records_per_req": 2000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 16, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 100, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 1000, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 100, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20000, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 1000, "insert_interval": 2000, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 9}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-newdb.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 0, "num_of_records_per_req": 3000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 16, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 1
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "yes", "childtable_count": 5, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 6, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb2", "child_table_exists": "no", "childtable_count": 7, "childtable_prefix": "stb02_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 4, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb3", "child_table_exists": "no", "childtable_count": 8, "childtable_prefix": "stb03_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 2, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb4", "child_table_exists": "no", "childtable_count": 8, "childtable_prefix": "stb04_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 0, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-newtable.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 0, "num_of_records_per_req": 3000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "no", "replica": 1, "days": 10, "cache": 16, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 1
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "yes", "childtable_count": 5, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-12-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 6, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-12-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb2", "child_table_exists": "no", "childtable_count": 7, "childtable_prefix": "stb02_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 4, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-12-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb3", "child_table_exists": "no", "childtable_count": 8, "childtable_prefix": "stb03_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 2, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-12-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb4", "child_table_exists": "no", "childtable_count": 8, "childtable_prefix": "stb04_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 30, "childtable_limit": 0, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-12-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 0, "num_of_records_per_req": 3000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "dbno", "drop": "no", "replica": 1, "days": 10, "cache": 16, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 1
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 5, "childtable_prefix": "stb00_",
            "auto_create_table": "yes", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-offset.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 0, "num_of_records_per_req": 3000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "no", "replica": 1, "days": 10, "cache": 16, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "yes", "childtable_count": 5, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 0, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-11-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "yes", "childtable_count": 6, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-11-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb2", "child_table_exists": "yes", "childtable_count": 7, "childtable_prefix": "stb02_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 4, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-11-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb3", "child_table_exists": "yes", "childtable_count": 8, "childtable_prefix": "stb03_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 2, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-11-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb4", "child_table_exists": "yes", "childtable_count": 8, "childtable_prefix": "stb04_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 0, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-11-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-renewdb.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 0, "num_of_records_per_req": 3000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db", "drop": "yes", "replica": 1, "days": 10, "cache": 16, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 1
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "yes", "childtable_count": 5, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 6, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb2", "child_table_exists": "no", "childtable_count": 7, "childtable_prefix": "stb02_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 4, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb3", "child_table_exists": "no", "childtable_count": 8, "childtable_prefix": "stb03_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 2, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb4", "child_table_exists": "no", "childtable_count": 8, "childtable_prefix": "stb04_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 20, "childtable_limit": 0, "childtable_offset": 7, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-sample.json
0 → 100644
{
    "filetype": "insert", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030,
    "user": "root", "password": "taosdata", "thread_count": 4, "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no",
    "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 1000, "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "dbtest123", "drop": "yes", "replica": 1, "days": 10, "cache": 50, "blocks": 8,
            "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, "comp": 2,
            "walLevel": 1, "cachelast": 0, "quorum": 1, "fsync": 3000, "update": 0
        },
        "super_tables": [{
            "name": "stb0", "child_table_exists": "no", "childtable_count": 1, "childtable_prefix": "stb00_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "sample", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1,
            "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./tools/taosdemoAllTest/sample.csv", "tags_file": "",
            "columns": [{"type": "INT", "count": 3}, {"type": "DOUBLE", "count": 3}, {"type": "BINARY", "len": 16, "count": 1}, {"type": "BINARY", "len": 32, "count": 1}, {"type": "BOOL"}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1", "child_table_exists": "no", "childtable_count": 2, "childtable_prefix": "stb01_",
            "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc",
            "insert_rows": 10, "childtable_limit": -1, "childtable_offset": 0, "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000,
            "timestamp_step": 10, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv",
            "sample_file": "./sample.csv", "tags_file": "./tools/taosdemoAllTest/tags.csv",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 3}, {"type": "BINARY", "len": 16, "count": 2}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/insert-timestep.json
0 → 100644
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 10,
    "num_of_records_per_req": 1000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "yes",
            "replica": 1,
            "days": 10,
            "cache": 50,
            "blocks": 8,
            "precision": "ms",
            "keep": 365,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "no",
            "childtable_count": 10,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 20,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-10-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1",
            "child_table_exists": "no",
            "childtable_count": 20,
            "childtable_prefix": "stb01_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 20,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "multi_thread_write_one_tbl": "no",
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 10,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 10}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: drop and child_table_exists combination test
        # insert: use the "childtable_offset" and "childtable_limit" parameters to control
        # which slice of the pre-created child tables each concurrent taosdemo run writes to
        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-newdb.json" % binPath)
        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit1.json & " % binPath)
        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit94.json & " % binPath)
        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit5.json & " % binPath)
        sleep(15)
        tdSql.execute("use db")
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 1000000)

        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
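A quick sanity check of the expected row count in this test: moredemo-offset-newdb.json creates 100 empty child tables (insert_rows is 0), and the three limit/offset configs then each write 10000 rows into disjoint slices of those tables (offset 99 / limit 1, offset 0 / limit 5, offset 5 / limit 94), so stb0 should end with 100 × 10000 = 1,000,000 rows, which is exactly what tdSql.checkData asserts. A minimal sketch of that arithmetic follows; it is a standalone illustration, not part of the test framework.

# Sketch: verify the offset/limit slices used by the three taosdemo configs
# cover all 100 pre-created child tables exactly once.
slices = [(99, 1), (0, 5), (5, 94)]   # (childtable_offset, childtable_limit) per config
covered = set()
for offset, limit in slices:
    covered.update(range(offset, offset + limit))

assert covered == set(range(100)), "slices overlap or leave gaps"
expected_rows = len(covered) * 10000   # insert_rows per child table
print(expected_rows)                   # 1000000, the value checked against count(*) from stb0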
tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json
0 → 100644
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 0,
    "num_of_records_per_req": 3000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "no",
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": 365,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "yes",
            "childtable_count": 100,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 10000,
            "childtable_limit": 1,
            "childtable_offset": 99,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json
0 → 100644
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 0,
    "num_of_records_per_req": 3000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "no",
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": 365,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "yes",
            "childtable_count": 100,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 10000,
            "childtable_limit": 5,
            "childtable_offset": 0,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json
0 → 100644
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 0,
    "num_of_records_per_req": 3000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "no",
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": 365,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "yes",
            "childtable_count": 100,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 10000,
            "childtable_limit": 94,
            "childtable_offset": 5,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json
0 → 100644
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 0,
    "num_of_records_per_req": 3000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "yes",
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": 365,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "no",
            "childtable_count": 100,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 0,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/sample.csv
0 → 100644
1,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true'
0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true'
0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','false'
\ No newline at end of file
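For reference, each row above lines up with the nine columns declared for stb0 in insert-sample.json (INT ×3, DOUBLE ×3, BINARY(16), BINARY(32), BOOL), which is why that config can point its "sample" data source at this file. The following sketch counts the fields in one row under the assumption that the single quotes seen above delimit the string fields; taosdemo's own CSV handling may differ, so treat this as an illustration only.

# Sketch: count the fields in a sample.csv row, treating single-quoted
# strings as one field each (assumption; not taosdemo code).
import csv, io

row_text = "1,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\\[uer]','23ac,;\\[uer23423]123123','true'"
fields = next(csv.reader(io.StringIO(row_text), quotechar="'"))
print(len(fields))  # 9 -> INT*3 + DOUBLE*3 + BINARY(16) + BINARY(32) + BOOL in stb0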
tests/pytest/tools/taosdemoAllTest/speciQuery.json
0 → 100644
{
    "filetype": "query",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "confirm_parameter_prompt": "no",
    "databases": "db",
    "query_times": 2,
    "specified_table_query": {
        "query_interval": 1,
        "concurrent": 3,
        "sqls": [
            {
                "sql": "select last_row(*) from stb0 ",
                "result": "./query_res0.txt"
            },
            {
                "sql": "select count(*) from stb00_1",
                "result": "./query_res1.txt"
            }
        ]
    },
    "super_table_query": {
        "stblname": "stb1",
        "query_interval": 1,
        "threads": 3,
        "sqls": [
            {
                "sql": "select last_row(ts) from xxxx",
                "result": "./query_res2.txt"
            }
        ]
    }
}
tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json
0 → 100644
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 4,
    "thread_count_create_tbl": 4,
    "result_file": "./insert_res.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 0,
    "num_of_records_per_req": 3000,
    "max_sql_len": 1024000,
    "databases": [{
        "dbinfo": {
            "name": "db",
            "drop": "yes",
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": 365,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        },
        "super_tables": [{
            "name": "stb0",
            "child_table_exists": "no",
            "childtable_count": 100,
            "childtable_prefix": "stb00_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 100,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "BINARY", "len": 16, "count": 1}, {"type": "BINARY", "len": 32, "count": 1}, {"type": "INT"}, {"type": "DOUBLE", "count": 1}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        },
        {
            "name": "stb1",
            "child_table_exists": "no",
            "childtable_count": 100,
            "childtable_prefix": "stb01_",
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": 200,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "interlace_rows": 0,
            "insert_interval": 0,
            "max_sql_len": 1024000,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 1,
            "start_timestamp": "2020-11-01 00:00:00.000",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": [{"type": "INT"}, {"type": "DOUBLE", "count": 6}, {"type": "BINARY", "len": 16, "count": 3}, {"type": "BINARY", "len": 32, "count": 6}],
            "tags": [{"type": "TINYINT", "count": 2}, {"type": "BINARY", "len": 16, "count": 5}]
        }]
    }]
}
tests/pytest/tools/taosdemoAllTest/tags.csv
0 → 100644
1,-127,127,'23ac,;\[uer]3','true'
1,-127,126,'23ac,;\[uer]3','true'
tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
0 → 100644
(This diff is collapsed in the web view; the file contents are not shown here.)
tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import time
from datetime import datetime


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # query: insert data with speciQueryInsertdata.json, run the queries defined in
        # speciQuery.json, then merge the per-thread result files and verify them
        os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
        os.system("%staosdemo -f tools/taosdemoAllTest/speciQuery.json" % binPath)
        os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
        os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
        os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")

        tdSql.execute("use db")
        tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
        os.system("python3 tools/taosdemoAllTest/convertResFile.py")
        tdSql.execute("insert into result0 file './test_query_res0.txt'")
        tdSql.query("select ts from result0")
        tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
        tdSql.query("select count(*) from result0")
        tdSql.checkData(0, 0, 1)

        with open('./all_query_res1.txt', 'r+') as f1:
            result1 = int(f1.readline())
            tdSql.query("select count(*) from stb00_1")
            tdSql.checkData(0, 0, "%d" % result1)

        with open('./all_query_res2.txt', 'r+') as f2:
            result2 = int(f2.readline())
            d2 = datetime.fromtimestamp(result2 / 1000)
            timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.query("select last_row(ts) from stb1")
            tdSql.checkData(0, 0, "%s" % timest)

        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestQuerytWithJson.py.sql")
        os.system("rm -rf ./querySystemInfo*")
        os.system("rm -rf ./query_res*")
        os.system("rm -rf ./all_query*")
        os.system("rm -rf ./test_query_res0.txt")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
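The timestamp assertion above follows directly from speciQueryInsertdata.json: stb0 starts at 2020-11-01 00:00:00.000 and writes 100 rows with a 1 ms timestamp_step, so its last row lands at 00:00:00.099, which is the value checked against result0. The same reasoning gives the expected last_row(ts) for stb1 (200 rows, 1 ms step). A minimal sketch of that derivation, mirroring the datetime conversion the test already uses (illustration only, not test code):

# Sketch: derive the expected last-row timestamps from the insert config
# (start_timestamp, insert_rows, timestamp_step taken from speciQueryInsertdata.json).
from datetime import datetime, timedelta

start = datetime.strptime("2020-11-01 00:00:00.000", "%Y-%m-%d %H:%M:%S.%f")

def last_row_ts(insert_rows, step_ms):
    # the first row is written at `start`, so the last one is (insert_rows - 1) steps later
    return start + timedelta(milliseconds=(insert_rows - 1) * step_ms)

print(last_row_ts(100, 1).strftime("%Y-%m-%d %H:%M:%S.%f"))  # 2020-11-01 00:00:00.099000 (stb0)
print(last_row_ts(200, 1).strftime("%Y-%m-%d %H:%M:%S.%f"))  # 2020-11-01 00:00:00.199000 (stb1)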
tests/script/unique/cluster/balance2.sim
...
@@ -338,10 +338,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
 print stop dnode1 and sleep 3000
 sleep 3000
-sql drop dnode $hostname1
-print drop dnode1 and sleep 9000
-sleep 9000
 sql show mnodes
 $dnode1Role = $data2_1
 $dnode4Role = $data2_4
...
@@ -357,6 +353,25 @@ endi
 print ============================== step6.1
 system sh/exec.sh -n dnode1 -s start
+$x = 0
+step6.1:
+  $x = $x + 1
+  sleep 1000
+  if $x == 10 then
+    return -1
+  endi
+sql show dnodes
+print dnode1 $data4_1
+if $data4_1 != ready then
+  goto step6.1
+endi
+
+sql drop dnode $hostname1
+print drop dnode1 and sleep 9000
+sleep 9000
 $x = 0
 show6:
   $x = $x + 1
...
tests/script/unique/dnode/remove1.sim
...
@@ -97,7 +97,6 @@ if $data2_2 != 3 then
 endi
 print ========== step3
-sql drop dnode $hostname2
 $x = 0
 show3:
...
@@ -114,6 +113,7 @@ print dnode2 openVnodes $data2_2
 print ========== step4
 sql create dnode $hostname3
 system sh/exec.sh -n dnode3 -s start
+sql drop dnode $hostname2
 $x = 0
 show4:
...
@@ -224,4 +224,4 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT
 system sh/exec.sh -n dnode5 -s stop -x SIGINT
 system sh/exec.sh -n dnode6 -s stop -x SIGINT
 system sh/exec.sh -n dnode7 -s stop -x SIGINT
 system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
tests/script/unique/dnode/remove2.sim
...
@@ -98,7 +98,6 @@ endi
 print ========== step3
 system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sql drop dnode $hostname2
 sql show dnodes
 print dnode1 openVnodes $data2_1
...
@@ -128,6 +127,26 @@ endi
 print ============ step 4.1
 system sh/exec.sh -n dnode2 -s start
+$x = 0
+step4.1:
+  $x = $x + 1
+  sleep 1000
+  if $x == 10 then
+    return -1
+  endi
+sql show dnodes
+print dnode1 $data4_1
+print dnode2 $data4_2
+print dnode3 $data4_3
+print dnode4 $data4_4
+if $data4_2 != ready then
+  goto step4.1
+endi
+
+sql drop dnode $hostname2
 $x = 0
 show4:
   $x = $x + 1
...