taosdata / TDengine
Commit 359e5426 (unverified), authored Jun 17, 2022 by Hui Li, committed via GitHub on Jun 17, 2022
Merge pull request #13882 from taosdata/test/distributed_aggregate
test: add case for max about distribute plan
Parents: d9965889, 9eff85b6
Showing 8 changed files with 1,833 additions and 170 deletions (+1833 −170)
tests/system-test/2-query/distribute_agg_apercentile.py   +198 −0
tests/system-test/2-query/distribute_agg_count.py         +296 −0
tests/system-test/2-query/distribute_agg_max.py           +293 −0
tests/system-test/2-query/distribute_agg_min.py           +294 −0
tests/system-test/2-query/distribute_agg_spread.py        +281 −0
tests/system-test/2-query/distribute_agg_sum.py           +278 −0
tests/system-test/2-query/max.py                          +187 −170
tests/system-test/fulltest.sh                             +6 −0
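All six new distribute_agg_* cases below follow the same pattern: spread sub-tables of one super table across several vnodes (vgroups 5, maxTablesPerVnode 2), then verify that the distributed plan for an aggregate returns the same answer as an equivalent rewritten query. The key property for max, which the commit message calls out, is that each vnode can compute a partial maximum and the merge step only needs the maximum of those partials. A minimal standalone sketch of that merge property (plain Python, no TDengine involved; the "vnode" partitioning here is made up purely for illustration):

    # Toy illustration: max over a partitioned dataset equals max of per-partition maxima.
    # The partitions stand in for vnodes; none of this touches TDengine.
    import random

    rows = [random.randint(-1000, 1000) for _ in range(200)]
    vnodes = [rows[i::5] for i in range(5)]          # pretend 5 vnodes each hold a slice

    partial_maxima = [max(part) for part in vnodes]  # what each vnode would return
    merged = max(partial_maxima)                     # what the merge node computes

    assert merged == max(rows)                       # same answer as a single-node scan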
tests/system-test/2-query/distribute_agg_apercentile.py (new file, mode 100644)

from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143,
                     "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143,
                     "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000

    def prepare_datas_of_distribute(self):

        # prepate datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1,21):
            if i ==1 or i == 4:
                continue
            else:
                tbname = "ct"+f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

    def check_distribute_datas(self):
        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables={}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]]=[]

        # check sub_table of per vnode ,make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k ,v in vnode_tables.items():
            if len(v)>=2:
                count+=1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select apercentile(c1 , 20) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select apercentile(c1 , 20) from stb1 where t1=1")
        tdSql.checkData(0,0,2.800000000)

        tdSql.query("select apercentile(c1+c2 ,100) from stb1 where c1 =1 ")
        tdSql.checkData(0,0,11112.000000000)

        tdSql.query("select apercentile(c1 ,10 ) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0,0,2.000000000)

        tdSql.query("select apercentile(c1,20) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select apercentile(c1,20) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select apercentile(c1,20) from stb1 union all select apercentile(c1,20) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,7.389181281)

        # join
        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select apercentile(tb1.c1,100), apercentile(tb2.c2,100) from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,9.000000000)
        tdSql.checkData(0,0,9.000000000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select apercentile(c1 ,10)from stb1 partition by tbname")
        query_data = tdSql.queryResult

        # nest query for support max
        tdSql.query("select apercentile(c2+2,10)+1 from (select max(c1) c2 from stb1)")
        tdSql.checkData(0,0,31.000000000)
        tdSql.query("select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0,0,7.560701700)
        tdSql.query("select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0,0,7.560701700)

        # mixup with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1), apercentile(c1,10) from stb1")
        tdSql.checkData(0,0,28)
        tdSql.checkData(0,1,184)
        tdSql.checkData(0,2,-99999)
        tdSql.checkData(0,3,-999)
        tdSql.checkData(0,4,28.000000000)
        tdSql.checkData(0,5,4.560701700)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.distribute_agg_query()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
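Unlike max or count, apercentile is an approximate percentile, so the case above pins expected literals (for example 7.389181281) rather than recomputing them from the raw rows. If an external sanity check were wanted, one option is to pull the raw column and compare against an exact percentile within a tolerance; this is only a rough sketch, assuming a DB-API style cursor named cur and a hand-picked tolerance, neither of which is part of this commit:

    import numpy as np

    def rough_check_apercentile(cur, percent=20, tol=1.0):
        # Exact reference percentile over the raw values (NULLs dropped).
        cur.execute("select c1 from stb1 where c1 is not null")
        values = [r[0] for r in cur.fetchall()]
        exact = np.percentile(values, percent)

        # The engine's approximate percentile for the same column.
        cur.execute(f"select apercentile(c1, {percent}) from stb1")
        approx = cur.fetchone()[0]

        # Approximation error is expected to be small but not zero,
        # hence a tolerance instead of strict equality.
        assert abs(approx - exact) <= tol, (approx, exact)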
tests/system-test/2-query/distribute_agg_count.py (new file, mode 100644)

from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143,
                     "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143,
                     "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000

    def check_count_functions(self, tbname , col_name):

        max_sql = f"select count({col_name}) from {tbname};"

        same_sql = f"select sum(c) from (select {col_name} ,1 as c from {tbname} where {col_name} is not null) "

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" count function work not as expected, sql : %s " %max_sql)
        else:
            tdLog.info(" count function work as expected, sql : %s " %max_sql)

    def prepare_datas_of_distribute(self):

        # prepate datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1,21):
            if i ==1 or i == 4:
                continue
            else:
                tbname = "ct"+f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

    def check_distribute_datas(self):
        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables={}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]]=[]

        # check sub_table of per vnode ,make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k ,v in vnode_tables.items():
            if len(v)>=2:
                count+=1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

    def check_count_distribute_diff_vnode(self,col_name):

        vgroup_ids = []
        for k ,v in self.vnode_disbutes.items():
            if len(v)>=2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables,1)[0])

        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," %tbname

        tbname_filters = tbname_ins[:-1]

        max_sql = f"select count({col_name}) from stb1 where tbname in ({tbname_filters});"

        same_sql = f"select sum(c) from (select {col_name} ,1 as c from stb1 where tbname in ({tbname_filters}) and {col_name} is not null) "

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" count function work not as expected, sql : %s " %max_sql)
        else:
            tdLog.info(" count function work as expected, sql : %s " %max_sql)

    def check_count_status(self):
        # check max function work status

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" ,"FLOAT" ,"DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_count_functions(tablename,colname)

        # check max function for different vnode

        for colname in colnames:
            if colname.startswith("c"):
                self.check_count_distribute_diff_vnode(colname)
            else:
                # self.check_count_distribute_diff_vnode(colname) # bug for tag
                pass

    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select count(c1) from stb1 ")
        tdSql.checkData(0,0,184)

        tdSql.query("select count(c1) from stb1 where t1=1")
        tdSql.checkData(0,0,9)

        tdSql.query("select count(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0,0,2)

        tdSql.query("select count(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0,0,9)

        tdSql.query("select count(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select count(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select count(c1) from stb1 union all select count(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,184)

        # join
        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select count(tb1.c1), count(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,10)
        tdSql.checkData(0,1,10)

        # group by
        tdSql.execute(" use testdb ")

        tdSql.query(" select count(*) from stb1 ")
        tdSql.checkData(0,0,187)
        tdSql.query(" select count(*) from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select count(*) from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select count(*) from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select max(c1),tbname from stb1 partition by tbname")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select max(c1) from %s " %tbname)
            tdSql.checkData(0,0,row[0])

        tdSql.query("select max(c1),tbname from stb1 partition by t1")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select max(c1) from %s " %tbname)
            tdSql.checkData(0,0,row[0])

        # nest query for support max
        tdSql.query("select abs(c2+2)+1 from (select count(c1) c2 from stb1)")
        tdSql.checkData(0,0,187.000000000)
        tdSql.query("select count(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0,0,184)
        tdSql.query("select count(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0,0,184)

        # mixup with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
        tdSql.checkData(0,0,28)
        tdSql.checkData(0,1,184)
        tdSql.checkData(0,2,-99999)
        tdSql.checkData(0,3,-999)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_count_status()
        self.distribute_agg_query()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
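check_count_functions above leans on a simple identity: count(col) equals the sum of 1s taken over the non-NULL rows of col, which is why the rewrite select sum(c) from (select col, 1 as c from tb where col is not null) must agree with the plain count. The same identity in a few lines of plain Python, just to make the cross-check explicit (toy data, nothing TDengine-specific):

    # count(col) == sum(1 for each non-NULL value of col)
    rows = [3, None, 7, 0, None, -5, 8]

    # what "select count(c1) ..." counts: non-NULL values only
    count_col = sum(v is not None for v in rows)

    # what "select sum(c) from (select c1, 1 as c ... where c1 is not null)" adds up
    ones = [1 for v in rows if v is not None]

    assert count_col == sum(ones) == 5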
tests/system-test/2-query/distribute_agg_max.py (new file, mode 100644)

from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143,
                     "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143,
                     "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000

    def check_max_functions(self, tbname , col_name):

        max_sql = f"select max({col_name}) from {tbname};"

        same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" max function work not as expected, sql : %s " %max_sql)
        else:
            tdLog.info(" max function work as expected, sql : %s " %max_sql)

    def prepare_datas_of_distribute(self):

        # prepate datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1,21):
            if i ==1 or i == 4:
                continue
            else:
                tbname = "ct"+f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

    def check_distribute_datas(self):
        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables={}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]]=[]

        # check sub_table of per vnode ,make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k ,v in vnode_tables.items():
            if len(v)>=2:
                count+=1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

    def check_max_distribute_diff_vnode(self,col_name):

        vgroup_ids = []
        for k ,v in self.vnode_disbutes.items():
            if len(v)>=2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables,1)[0])

        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," %tbname

        tbname_filters = tbname_ins[:-1]

        max_sql = f"select max({col_name}) from stb1 where tbname in ({tbname_filters});"

        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) order by {col_name} desc limit 1"

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" max function work not as expected, sql : %s " %max_sql)
        else:
            tdLog.info(" max function work as expected, sql : %s " %max_sql)

    def check_max_status(self):
        # check max function work status

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" ,"FLOAT" ,"DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_max_functions(tablename,colname)

        # check max function for different vnode

        for colname in colnames:
            if colname.startswith("c"):
                self.check_max_distribute_diff_vnode(colname)
            else:
                # self.check_max_distribute_diff_vnode(colname) # bug for tag
                pass

    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select max(c1) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select max(c1) from stb1 where t1=1")
        tdSql.checkData(0,0,10)

        tdSql.query("select max(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0,0,11112.000000000)

        tdSql.query("select max(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0,0,10)

        tdSql.query("select max(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select max(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select max(c1) from stb1 union all select max(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,28)

        # join
        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select max(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,9)
        tdSql.checkData(0,0,9.00000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select max(c1),tbname from stb1 partition by tbname")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select max(c1) from %s " %tbname)
            tdSql.checkData(0,0,row[0])

        tdSql.query("select max(c1),tbname from stb1 partition by t1")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select max(c1) from %s " %tbname)
            tdSql.checkData(0,0,row[0])

        # nest query for support max
        tdSql.query("select abs(c2+2)+1 from (select max(c1) c2 from stb1)")
        tdSql.checkData(0,0,31.000000000)
        tdSql.query("select max(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0,0,31.000000000)
        tdSql.query("select max(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0,0,31.000000000)

        # mixup with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
        tdSql.checkData(0,0,28)
        tdSql.checkData(0,1,184)
        tdSql.checkData(0,2,-99999)
        tdSql.checkData(0,3,-999)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_max_status()
        self.distribute_agg_query()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
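The max case cross-checks every numeric column two ways: per sub-table (check_max_functions) and across sub-tables known to live on different vnodes (check_max_distribute_diff_vnode), in both cases comparing max(col) with an order by col desc limit 1 rewrite. The same comparison can be phrased as a small reusable helper over any DB-API cursor; this is only a sketch under the assumption that such a cursor (here called cur) exists, not something the commit itself provides:

    def cross_check_max(cur, table, column):
        """Compare max(column) against an ORDER BY ... DESC LIMIT 1 rewrite."""
        cur.execute(f"select max({column}) from {table}")
        via_agg = cur.fetchone()[0]

        cur.execute(f"select {column} from {table} order by {column} desc limit 1")
        row = cur.fetchone()
        via_sort = row[0] if row else None

        # Both paths should agree whenever the table holds at least one non-NULL value.
        assert via_agg == via_sort, (table, column, via_agg, via_sort)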
tests/system-test/2-query/distribute_agg_min.py (new file, mode 100644)

from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143,
                     "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143,
                     "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000

    def check_min_functions(self, tbname , col_name):

        min_sql = f"select min({col_name}) from {tbname};"

        same_sql = f"select {col_name} from {tbname} where {col_name} is not null order by {col_name} asc limit 1"

        tdSql.query(min_sql)
        min_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if min_result != same_result:
            tdLog.exit(" min function work not as expected, sql : %s " %min_sql)
        else:
            tdLog.info(" min function work as expected, sql : %s " %min_sql)

    def prepare_datas_of_distribute(self):

        # prepate datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1,21):
            if i ==1 or i == 4:
                continue
            else:
                tbname = "ct"+f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

    def check_distribute_datas(self):
        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables={}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]]=[]

        # check sub_table of per vnode ,make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k ,v in vnode_tables.items():
            if len(v)>=2:
                count+=1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

    def check_min_distribute_diff_vnode(self,col_name):

        vgroup_ids = []
        for k ,v in self.vnode_disbutes.items():
            if len(v)>=2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables,1)[0])

        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," %tbname

        tbname_filters = tbname_ins[:-1]

        min_sql = f"select min({col_name}) from stb1 where tbname in ({tbname_filters});"

        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null order by {col_name} asc limit 1"

        tdSql.query(min_sql)
        min_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if min_result != same_result:
            tdLog.exit(" min function work not as expected, sql : %s " %min_sql)
        else:
            tdLog.info(" min function work as expected, sql : %s " %min_sql)

    def check_min_status(self):
        # check max function work status

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" ,"FLOAT" ,"DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_min_functions(tablename,colname)

        # check max function for different vnode

        for colname in colnames:
            if colname.startswith("c"):
                self.check_min_distribute_diff_vnode(colname)
            else:
                # self.check_min_distribute_diff_vnode(colname) # bug for tag
                pass

    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select min(c1) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select min(c1) from stb1 where t1=1")
        tdSql.checkData(0,0,2)

        tdSql.query("select min(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0,0,11112.000000000)

        tdSql.query("select min(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0,0,2)

        tdSql.query("select min(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select min(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select min(c1) from stb1 union all select min(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,0)

        # join
        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select min(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,0)
        tdSql.checkData(0,0,0.00000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select min(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select min(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select min(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select min(c1),tbname from stb1 partition by tbname")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select min(c1) from %s " %tbname)
            tdSql.checkData(0,0,row[0])

        tdSql.query("select min(c1),tbname from stb1 partition by t1")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select min(c1) from %s " %tbname)
            tdSql.checkData(0,0,row[0])

        # nest query for support max
        tdSql.query("select abs(c2+2)+1 from (select min(c1) c2 from stb1)")
        tdSql.checkData(0,0,3.000000000)
        tdSql.query("select min(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0,0,3.000000000)
        tdSql.query("select min(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0,0,3.000000000)

        # mixup with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),min(c1) from stb1")
        tdSql.checkData(0,0,28)
        tdSql.checkData(0,1,184)
        tdSql.checkData(0,2,-99999)
        tdSql.checkData(0,3,-999)
        tdSql.checkData(0,4,0)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_min_status()
        self.distribute_agg_query()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/system-test/2-query/distribute_agg_spread.py (new file, mode 100644)

from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143,
                     "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143,
                     "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000

    def check_spread_functions(self, tbname , col_name):

        spread_sql = f"select spread({col_name}) from {tbname};"

        same_sql = f"select max({col_name})-min({col_name}) from {tbname}"

        tdSql.query(spread_sql)
        spread_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if spread_result != same_result:
            tdLog.exit(" max function work not as expected, sql : %s " %spread_sql)
        else:
            tdLog.info(" max function work as expected, sql : %s " %spread_sql)

    def prepare_datas_of_distribute(self):

        # prepate datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1,21):
            if i ==1 or i == 4:
                continue
            else:
                tbname = "ct"+f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

    def check_distribute_datas(self):
        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables={}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]]=[]

        # check sub_table of per vnode ,make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k ,v in vnode_tables.items():
            if len(v)>=2:
                count+=1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

    def check_spread_distribute_diff_vnode(self,col_name):

        vgroup_ids = []
        for k ,v in self.vnode_disbutes.items():
            if len(v)>=2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables,1)[0])

        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," %tbname

        tbname_filters = tbname_ins[:-1]

        spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})"

        same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})"

        tdSql.query(spread_sql)
        spread_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if spread_result != same_result:
            tdLog.exit(" spread function work not as expected, sql : %s " %spread_sql)
        else:
            tdLog.info(" spread function work as expected, sql : %s " %spread_sql)

    def check_spread_status(self):
        # check max function work status

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" ,"FLOAT" ,"DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_spread_functions(tablename,colname)

        # check max function for different vnode

        for colname in colnames:
            if colname.startswith("c"):
                self.check_spread_distribute_diff_vnode(colname)
            else:
                # self.check_spread_distribute_diff_vnode(colname) # bug for tag
                pass

    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select spread(c1) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select spread(c1) from stb1 where t1=1")
        tdSql.checkData(0,0,8.000000000)

        tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0,0,0.000000000)

        tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0,0,8.000000000)

        tdSql.query("select spread(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,28.000000000)

        # join
        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,9.000000000)
        tdSql.checkData(0,0,9.00000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select spread(c1) from stb1 partition by tbname")
        query_data = tdSql.queryResult

        # nest query for support max
        tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)")
        tdSql.checkData(0,0,1.000000000)
        tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0,0,29.000000000)
        tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0,0,29.000000000)

        # mixup with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1")
        tdSql.checkData(0,0,28)
        tdSql.checkData(0,1,184)
        tdSql.checkData(0,2,-99999)
        tdSql.checkData(0,3,-999)
        tdSql.checkData(0,4,28.000000000)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_spread_status()
        self.distribute_agg_query()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
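spread(col) is defined as max(col) minus min(col), which is why both check_spread_functions and the union all query above compare it directly against max(c1)-min(c1). That definition also makes the distributed merge cheap: take the max of the per-vnode maxima and the min of the per-vnode minima, then subtract. A toy illustration of that merge in plain Python (partitions stand in for vnodes):

    values = [12, -4, 7, 30, -19, 5, 22, 0]
    vnodes = [values[0::2], values[1::2]]        # pretend two vnodes hold interleaved slices

    # Each vnode only ships its (min, max) pair; the merge never needs the raw rows.
    partials = [(min(p), max(p)) for p in vnodes]
    merged_min = min(lo for lo, _ in partials)
    merged_max = max(hi for _, hi in partials)

    assert merged_max - merged_min == max(values) - min(values) == 49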
tests/system-test/2-query/distribute_agg_sum.py
0 → 100644
浏览文件 @
359e5426
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random, os, sys
import platform


class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143, "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143, "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000

    def check_sum_functions(self, tbname, col_name):
        sum_sql = f"select sum({col_name}) from {tbname};"
        same_sql = f"select {col_name} from {tbname} where {col_name} is not null "

        tdSql.query(same_sql)
        pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
        if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
            pre_data = np.array(pre_data, dtype='int64')
        pre_sum = np.sum(pre_data)

        tdSql.query(sum_sql)
        tdSql.checkData(0, 0, pre_sum)

    def prepare_datas_of_distribute(self):
        # prepare datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute('''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute('''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )")
            tdSql.execute(f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )")

        for i in range(1, 21):
            if i == 1 or i == 4:
                continue
            else:
                tbname = "ct" + f'{i}'
                for j in range(9):
                    tdSql.execute(f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )")

        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

    def check_distribute_datas(self):
        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables = {}
        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]] = []

        # check sub_table of per vnode, make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k, v in vnode_tables.items():
            if len(v) >= 2:
                count += 1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

    def check_sum_distribute_diff_vnode(self, col_name):
        vgroup_ids = []
        for k, v in self.vnode_disbutes.items():
            if len(v) >= 2:
                vgroup_ids.append(k)

        distribute_tbnames = []
        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables, 1)[0])

        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," % tbname
        tbname_filters = tbname_ins[:-1]

        sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});"
        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "

        tdSql.query(same_sql)
        pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
        if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
            pre_data = np.array(pre_data, dtype='int64')
        pre_sum = np.sum(pre_data)

        tdSql.query(sum_sql)
        tdSql.checkData(0, 0, pre_sum)

    def check_sum_status(self):
        # check sum function work status
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_sum_functions(tablename, colname)

        # check sum function for different vnode
        for colname in colnames:
            if colname.startswith("c"):
                self.check_sum_distribute_diff_vnode(colname)
            else:
                # self.check_sum_distribute_diff_vnode(colname) # bug for tag
                pass

    def distribute_agg_query(self):
        # basic filter
        tdSql.query(" select sum(c1) from stb1 ")
        tdSql.checkData(0, 0, 2592)

        tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ")
        tdSql.checkData(0, 0, 2592)

        tdSql.query(" select sum(c1) from stb1 where t1=1")
        tdSql.checkData(0, 0, 54)

        tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0, 0, 22224.000000000)

        tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0, 0, 54)

        tdSql.query("select sum(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 2592)

        tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 5184)

        # join
        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 45)
        tdSql.checkData(0, 1, 45.000000000)

        # group by
        tdSql.execute(" use testdb ")

        # partition by tbname or partition by tag
        tdSql.query("select sum(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        # nest query support for sum
        tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)")
        tdSql.checkData(0, 0, 2595.000000000)
        tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0, 0, 2960.000000000)
        tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0, 0, 2960.000000000)

        # mixup with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1")
        tdSql.checkData(0, 0, 28)
        tdSql.checkData(0, 1, 184)
        tdSql.checkData(0, 2, -99999)
        tdSql.checkData(0, 3, -999)
        tdSql.checkData(0, 4, 28202310.000000000)

    def run(self):
        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_sum_status()
        self.distribute_agg_query()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
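One detail worth noting in check_sum_functions and check_sum_distribute_diff_vnode above: the reference sum is computed with numpy, and when the source array is int32 numpy's sum accumulator follows the platform default integer, which is 32-bit on Windows, so summing large column values could wrap around; the platform check therefore promotes the data to int64 before np.sum. A small standalone illustration of the effect, with made-up values and not part of the test suite:

import numpy as np

vals = np.array([99999999] * 100, dtype='int32')        # simulate the int32 array that may be built on Windows
total_default = np.sum(vals)                            # accumulator follows the platform integer: may wrap on Windows, exact on 64-bit Linux
total_promoted = np.sum(np.array(vals, dtype='int64'))  # 9999999900, exact everywhere after promoting to int64
print(total_default, total_promoted)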
tests/system-test/2-query/max.py
...
...
@@ -5,198 +5,215 @@ import numpy as np
class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143, "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143, "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143, "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.rowNum = 10
        self.ts = 1537146000000

    def prepare_data(self):
        pass

    def run(self):
        self.binary_str = 'taosdata'
        self.nchar_str = '涛思数据'

    def max_check_stb_and_tb_base(self):
        tdSql.prepare()
        intData = []
        floatData = []
        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
        tdSql.execute("create table stb_1 using stb tags('beijing')")
        tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
        for i in range(self.rowNum):
            tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
            tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)
        for i in ['ts', 'col11', 'col12', 'col13']:
            for j in ['db.stb', 'stb', 'db.stb_1', 'stb_1']:
                tdSql.error(f'select max({i} from {j})')
        for i in range(1, 11):
            for j in ['db.stb', 'stb', 'db.stb_1', 'stb_1']:
                tdSql.query(f"select max(col{i}) from {j}")
                if i < 9:
                    tdSql.checkData(0, 0, np.max(intData))
                elif i >= 9:
                    tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from stb_1 where col2<=5")
        tdSql.checkData(0, 0, 5)
        tdSql.query("select max(col1) from stb where col2<=5")
        tdSql.checkData(0, 0, 5)
        tdSql.execute('drop database db')

    def max_check_ntb_base(self):
        tdSql.prepare()
        intData = []
        floatData = []
        tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
        for i in range(self.rowNum):
            tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
            tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)

        # max verifacation
        tdSql.error("select max(ts) from stb_1")
        tdSql.error("select max(ts) from db.stb_1")
        tdSql.error("select max(col7) from stb_1")
        tdSql.error("select max(col7) from db.stb_1")
        tdSql.error("select max(col8) from stb_1")
        tdSql.error("select max(col8) from db.stb_1")
        tdSql.error("select max(col9) from stb_1")
        tdSql.error("select max(col9) from db.stb_1")

        tdSql.query("select max(col1) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col1) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col5) from stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col5) from db.stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from db.stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from stb_1 where col2<=5")
            floatData.append(i + 0.1)
        for i in ['ts', 'col11', 'col12', 'col13']:
            for j in ['db.ntb', 'ntb']:
                tdSql.error(f'select max({i} from {j})')
        for i in range(1, 11):
            for j in ['db.ntb', 'ntb']:
                tdSql.query(f"select max(col{i}) from {j}")
                if i < 9:
                    tdSql.checkData(0, 0, np.max(intData))
                elif i >= 9:
                    tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from ntb where col2<=5")
        tdSql.checkData(0, 0, 5)
        tdSql.execute('drop database db')

    def check_max_functions(self, tbname, col_name):
        max_sql = f"select max({col_name}) from {tbname};"
        same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" max function work not as expected, sql : %s " % max_sql)
        else:
            tdLog.info(" max function work as expected, sql : %s " % max_sql)

    def support_distributed_aggregate(self):
        # prepate datas for 20 tables distributed at different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute('''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )
        tdSql.execute('''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )")
            tdSql.execute(f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )")

        for i in range(1, 21):
            if i == 1 or i == 4:
                continue
            else:
                tbname = "ct" + f'{i}'
                for j in range(9):
                    tdSql.execute(f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )")

        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")

        # get vgroup_ids of all
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables = {}
        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]] = []

        # check sub_table of per vnode ,make sure sub_table has been distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])

        count = 0
        for k, v in vnode_tables.items():
            if len(v) >= 2:
                count += 1
        if count < 2:
            tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")

        # check max function work status
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_max_functions(tablename, colname)

        tdSql.error("select max(ts) from stb")
        tdSql.error("select max(ts) from db.stb")
        tdSql.error("select max(col7) from stb")
        tdSql.error("select max(col7) from db.stb")
        tdSql.error("select max(col8) from stb")
        tdSql.error("select max(col8) from db.stb")
        tdSql.error("select max(col9) from stb")
        tdSql.error("select max(col9) from db.stb")

        tdSql.query("select max(col1) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col1) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from db.stb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col5) from stb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col5) from db.stb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from stb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from db.stb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from stb where col2<=5")
        tdSql.checkData(0, 0, 5)

        # max function with basic filter
        print(vnode_tables)
        tdSql.error("select max(ts) from ntb")
        tdSql.error("select max(ts) from db.ntb")
        tdSql.error("select max(col7) from ntb")
        tdSql.error("select max(col7) from db.ntb")
        tdSql.error("select max(col8) from ntb")
        tdSql.error("select max(col8) from db.ntb")
        tdSql.error("select max(col9) from ntb")
        tdSql.error("select max(col9) from db.ntb")

        tdSql.query("select max(col1) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col1) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from db.ntb")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col5) from ntb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col5) from db.ntb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from ntb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from db.ntb")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from stb_1 where col2<=5")
        tdSql.checkData(0, 0, 5)

    def run(self):
        # max verifacation
        self.max_check_stb_and_tb_base()
        self.max_check_ntb_base()
        self.support_distributed_aggregate()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
...
...
tests/system-test/fulltest.sh
...
...
@@ -98,6 +98,12 @@ python3 ./test.py -f 2-query/stateduration.py
python3 ./test.py -f 2-query/function_stateduration.py
python3 ./test.py -f 2-query/statecount.py
python3 ./test.py -f 2-query/tail.py
python3 ./test.py -f 2-query/distribute_agg_count.py
python3 ./test.py -f 2-query/distribute_agg_max.py
python3 ./test.py -f 2-query/distribute_agg_min.py
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_spread.py
python3 ./test.py -f 2-query/distribute_agg_apercentile.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py
...
...
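The new cases are wired into fulltest.sh above; the same invocation should also work for a single case run locally, assuming it is launched from the tests/system-test directory where fulltest.sh sits (the relative ./test.py path in the script suggests that is the intended working directory):

cd tests/system-test
python3 ./test.py -f 2-query/distribute_agg_max.py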