Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
taosdata
TDengine
提交
2167d0fc
T
TDengine
项目概览
taosdata
/
TDengine
大约 1 年 前同步成功
通知
1185
Star
22015
Fork
4786
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
TDengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
2167d0fc
编写于
1月 04, 2022
作者:
A
Alex Duan
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'develop' into fix/TS-802-D1
上级
5bdf5152
97f08c97
变更
20
展开全部
隐藏空白更改
内联
并排
Showing
20 changed file
with
1537 addition
and
781 deletion
+1537
-781
Jenkinsfile
Jenkinsfile
+123
-24
src/client/src/tscGlobalmerge.c
src/client/src/tscGlobalmerge.c
+2
-2
src/client/src/tscServer.c
src/client/src/tscServer.c
+1
-0
src/inc/tsdb.h
src/inc/tsdb.h
+4
-0
src/kit/taos-tools
src/kit/taos-tools
+1
-1
src/plugins/monitor/src/monMain.c
src/plugins/monitor/src/monMain.c
+3
-90
src/query/inc/qExecutor.h
src/query/inc/qExecutor.h
+2
-1
src/query/src/qExecutor.c
src/query/src/qExecutor.c
+51
-16
src/tsdb/src/tsdbRead.c
src/tsdb/src/tsdbRead.c
+289
-26
src/util/inc/tutil.h
src/util/inc/tutil.h
+7
-0
src/util/src/tutil.c
src/util/src/tutil.c
+13
-0
tests/pytest/fulltest-others.sh
tests/pytest/fulltest-others.sh
+0
-13
tests/pytest/fulltest-query.sh
tests/pytest/fulltest-query.sh
+0
-21
tests/pytest/fulltest-tools.sh
tests/pytest/fulltest-tools.sh
+0
-19
tests/pytest/fulltest.sh
tests/pytest/fulltest.sh
+1
-0
tests/pytest/query/queryLimit.py
tests/pytest/query/queryLimit.py
+194
-0
tests/script/general/parser/where.sim
tests/script/general/parser/where.sim
+8
-0
tests/script/jenkins/basic.txt
tests/script/jenkins/basic.txt
+0
-52
tests/system-test/5-taos-tools/dump_col_tag.py
tests/system-test/5-taos-tools/dump_col_tag.py
+789
-515
tests/test-CI.sh
tests/test-CI.sh
+49
-1
未找到文件。
Jenkinsfile
浏览文件 @
2167d0fc
...
...
@@ -8,6 +8,7 @@ def skipbuild = 0
def
win_stop
=
0
def
scope
=
[]
def
mod
=
[
0
,
1
,
2
,
3
,
4
]
def
sim_mod
=
[
0
,
1
,
2
,
3
]
def
abortPreviousBuilds
()
{
def
currentJobName
=
env
.
JOB_NAME
...
...
@@ -45,6 +46,7 @@ def pre_test(){
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script
{
...
...
@@ -120,6 +122,7 @@ def pre_test_noinstall(){
sh
'hostname'
sh
'''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script
{
...
...
@@ -192,6 +195,7 @@ def pre_test_mac(){
sh
'hostname'
sh
'''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script
{
...
...
@@ -382,7 +386,9 @@ pipeline {
temp
=
(
gitlog
=~
/\((.*?)\)/
)
temp
=
temp
[
0
].
remove
(
1
)
scope
=
temp
.
split
(
","
)
scope
=
[
'connector'
,
'query'
,
'insert'
,
'other'
,
'tools'
,
'taosAdapter'
]
Collections
.
shuffle
mod
Collections
.
shuffle
sim_mod
}
}
...
...
@@ -400,7 +406,7 @@ pipeline {
}
parallel
{
stage
(
'python_1'
)
{
agent
{
label
" slave1 || slave
6 || slave11 || slave16
"
}
agent
{
label
" slave1 || slave
11
"
}
steps
{
pre_test
()
timeout
(
time:
100
,
unit:
'MINUTES'
){
...
...
@@ -417,7 +423,7 @@ pipeline {
}
}
stage
(
'python_2'
)
{
agent
{
label
" slave2 || slave
7 || slave12 || slave17
"
}
agent
{
label
" slave2 || slave
12
"
}
steps
{
pre_test
()
timeout
(
time:
100
,
unit:
'MINUTES'
){
...
...
@@ -434,7 +440,7 @@ pipeline {
}
}
stage
(
'python_3'
)
{
agent
{
label
" slave3 || slave
8 || slave13 ||slave18
"
}
agent
{
label
" slave3 || slave
13
"
}
steps
{
timeout
(
time:
105
,
unit:
'MINUTES'
){
pre_test
()
...
...
@@ -451,7 +457,7 @@ pipeline {
}
}
stage
(
'python_4'
)
{
agent
{
label
" slave4 || slave
9 || slave14 || slave19
"
}
agent
{
label
" slave4 || slave
14
"
}
steps
{
timeout
(
time:
100
,
unit:
'MINUTES'
){
pre_test
()
...
...
@@ -469,7 +475,7 @@ pipeline {
}
}
stage
(
'python_5'
)
{
agent
{
label
" slave5 || slave1
0 || slave15 || slave20
"
}
agent
{
label
" slave5 || slave1
5
"
}
steps
{
timeout
(
time:
100
,
unit:
'MINUTES'
){
pre_test
()
...
...
@@ -486,35 +492,98 @@ pipeline {
}
}
}
stage
(
'
arm64centos7
'
)
{
agent
{
label
"
arm64centos7
"
}
stage
(
'
sim_1
'
)
{
agent
{
label
"
slave6 || slave16
"
}
steps
{
pre_test_noinstall
()
}
pre_test
()
timeout
(
time:
100
,
unit:
'MINUTES'
){
sh
"""
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[0]}
date"""
}
}
}
stage
(
'
arm64centos8
'
)
{
agent
{
label
"
arm64centos8
"
}
stage
(
'
sim_2
'
)
{
agent
{
label
"
slave7 || slave17
"
}
steps
{
pre_test_noinstall
()
pre_test
()
timeout
(
time:
100
,
unit:
'MINUTES'
){
sh
"""
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[1]}
date"""
}
}
}
stage
(
'
arm32bionic
'
)
{
agent
{
label
"
arm32bionic
"
}
stage
(
'
sim_3
'
)
{
agent
{
label
"
slave8 || slave18
"
}
steps
{
pre_test_noinstall
()
timeout
(
time:
105
,
unit:
'MINUTES'
){
pre_test
()
sh
"""
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[2]}
date"""
}
}
}
stage
(
'
arm64bionic
'
)
{
agent
{
label
"
arm64bionic
"
}
stage
(
'
sim_4
'
)
{
agent
{
label
"
slave9 || slave19
"
}
steps
{
pre_test_noinstall
()
timeout
(
time:
100
,
unit:
'MINUTES'
){
pre_test
()
sh
"""
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[3]}
date"""
}
}
}
stage
(
'
arm64focal
'
)
{
agent
{
label
"
arm64focal
"
}
stage
(
'
other
'
)
{
agent
{
label
"
slave10 || slave20
"
}
steps
{
pre_test_noinstall
()
timeout
(
time:
100
,
unit:
'MINUTES'
){
pre_test
()
timeout
(
time:
60
,
unit:
'MINUTES'
){
sh
'''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
timeout
(
time:
60
,
unit:
'MINUTES'
){
sh
'''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
sh
'''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
catchError
(
buildResult:
'SUCCESS'
,
stageResult:
'FAILURE'
)
{
sh
'''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
}
sh
'''
cd ${WKC}/tests
./test-all.sh full unit
date
'''
}
}
}
stage
(
'centos7'
)
{
agent
{
label
" centos7 "
}
...
...
@@ -546,12 +615,41 @@ pipeline {
pre_test_mac
()
}
}
stage
(
'arm64centos7'
)
{
agent
{
label
" arm64centos7 "
}
steps
{
pre_test_noinstall
()
}
}
stage
(
'arm64centos8'
)
{
agent
{
label
" arm64centos8 "
}
steps
{
pre_test_noinstall
()
}
}
stage
(
'arm32bionic'
)
{
agent
{
label
" arm32bionic "
}
steps
{
pre_test_noinstall
()
}
}
stage
(
'arm64bionic'
)
{
agent
{
label
" arm64bionic "
}
steps
{
pre_test_noinstall
()
}
}
stage
(
'arm64focal'
)
{
agent
{
label
" arm64focal "
}
steps
{
pre_test_noinstall
()
}
}
stage
(
'build'
){
agent
{
label
" wintest "
}
steps
{
pre_test
()
script
{
script
{
while
(
win_stop
==
0
){
sleep
(
1
)
}
...
...
@@ -561,6 +659,7 @@ pipeline {
stage
(
'test'
){
agent
{
label
"win"
}
steps
{
catchError
(
buildResult:
'FAILURE'
,
stageResult:
'FAILURE'
)
{
pre_test_win
()
timeout
(
time:
20
,
unit:
'MINUTES'
){
...
...
@@ -569,7 +668,7 @@ pipeline {
.\\test-all.bat wintest
'''
}
}
}
script
{
win_stop
=
1
}
...
...
src/client/src/tscGlobalmerge.c
浏览文件 @
2167d0fc
...
...
@@ -902,7 +902,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
// not belongs to the same group, return the result of current group;
setInputDataBlock
(
pOperator
,
pAggInfo
->
binfo
.
pCtx
,
pAggInfo
->
pExistBlock
,
TSDB_ORDER_ASC
);
updateOutputBuf
(
&
pAggInfo
->
binfo
,
&
pAggInfo
->
bufCapacity
,
pAggInfo
->
pExistBlock
->
info
.
rows
);
updateOutputBuf
(
&
pAggInfo
->
binfo
,
&
pAggInfo
->
bufCapacity
,
pAggInfo
->
pExistBlock
->
info
.
rows
,
pOperator
->
pRuntimeEnv
);
{
// reset output buffer
for
(
int32_t
j
=
0
;
j
<
pOperator
->
numOfOutput
;
++
j
)
{
...
...
@@ -954,7 +954,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
// not belongs to the same group, return the result of current group
setInputDataBlock
(
pOperator
,
pAggInfo
->
binfo
.
pCtx
,
pBlock
,
TSDB_ORDER_ASC
);
updateOutputBuf
(
&
pAggInfo
->
binfo
,
&
pAggInfo
->
bufCapacity
,
pBlock
->
info
.
rows
*
pAggInfo
->
resultRowFactor
);
updateOutputBuf
(
&
pAggInfo
->
binfo
,
&
pAggInfo
->
bufCapacity
,
pBlock
->
info
.
rows
*
pAggInfo
->
resultRowFactor
,
pOperator
->
pRuntimeEnv
);
doExecuteFinalMerge
(
pOperator
,
pOperator
->
numOfOutput
,
pBlock
);
savePrevOrderColumns
(
pAggInfo
->
currentGroupColData
,
pAggInfo
->
groupColumnList
,
pBlock
,
0
,
&
pAggInfo
->
hasGroupColData
);
...
...
src/client/src/tscServer.c
浏览文件 @
2167d0fc
...
...
@@ -1866,6 +1866,7 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
bool
convertJson
=
true
;
if
(
pQueryInfo
->
isStddev
==
true
)
convertJson
=
false
;
convertQueryResult
(
pRes
,
pQueryInfo
,
pSql
->
self
,
true
,
convertJson
);
pRes
->
code
=
pQueryInfo
->
pQInfo
->
code
;
code
=
pRes
->
code
;
if
(
pRes
->
code
==
TSDB_CODE_SUCCESS
)
{
...
...
src/inc/tsdb.h
浏览文件 @
2167d0fc
...
...
@@ -173,6 +173,7 @@ typedef void *TsdbQueryHandleT; // Use void to hide implementation details
typedef
struct
STsdbQueryCond
{
STimeWindow
twindow
;
int32_t
order
;
// desc|asc order to iterate the data block
int64_t
offset
;
// skip offset put down to tsdb
int32_t
numOfCols
;
SColumnInfo
*
colList
;
bool
loadExternalRows
;
// load external rows or not
...
...
@@ -391,6 +392,9 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
int32_t
tsdbGetFileBlocksDistInfo
(
TsdbQueryHandleT
*
queryHandle
,
STableBlockDist
*
pTableBlockInfo
);
// obtain queryHandle attribute
int64_t
tsdbSkipOffset
(
TsdbQueryHandleT
queryHandle
);
/**
* get the statistics of repo usage
* @param repo. point to the tsdbrepo
...
...
taos-tools
@
59f00a69
比较
dd78bfff
...
59f00a69
Subproject commit
dd78bfff5549c08153798719c1707ab441b5f4ab
Subproject commit
59f00a69f36b08cea86a70a22c29b2c27ef506ae
src/plugins/monitor/src/monMain.c
浏览文件 @
2167d0fc
...
...
@@ -171,7 +171,6 @@ static void monSaveSystemInfo();
static
void
monSaveClusterInfo
();
static
void
monSaveDnodesInfo
();
static
void
monSaveVgroupsInfo
();
static
void
monSaveSlowQueryInfo
();
static
void
monSaveDisksInfo
();
static
void
monSaveGrantsInfo
();
static
void
monSaveHttpReqInfo
();
...
...
@@ -321,7 +320,6 @@ static void *monThreadFunc(void *param) {
monSaveClusterInfo
();
}
monSaveVgroupsInfo
();
monSaveSlowQueryInfo
();
monSaveDisksInfo
();
monSaveGrantsInfo
();
monSaveHttpReqInfo
();
...
...
@@ -383,9 +381,9 @@ static void monBuildMonitorSql(char *sql, int32_t cmd) {
tsMonitorDbName
,
TSDB_DEFAULT_USER
);
}
else
if
(
cmd
==
MON_CMD_CREATE_TB_SLOWQUERY
)
{
snprintf
(
sql
,
SQL_LENGTH
,
"create table if not exists %s.slowquery(ts timestamp,
query_id
"
"binary(%d),
username binary(%d), qid binary(%d), created_time timestamp, time bigint, end_point binary(%d)
, sql binary(%d))"
,
tsMonitorDbName
,
QUERY_ID_LEN
,
TSDB_TABLE_FNAME_LEN
-
1
,
QUERY_ID_LEN
,
TSDB_EP_LEN
,
TSDB_SLOW_QUERY_SQL_LEN
);
"create table if not exists %s.slowquery(ts timestamp,
username
"
"binary(%d),
created_time timestamp, time bigint
, sql binary(%d))"
,
tsMonitorDbName
,
TSDB_TABLE_FNAME_LEN
-
1
,
TSDB_SLOW_QUERY_SQL_LEN
);
}
else
if
(
cmd
==
MON_CMD_CREATE_TB_LOG
)
{
snprintf
(
sql
,
SQL_LENGTH
,
"create table if not exists %s.log(ts timestamp, level tinyint, "
...
...
@@ -1213,91 +1211,6 @@ static void monSaveVgroupsInfo() {
taos_free_result
(
result
);
}
static
void
monSaveSlowQueryInfo
()
{
int64_t
ts
=
taosGetTimestampUs
();
char
*
sql
=
tsMonitor
.
sql
;
int32_t
pos
=
snprintf
(
sql
,
SQL_LENGTH
,
"insert into %s.slowquery values(%"
PRId64
,
tsMonitorDbName
,
ts
);
bool
has_slowquery
=
false
;
TAOS_RES
*
result
=
taos_query
(
tsMonitor
.
conn
,
"show queries"
);
int32_t
code
=
taos_errno
(
result
);
if
(
code
!=
TSDB_CODE_SUCCESS
)
{
monError
(
"failed to execute cmd: show queries, reason:%s"
,
tstrerror
(
code
));
}
TAOS_ROW
row
;
int32_t
num_fields
=
taos_num_fields
(
result
);
TAOS_FIELD
*
fields
=
taos_fetch_fields
(
result
);
int32_t
charLen
;
while
((
row
=
taos_fetch_row
(
result
)))
{
for
(
int
i
=
0
;
i
<
num_fields
;
++
i
)
{
if
(
strcmp
(
fields
[
i
].
name
,
"query_id"
)
==
0
)
{
has_slowquery
=
true
;
charLen
=
monGetRowElemCharLen
(
fields
[
i
],
(
char
*
)
row
[
i
]);
if
(
charLen
<
0
)
{
monError
(
"failed to save slow_query info, reason: invalid row %s len, sql:%s"
,
(
char
*
)
row
[
i
],
tsMonitor
.
sql
);
goto
DONE
;
}
pos
+=
snprintf
(
sql
+
pos
,
strlen
(
SQL_STR_FMT
)
+
charLen
+
1
,
", "
SQL_STR_FMT
,
(
char
*
)
row
[
i
]);
}
else
if
(
strcmp
(
fields
[
i
].
name
,
"user"
)
==
0
)
{
charLen
=
monGetRowElemCharLen
(
fields
[
i
],
(
char
*
)
row
[
i
]);
if
(
charLen
<
0
)
{
monError
(
"failed to save slow_query info, reason: invalid row %s len, sql:%s"
,
(
char
*
)
row
[
i
],
tsMonitor
.
sql
);
goto
DONE
;
}
pos
+=
snprintf
(
sql
+
pos
,
strlen
(
SQL_STR_FMT
)
+
charLen
+
1
,
", "
SQL_STR_FMT
,
(
char
*
)
row
[
i
]);
}
else
if
(
strcmp
(
fields
[
i
].
name
,
"qid"
)
==
0
)
{
charLen
=
monGetRowElemCharLen
(
fields
[
i
],
(
char
*
)
row
[
i
]);
if
(
charLen
<
0
)
{
monError
(
"failed to save slow_query info, reason: invalid row %s len, sql:%s"
,
(
char
*
)
row
[
i
],
tsMonitor
.
sql
);
goto
DONE
;
}
pos
+=
snprintf
(
sql
+
pos
,
strlen
(
SQL_STR_FMT
)
+
charLen
+
1
,
", "
SQL_STR_FMT
,
(
char
*
)
row
[
i
]);
}
else
if
(
strcmp
(
fields
[
i
].
name
,
"created_time"
)
==
0
)
{
int64_t
create_time
=
*
(
int64_t
*
)
row
[
i
];
create_time
=
convertTimePrecision
(
create_time
,
TSDB_TIME_PRECISION_MILLI
,
TSDB_TIME_PRECISION_MICRO
);
pos
+=
snprintf
(
sql
+
pos
,
SQL_LENGTH
,
", %"
PRId64
""
,
create_time
);
}
else
if
(
strcmp
(
fields
[
i
].
name
,
"time"
)
==
0
)
{
pos
+=
snprintf
(
sql
+
pos
,
SQL_LENGTH
,
", %"
PRId64
""
,
*
(
int64_t
*
)
row
[
i
]);
}
else
if
(
strcmp
(
fields
[
i
].
name
,
"ep"
)
==
0
)
{
charLen
=
monGetRowElemCharLen
(
fields
[
i
],
(
char
*
)
row
[
i
]);
if
(
charLen
<
0
)
{
monError
(
"failed to save slow_query info, reason: invalid row %s len, sql:%s"
,
(
char
*
)
row
[
i
],
tsMonitor
.
sql
);
goto
DONE
;
}
pos
+=
snprintf
(
sql
+
pos
,
strlen
(
SQL_STR_FMT
)
+
charLen
+
1
,
", "
SQL_STR_FMT
,
(
char
*
)
row
[
i
]);
}
else
if
(
strcmp
(
fields
[
i
].
name
,
"sql"
)
==
0
)
{
charLen
=
monGetRowElemCharLen
(
fields
[
i
],
(
char
*
)
row
[
i
]);
if
(
charLen
<
0
)
{
monError
(
"failed to save slow_query info, reason: invalid row %s len, sql:%s"
,
(
char
*
)
row
[
i
],
tsMonitor
.
sql
);
goto
DONE
;
}
pos
+=
snprintf
(
sql
+
pos
,
strlen
(
SQL_STR_FMT
)
+
charLen
+
2
,
", "
SQL_STR_FMT
")"
,
(
char
*
)
row
[
i
]);
}
}
}
monDebug
(
"save slow query, sql:%s"
,
sql
);
if
(
!
has_slowquery
)
{
goto
DONE
;
}
void
*
res
=
taos_query
(
tsMonitor
.
conn
,
tsMonitor
.
sql
);
code
=
taos_errno
(
res
);
taos_free_result
(
res
);
if
(
code
!=
0
)
{
monError
(
"failed to save slowquery info, reason:%s, sql:%s"
,
tstrerror
(
code
),
tsMonitor
.
sql
);
}
else
{
monIncSubmitReqCnt
();
monDebug
(
"successfully to save slowquery info, sql:%s"
,
tsMonitor
.
sql
);
}
DONE:
taos_free_result
(
result
);
return
;
}
static
void
monSaveDisksInfo
()
{
int64_t
ts
=
taosGetTimestampUs
();
char
*
sql
=
tsMonitor
.
sql
;
...
...
src/query/inc/qExecutor.h
浏览文件 @
2167d0fc
...
...
@@ -237,6 +237,7 @@ typedef struct SQueryAttr {
bool
createFilterOperator
;
// if filter operator is needed
bool
multigroupResult
;
// multigroup result can exist in one SSDataBlock
bool
needSort
;
// need sort rowRes
bool
skipOffset
;
// can skip offset if true
int32_t
interBufSize
;
// intermediate buffer sizse
int32_t
havingNum
;
// having expr number
...
...
@@ -659,7 +660,7 @@ void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFil
void
setInputDataBlock
(
SOperatorInfo
*
pOperator
,
SQLFunctionCtx
*
pCtx
,
SSDataBlock
*
pBlock
,
int32_t
order
);
int32_t
getNumOfResult
(
SQueryRuntimeEnv
*
pRuntimeEnv
,
SQLFunctionCtx
*
pCtx
,
int32_t
numOfOutput
);
void
finalizeQueryResult
(
SOperatorInfo
*
pOperator
,
SQLFunctionCtx
*
pCtx
,
SResultRowInfo
*
pResultRowInfo
,
int32_t
*
rowCellInfoOffset
);
void
updateOutputBuf
(
SOptrBasicInfo
*
pBInfo
,
int32_t
*
bufCapacity
,
int32_t
numOfInputRows
);
void
updateOutputBuf
(
SOptrBasicInfo
*
pBInfo
,
int32_t
*
bufCapacity
,
int32_t
numOfInputRows
,
SQueryRuntimeEnv
*
runtimeEnv
);
void
clearOutputBuf
(
SOptrBasicInfo
*
pBInfo
,
int32_t
*
bufCapacity
);
void
copyTsColoum
(
SSDataBlock
*
pRes
,
SQLFunctionCtx
*
pCtx
,
int32_t
numOfOutput
);
...
...
src/query/src/qExecutor.c
浏览文件 @
2167d0fc
...
...
@@ -1432,7 +1432,7 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc
int32_t
step
=
GET_FORWARD_DIRECTION_FACTOR
(
pQueryAttr
->
order
.
order
);
if
(
pBlock
->
pDataBlock
==
NULL
){
tscError
(
"
pBlock->pDataBlock == NULL"
);
qError
(
"window border interpolation:
pBlock->pDataBlock == NULL"
);
return
;
}
SColumnInfoData
*
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
0
);
...
...
@@ -3586,7 +3586,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
initCtxOutputBuffer
(
pCtx
,
pDataBlock
->
info
.
numOfCols
);
}
void
updateOutputBuf
(
SOptrBasicInfo
*
pBInfo
,
int32_t
*
bufCapacity
,
int32_t
numOfInputRows
)
{
void
updateOutputBuf
(
SOptrBasicInfo
*
pBInfo
,
int32_t
*
bufCapacity
,
int32_t
numOfInputRows
,
SQueryRuntimeEnv
*
runtimeEnv
)
{
SSDataBlock
*
pDataBlock
=
pBInfo
->
pRes
;
int32_t
newSize
=
pDataBlock
->
info
.
rows
+
numOfInputRows
+
5
;
// extra output buffer
...
...
@@ -3594,7 +3594,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
for
(
int32_t
i
=
0
;
i
<
pDataBlock
->
info
.
numOfCols
;
++
i
)
{
SColumnInfoData
*
pColInfo
=
taosArrayGet
(
pDataBlock
->
pDataBlock
,
i
);
char
*
p
=
realloc
(
pColInfo
->
pData
,
newSize
*
pColInfo
->
info
.
bytes
);
char
*
p
=
realloc
(
pColInfo
->
pData
,
((
size_t
)
newSize
)
*
pColInfo
->
info
.
bytes
);
if
(
p
!=
NULL
)
{
pColInfo
->
pData
=
p
;
...
...
@@ -3602,7 +3602,10 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
pBInfo
->
pCtx
[
i
].
pOutput
=
pColInfo
->
pData
;
(
*
bufCapacity
)
=
newSize
;
}
else
{
// longjmp
size_t
allocateSize
=
((
size_t
)(
newSize
))
*
pColInfo
->
info
.
bytes
;
qError
(
"can not allocate %zu bytes for output. Rows: %d, colBytes %d"
,
allocateSize
,
newSize
,
pColInfo
->
info
.
bytes
);
longjmp
(
runtimeEnv
->
env
,
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
}
}
...
...
@@ -3610,7 +3613,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
for
(
int32_t
i
=
0
;
i
<
pDataBlock
->
info
.
numOfCols
;
++
i
)
{
SColumnInfoData
*
pColInfo
=
taosArrayGet
(
pDataBlock
->
pDataBlock
,
i
);
pBInfo
->
pCtx
[
i
].
pOutput
=
pColInfo
->
pData
+
pColInfo
->
info
.
bytes
*
pDataBlock
->
info
.
rows
;
pBInfo
->
pCtx
[
i
].
pOutput
=
pColInfo
->
pData
+
(
size_t
)
pColInfo
->
info
.
bytes
*
pDataBlock
->
info
.
rows
;
// set the correct pointer after the memory buffer reallocated.
int32_t
functionId
=
pBInfo
->
pCtx
[
i
].
functionId
;
...
...
@@ -4902,6 +4905,11 @@ STsdbQueryCond createTsdbQueryCond(SQueryAttr* pQueryAttr, STimeWindow* win) {
.
loadExternalRows
=
false
,
};
// set offset with
if
(
pQueryAttr
->
skipOffset
)
{
cond
.
offset
=
pQueryAttr
->
limit
.
offset
;
}
TIME_WINDOW_COPY
(
cond
.
twindow
,
*
win
);
return
cond
;
}
...
...
@@ -5767,7 +5775,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock
(
pOperator
,
pInfo
->
pCtx
,
pBlock
,
order
);
updateOutputBuf
(
&
pProjectInfo
->
binfo
,
&
pProjectInfo
->
bufCapacity
,
pBlock
->
info
.
rows
);
updateOutputBuf
(
&
pProjectInfo
->
binfo
,
&
pProjectInfo
->
bufCapacity
,
pBlock
->
info
.
rows
,
pOperator
->
pRuntimeEnv
);
projectApplyFunctions
(
pRuntimeEnv
,
pInfo
->
pCtx
,
pOperator
->
numOfOutput
);
if
(
pTableQueryInfo
!=
NULL
)
{
...
...
@@ -5833,7 +5841,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock
(
pOperator
,
pInfo
->
pCtx
,
pBlock
,
order
);
updateOutputBuf
(
&
pProjectInfo
->
binfo
,
&
pProjectInfo
->
bufCapacity
,
pBlock
->
info
.
rows
);
updateOutputBuf
(
&
pProjectInfo
->
binfo
,
&
pProjectInfo
->
bufCapacity
,
pBlock
->
info
.
rows
,
pOperator
->
pRuntimeEnv
);
projectApplyFunctions
(
pRuntimeEnv
,
pInfo
->
pCtx
,
pOperator
->
numOfOutput
);
if
(
pTableQueryInfo
!=
NULL
)
{
...
...
@@ -5870,19 +5878,38 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
return
NULL
;
}
bool
move
=
false
;
int32_t
skip
=
0
;
int32_t
remain
=
0
;
int64_t
srows
=
tsdbSkipOffset
(
pRuntimeEnv
->
pQueryHandle
);
if
(
pRuntimeEnv
->
currentOffset
==
0
)
{
break
;
}
else
if
(
srows
>
0
)
{
if
(
pRuntimeEnv
->
currentOffset
-
srows
>=
pBlock
->
info
.
rows
)
{
pRuntimeEnv
->
currentOffset
-=
pBlock
->
info
.
rows
;
}
else
{
move
=
true
;
skip
=
(
int32_t
)(
pRuntimeEnv
->
currentOffset
-
srows
);
remain
=
(
int32_t
)(
pBlock
->
info
.
rows
-
skip
);
}
}
else
if
(
pRuntimeEnv
->
currentOffset
>=
pBlock
->
info
.
rows
)
{
pRuntimeEnv
->
currentOffset
-=
pBlock
->
info
.
rows
;
}
else
{
int32_t
remain
=
(
int32_t
)(
pBlock
->
info
.
rows
-
pRuntimeEnv
->
currentOffset
);
move
=
true
;
skip
=
(
int32_t
)
pRuntimeEnv
->
currentOffset
;
remain
=
(
int32_t
)(
pBlock
->
info
.
rows
-
pRuntimeEnv
->
currentOffset
);
}
// need move
if
(
move
)
{
pBlock
->
info
.
rows
=
remain
;
for
(
int32_t
i
=
0
;
i
<
pBlock
->
info
.
numOfCols
;
++
i
)
{
SColumnInfoData
*
pColInfoData
=
taosArrayGet
(
pBlock
->
pDataBlock
,
i
);
int16_t
bytes
=
pColInfoData
->
info
.
bytes
;
memmove
(
pColInfoData
->
pData
,
pColInfoData
->
pData
+
bytes
*
pRuntimeEnv
->
currentOffset
,
remain
*
bytes
);
memmove
(
pColInfoData
->
pData
,
pColInfoData
->
pData
+
skip
*
bytes
,
remain
*
bytes
);
}
pRuntimeEnv
->
currentOffset
=
0
;
...
...
@@ -6330,7 +6357,7 @@ static void doTimeEveryImpl(SOperatorInfo* pOperator, SQLFunctionCtx *pCtx, SSDa
break
;
}
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
,
pOperator
->
pRuntimeEnv
);
}
}
}
...
...
@@ -6350,7 +6377,7 @@ static SSDataBlock* doTimeEvery(void* param, bool* newgroup) {
pRes
->
info
.
rows
=
0
;
if
(
!
pEveryInfo
->
groupDone
)
{
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
,
pOperator
->
pRuntimeEnv
);
doTimeEveryImpl
(
pOperator
,
pInfo
->
pCtx
,
pEveryInfo
->
lastBlock
,
false
);
if
(
pRes
->
info
.
rows
>=
pRuntimeEnv
->
resultInfo
.
threshold
)
{
copyTsColoum
(
pRes
,
pInfo
->
pCtx
,
pOperator
->
numOfOutput
);
...
...
@@ -6386,7 +6413,7 @@ static SSDataBlock* doTimeEvery(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock
(
pOperator
,
pInfo
->
pCtx
,
pBlock
,
order
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
pBlock
->
info
.
rows
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
pBlock
->
info
.
rows
,
pOperator
->
pRuntimeEnv
);
doTimeEveryImpl
(
pOperator
,
pInfo
->
pCtx
,
pBlock
,
*
newgroup
);
if
(
pEveryInfo
->
groupDone
&&
pOperator
->
upstream
[
0
]
->
notify
)
{
...
...
@@ -6412,7 +6439,7 @@ static SSDataBlock* doTimeEvery(void* param, bool* newgroup) {
if
(
!
pEveryInfo
->
groupDone
)
{
pEveryInfo
->
allDone
=
true
;
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
,
pOperator
->
pRuntimeEnv
);
doTimeEveryImpl
(
pOperator
,
pInfo
->
pCtx
,
NULL
,
false
);
if
(
pRes
->
info
.
rows
>=
pRuntimeEnv
->
resultInfo
.
threshold
)
{
break
;
...
...
@@ -6433,7 +6460,7 @@ static SSDataBlock* doTimeEvery(void* param, bool* newgroup) {
// Return result of the previous group in the firstly.
if
(
*
newgroup
)
{
if
(
!
pEveryInfo
->
groupDone
)
{
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
0
,
pOperator
->
pRuntimeEnv
);
doTimeEveryImpl
(
pOperator
,
pInfo
->
pCtx
,
NULL
,
false
);
if
(
pRes
->
info
.
rows
>=
pRuntimeEnv
->
resultInfo
.
threshold
)
{
pEveryInfo
->
existDataBlock
=
pBlock
;
...
...
@@ -6469,7 +6496,7 @@ static SSDataBlock* doTimeEvery(void* param, bool* newgroup) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock
(
pOperator
,
pInfo
->
pCtx
,
pBlock
,
order
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
pBlock
->
info
.
rows
);
updateOutputBuf
(
&
pEveryInfo
->
binfo
,
&
pEveryInfo
->
bufCapacity
,
pBlock
->
info
.
rows
,
pOperator
->
pRuntimeEnv
);
pEveryInfo
->
groupDone
=
false
;
...
...
@@ -8957,6 +8984,14 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
doUpdateExprColumnIndex
(
pQueryAttr
);
// calc skipOffset
if
(
pQueryMsg
->
offset
>
0
&&
TSDB_QUERY_HAS_TYPE
(
pQueryMsg
->
queryType
,
TSDB_QUERY_TYPE_PROJECTION_QUERY
))
{
if
(
pQueryAttr
->
stableQuery
)
pQueryAttr
->
skipOffset
=
false
;
else
pQueryAttr
->
skipOffset
=
pQueryAttr
->
pFilters
==
NULL
;
}
if
(
pSecExprs
!=
NULL
)
{
int32_t
resultRowSize
=
0
;
...
...
src/tsdb/src/tsdbRead.c
浏览文件 @
2167d0fc
...
...
@@ -39,6 +39,9 @@
.tid = (_checkInfo)->tableId.tid, \
.uid = (_checkInfo)->tableId.uid})
// limit offset start optimization for rows read over this value
#define OFFSET_SKIP_THRESHOLD 5000
enum
{
TSDB_QUERY_TYPE_ALL
=
1
,
TSDB_QUERY_TYPE_LAST
=
2
,
...
...
@@ -117,6 +120,9 @@ typedef struct STsdbQueryHandle {
STsdbRepo
*
pTsdb
;
SQueryFilePos
cur
;
// current position
int16_t
order
;
int64_t
offset
;
// limit offset
int64_t
srows
;
// skip offset rows
int64_t
frows
;
// forbid skip offset rows
STimeWindow
window
;
// the primary query time window that applies to all queries
SDataStatis
*
statis
;
// query level statistics, only one table block statistics info exists at any time
int32_t
numOfBlocks
;
...
...
@@ -155,6 +161,11 @@ typedef struct STableGroupSupporter {
STSchema
*
pTagSchema
;
}
STableGroupSupporter
;
typedef
struct
SRange
{
int32_t
from
;
int32_t
to
;
}
SRange
;
static
STimeWindow
updateLastrowForEachGroup
(
STableGroupInfo
*
groupList
);
static
int32_t
checkForCachedLastRow
(
STsdbQueryHandle
*
pQueryHandle
,
STableGroupInfo
*
groupList
);
static
int32_t
checkForCachedLast
(
STsdbQueryHandle
*
pQueryHandle
);
...
...
@@ -413,6 +424,9 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
}
pQueryHandle
->
order
=
pCond
->
order
;
pQueryHandle
->
offset
=
pCond
->
offset
;
pQueryHandle
->
srows
=
0
;
pQueryHandle
->
frows
=
0
;
pQueryHandle
->
pTsdb
=
tsdb
;
pQueryHandle
->
type
=
TSDB_QUERY_TYPE_ALL
;
pQueryHandle
->
cur
.
fid
=
INT32_MIN
;
...
...
@@ -529,6 +543,9 @@ void tsdbResetQueryHandle(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond) {
}
pQueryHandle
->
order
=
pCond
->
order
;
pQueryHandle
->
offset
=
pCond
->
offset
;
pQueryHandle
->
srows
=
0
;
pQueryHandle
->
frows
=
0
;
pQueryHandle
->
window
=
pCond
->
twindow
;
pQueryHandle
->
type
=
TSDB_QUERY_TYPE_ALL
;
pQueryHandle
->
cur
.
fid
=
-
1
;
...
...
@@ -1073,63 +1090,302 @@ static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY s
return
midSlot
;
}
static
int32_t
loadBlockInfo
(
STsdbQueryHandle
*
pQueryHandle
,
int32_t
index
,
int32_t
*
numOfBlocks
)
{
int32_t
code
=
0
;
// array :1 2 3 5 7 -2 (8 9) skip 4 and 6
int32_t
memMoveByArray
(
SBlock
*
blocks
,
SArray
*
pArray
)
{
// pArray is NULL or size is zero , no need block to move
if
(
pArray
==
NULL
)
return
0
;
size_t
count
=
taosArrayGetSize
(
pArray
);
if
(
count
==
0
)
return
0
;
STableCheckInfo
*
pCheckInfo
=
taosArrayGet
(
pQueryHandle
->
pTableCheckInfo
,
index
);
pCheckInfo
->
numOfBlocks
=
0
;
// memmove
int32_t
num
=
0
;
SRange
*
ranges
=
(
SRange
*
)
TARRAY_GET_START
(
pArray
);
for
(
size_t
i
=
0
;
i
<
count
;
i
++
)
{
int32_t
step
=
ranges
[
i
].
to
-
ranges
[
i
].
from
+
1
;
memmove
(
blocks
+
num
,
blocks
+
ranges
[
i
].
from
,
sizeof
(
SBlock
)
*
step
);
num
+=
step
;
}
if
(
tsdbSetReadTable
(
&
pQueryHandle
->
rhelper
,
pCheckInfo
->
pTableObj
)
!=
TSDB_CODE_SUCCESS
)
{
code
=
terrno
;
return
code
;
return
num
;
}
// if block data in memory return false else true
bool
blockNoItemInMem
(
STsdbQueryHandle
*
q
,
SBlock
*
pBlock
)
{
if
(
q
->
pMemRef
==
NULL
)
{
return
false
;
}
SBlockIdx
*
compIndex
=
pQueryHandle
->
rhelper
.
pBlkIdx
;
// mem
if
(
q
->
pMemRef
->
snapshot
.
mem
)
{
SMemTable
*
mem
=
q
->
pMemRef
->
snapshot
.
mem
;
if
(
timeIntersect
(
mem
->
keyFirst
,
mem
->
keyLast
,
pBlock
->
keyFirst
,
pBlock
->
keyLast
))
return
false
;
}
// imem
if
(
q
->
pMemRef
->
snapshot
.
imem
)
{
SMemTable
*
imem
=
q
->
pMemRef
->
snapshot
.
imem
;
if
(
timeIntersect
(
imem
->
keyFirst
,
imem
->
keyLast
,
pBlock
->
keyFirst
,
pBlock
->
keyLast
))
return
false
;
}
// no data block in this file, try next file
if
(
compIndex
==
NULL
||
compIndex
->
uid
!=
pCheckInfo
->
tableId
.
uid
)
{
return
0
;
// no data blocks in the file belongs to pCheckInfo->pTable
return
true
;
}
#define MAYBE_IN_MEMORY_ROWS 4000 // approximately the capacity of one block
// skip blocks . return value is skip blocks number, skip rows reduce from *pOffset
static
int32_t
offsetSkipBlock
(
STsdbQueryHandle
*
q
,
SBlockInfo
*
pBlockInfo
,
int64_t
skey
,
int64_t
ekey
,
int32_t
sblock
,
int32_t
eblock
,
SArray
**
ppArray
,
bool
order
)
{
int32_t
num
=
0
;
SBlock
*
blocks
=
pBlockInfo
->
blocks
;
SArray
*
pArray
=
NULL
;
SRange
range
;
range
.
from
=
-
1
;
//
// ASC
//
if
(
order
)
{
for
(
int32_t
i
=
sblock
;
i
<
eblock
;
i
++
)
{
bool
skip
=
false
;
SBlock
*
pBlock
=
&
blocks
[
i
];
if
(
i
==
sblock
&&
skey
>
pBlock
->
keyFirst
)
{
q
->
frows
+=
pBlock
->
numOfRows
;
// some rows time < s
}
else
{
// check can skip
if
(
q
->
srows
+
q
->
frows
+
pBlock
->
numOfRows
+
MAYBE_IN_MEMORY_ROWS
<
q
->
offset
)
{
// approximately calculate
if
(
blockNoItemInMem
(
q
,
pBlock
))
{
// can skip
q
->
srows
+=
pBlock
->
numOfRows
;
skip
=
true
;
}
else
{
q
->
frows
+=
pBlock
->
numOfRows
;
// maybe have some row in memroy
}
}
else
{
// the remainder be put to pArray
if
(
pArray
==
NULL
)
pArray
=
taosArrayInit
(
1
,
sizeof
(
SRange
));
if
(
range
.
from
==
-
1
)
{
range
.
from
=
i
;
}
else
{
if
(
range
.
to
+
1
!=
i
)
{
// add the previous
taosArrayPush
(
pArray
,
&
range
);
range
.
from
=
i
;
}
}
range
.
to
=
eblock
-
1
;
taosArrayPush
(
pArray
,
&
range
);
range
.
from
=
-
1
;
break
;
}
}
if
(
skip
)
{
num
++
;
}
else
{
// can't skip, append block index to pArray
if
(
pArray
==
NULL
)
pArray
=
taosArrayInit
(
10
,
sizeof
(
SRange
));
if
(
range
.
from
==
-
1
)
{
range
.
from
=
i
;
}
else
{
if
(
range
.
to
+
1
!=
i
)
{
// add the previous
taosArrayPush
(
pArray
,
&
range
);
range
.
from
=
i
;
}
}
range
.
to
=
i
;
}
}
// end append
if
(
range
.
from
!=
-
1
)
{
if
(
pArray
==
NULL
)
pArray
=
taosArrayInit
(
1
,
sizeof
(
SRange
));
taosArrayPush
(
pArray
,
&
range
);
}
// ASC return
*
ppArray
=
pArray
;
return
num
;
}
// DES
for
(
int32_t
i
=
eblock
-
1
;
i
>=
sblock
;
i
--
)
{
bool
skip
=
false
;
SBlock
*
pBlock
=
&
blocks
[
i
];
if
(
i
==
eblock
-
1
&&
ekey
<
pBlock
->
keyLast
)
{
q
->
frows
+=
pBlock
->
numOfRows
;
// some rows time > e
}
else
{
// check can skip
if
(
q
->
srows
+
q
->
frows
+
pBlock
->
numOfRows
+
MAYBE_IN_MEMORY_ROWS
<
q
->
offset
)
{
// approximately calculate
if
(
blockNoItemInMem
(
q
,
pBlock
))
{
// can skip
q
->
srows
+=
pBlock
->
numOfRows
;
skip
=
true
;
}
else
{
q
->
frows
+=
pBlock
->
numOfRows
;
// maybe have some row in memroy
}
}
else
{
// the remainder be put to pArray
if
(
pArray
==
NULL
)
pArray
=
taosArrayInit
(
1
,
sizeof
(
SRange
));
if
(
range
.
from
==
-
1
)
{
range
.
from
=
i
;
}
else
{
if
(
range
.
to
-
1
!=
i
)
{
// add the previous
taosArrayPush
(
pArray
,
&
range
);
range
.
from
=
i
;
}
}
range
.
to
=
0
;
taosArrayPush
(
pArray
,
&
range
);
range
.
from
=
-
1
;
break
;
}
}
if
(
skip
)
{
num
++
;
}
else
{
// can't skip, append block index to pArray
if
(
pArray
==
NULL
)
pArray
=
taosArrayInit
(
10
,
sizeof
(
SRange
));
if
(
range
.
from
==
-
1
)
{
range
.
from
=
i
;
}
else
{
if
(
range
.
to
+
1
!=
i
)
{
// add the previous
taosArrayPush
(
pArray
,
&
range
);
range
.
from
=
i
;
}
}
range
.
to
=
i
;
}
}
assert
(
compIndex
->
len
>
0
);
// end append
if
(
range
.
from
!=
-
1
)
{
if
(
pArray
==
NULL
)
pArray
=
taosArrayInit
(
1
,
sizeof
(
SRange
));
taosArrayPush
(
pArray
,
&
range
);
}
if
(
pArray
==
NULL
)
return
num
;
if
(
tsdbLoadBlockInfo
(
&
(
pQueryHandle
->
rhelper
),
(
void
**
)(
&
pCheckInfo
->
pCompInfo
),
(
uint32_t
*
)(
&
pCheckInfo
->
compSize
))
<
0
)
{
return
terrno
;
// reverse array
size_t
count
=
taosArrayGetSize
(
pArray
);
SRange
*
ranges
=
TARRAY_GET_START
(
pArray
);
SArray
*
pArray1
=
taosArrayInit
(
count
,
sizeof
(
SRange
));
size_t
i
=
count
-
1
;
while
(
i
>=
0
)
{
range
.
from
=
ranges
[
i
].
to
;
range
.
to
=
ranges
[
i
].
from
;
taosArrayPush
(
pArray1
,
&
range
);
if
(
i
==
0
)
break
;
i
--
;
}
SBlockInfo
*
pCompInfo
=
pCheckInfo
->
pCompInfo
;
TSKEY
s
=
TSKEY_INITIAL_VAL
,
e
=
TSKEY_INITIAL_VAL
;
*
ppArray
=
pArray1
;
taosArrayDestroy
(
&
pArray
);
return
num
;
}
if
(
ASCENDING_TRAVERSE
(
pQueryHandle
->
order
))
{
// shrink blocks by condition of query
static
void
shrinkBlocksByQuery
(
STsdbQueryHandle
*
pQueryHandle
,
STableCheckInfo
*
pCheckInfo
)
{
SBlockInfo
*
pCompInfo
=
pCheckInfo
->
pCompInfo
;
SBlockIdx
*
compIndex
=
pQueryHandle
->
rhelper
.
pBlkIdx
;
bool
order
=
ASCENDING_TRAVERSE
(
pQueryHandle
->
order
);
if
(
order
)
{
assert
(
pCheckInfo
->
lastKey
<=
pQueryHandle
->
window
.
ekey
&&
pQueryHandle
->
window
.
skey
<=
pQueryHandle
->
window
.
ekey
);
}
else
{
assert
(
pCheckInfo
->
lastKey
>=
pQueryHandle
->
window
.
ekey
&&
pQueryHandle
->
window
.
skey
>=
pQueryHandle
->
window
.
ekey
);
}
TSKEY
s
=
TSKEY_INITIAL_VAL
,
e
=
TSKEY_INITIAL_VAL
;
s
=
MIN
(
pCheckInfo
->
lastKey
,
pQueryHandle
->
window
.
ekey
);
e
=
MAX
(
pCheckInfo
->
lastKey
,
pQueryHandle
->
window
.
ekey
);
// discard the unqualified data block based on the query time window
int32_t
start
=
binarySearchForBlock
(
pCompInfo
->
blocks
,
compIndex
->
numOfBlocks
,
s
,
TSDB_ORDER_ASC
);
int32_t
end
=
start
;
if
(
s
>
pCompInfo
->
blocks
[
start
].
keyLast
)
{
return
0
;
return
;
}
// todo speedup the procedure of located end block
int32_t
end
=
start
;
// locate e index of blocks -> end
while
(
end
<
(
int32_t
)
compIndex
->
numOfBlocks
&&
(
pCompInfo
->
blocks
[
end
].
keyFirst
<=
e
))
{
end
+=
1
;
}
pCheckInfo
->
numOfBlocks
=
(
end
-
start
);
// calc offset can skip blocks number
int32_t
nSkip
=
0
;
SArray
*
pArray
=
NULL
;
if
(
pQueryHandle
->
offset
>
0
)
{
nSkip
=
offsetSkipBlock
(
pQueryHandle
,
pCompInfo
,
s
,
e
,
start
,
end
,
&
pArray
,
order
);
}
if
(
nSkip
>
0
)
{
// have offset and can skip
pCheckInfo
->
numOfBlocks
=
memMoveByArray
(
pCompInfo
->
blocks
,
pArray
);
}
else
{
// no offset
pCheckInfo
->
numOfBlocks
=
end
-
start
;
if
(
start
>
0
)
memmove
(
pCompInfo
->
blocks
,
&
pCompInfo
->
blocks
[
start
],
pCheckInfo
->
numOfBlocks
*
sizeof
(
SBlock
));
}
if
(
pArray
)
taosArrayDestroy
(
&
pArray
);
}
// load one table (tsd_index point to) need load blocks info and put into pCheckInfo->pCompInfo->blocks
static
int32_t
loadBlockInfo
(
STsdbQueryHandle
*
pQueryHandle
,
int32_t
tsd_index
,
int32_t
*
numOfBlocks
)
{
//
// ONE PART. Load all blocks info from one table of tsd_index
//
int32_t
code
=
0
;
STableCheckInfo
*
pCheckInfo
=
taosArrayGet
(
pQueryHandle
->
pTableCheckInfo
,
tsd_index
);
pCheckInfo
->
numOfBlocks
=
0
;
if
(
tsdbSetReadTable
(
&
pQueryHandle
->
rhelper
,
pCheckInfo
->
pTableObj
)
!=
TSDB_CODE_SUCCESS
)
{
code
=
terrno
;
return
code
;
}
SBlockIdx
*
compIndex
=
pQueryHandle
->
rhelper
.
pBlkIdx
;
// no data block in this file, try next file
if
(
compIndex
==
NULL
||
compIndex
->
uid
!=
pCheckInfo
->
tableId
.
uid
)
{
return
0
;
// no data blocks in the file belongs to pCheckInfo->pTable
}
if
(
start
>
0
)
{
memmove
(
pCompInfo
->
blocks
,
&
pCompInfo
->
blocks
[
start
],
pCheckInfo
->
numOfBlocks
*
sizeof
(
SBlock
));
if
(
pCheckInfo
->
compSize
<
(
int32_t
)
compIndex
->
len
)
{
assert
(
compIndex
->
len
>
0
);
char
*
t
=
realloc
(
pCheckInfo
->
pCompInfo
,
compIndex
->
len
);
if
(
t
==
NULL
)
{
terrno
=
TSDB_CODE_TDB_OUT_OF_MEMORY
;
code
=
TSDB_CODE_TDB_OUT_OF_MEMORY
;
return
code
;
}
pCheckInfo
->
pCompInfo
=
(
SBlockInfo
*
)
t
;
pCheckInfo
->
compSize
=
compIndex
->
len
;
}
if
(
tsdbLoadBlockInfo
(
&
(
pQueryHandle
->
rhelper
),
(
void
**
)(
&
pCheckInfo
->
pCompInfo
),
(
uint32_t
*
)(
&
pCheckInfo
->
compSize
))
<
0
)
{
return
terrno
;
}
//
// TWO PART. shrink no need blocks from all blocks by condition of query
//
shrinkBlocksByQuery
(
pQueryHandle
,
pCheckInfo
);
(
*
numOfBlocks
)
+=
pCheckInfo
->
numOfBlocks
;
return
0
;
}
...
...
@@ -4312,4 +4568,11 @@ end:
return
string
;
}
// Obtain a query-handle attribute: the number of rows skipped so far by the
// OFFSET optimization (srows). Returns 0 for a NULL handle.
int64_t tsdbSkipOffset(TsdbQueryHandleT queryHandle) {
  STsdbQueryHandle* pHandle = (STsdbQueryHandle*)queryHandle;
  return (pHandle != NULL) ? pHandle->srows : 0;
}
\ No newline at end of file
src/util/inc/tutil.h
浏览文件 @
2167d0fc
...
...
@@ -58,6 +58,13 @@ static FORCE_INLINE void taosEncryptPass(uint8_t *inBuf, size_t inLen, char *tar
memcpy
(
target
,
context
.
digest
,
TSDB_KEY_LEN
);
}
//
// TSKEY util
//
// if time area(s1,e1) intersect with time area(s2,e2) then return true else return false
bool
timeIntersect
(
TSKEY
s1
,
TSKEY
e1
,
TSKEY
s2
,
TSKEY
e2
);
#ifdef __cplusplus
}
#endif
...
...
src/util/src/tutil.c
浏览文件 @
2167d0fc
...
...
@@ -549,3 +549,16 @@ FORCE_INLINE double taos_align_get_double(const char* pBuf) {
memcpy
(
&
dv
,
pBuf
,
sizeof
(
dv
));
// in ARM, return *((const double*)(pBuf)) may cause problem
return
dv
;
}
//
// TSKEY util
//
// if time area(s1,e1) intersect with time area(s2,e2) then return true else return false
bool timeIntersect(TSKEY s1, TSKEY e1, TSKEY s2, TSKEY e2) {
  // Of the 7 possible relative layouts of the two areas, only 2 have no
  // overlap: area2 entirely before area1, or area2 entirely after area1.
  // Every other layout intersects, so test the two disjoint cases and negate.
  return !(e2 < s1 || s2 > e1);
}
\ No newline at end of file
tests/pytest/fulltest-others.sh
浏览文件 @
2167d0fc
#!/bin/bash
ulimit
-c
unlimited
#======================p1-start===============
#python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 test.py
-f
dbmgmt/nanoSecondCheck.py
#
python3 ./test.py
-f
tsdb/tsdbComp.py
# user
python3 ./test.py
-f
user/user_create.py
python3 ./test.py
-f
user/pass_len.py
#======================p1-end===============
#======================p2-start===============
# perfbenchmark
python3 ./test.py
-f
perfbenchmark/bug3433.py
#python3 ./test.py -f perfbenchmark/bug3589.py
#python3 ./test.py -f perfbenchmark/taosdemoInsert.py
#alter table
python3 ./test.py
-f
alter/alter_table_crash.py
python3 ./test.py
-f
alter/alterTabAddTagWithNULL.py
python3 ./test.py
-f
alter/alterTimestampColDataProcess.py
#======================p2-end===============
#======================p3-start===============
python3 ./test.py
-f
alter/alter_table.py
python3 ./test.py
-f
alter/alter_debugFlag.py
python3 ./test.py
-f
alter/alter_keep.py
python3 ./test.py
-f
alter/alter_cacheLastRow.py
python3 ./test.py
-f
alter/alter_create_exception.py
python3 ./test.py
-f
alter/alterColMultiTimes.py
#======================p3-end===============
#======================p4-start===============
python3 ./test.py
-f
account/account_create.py
# client
python3 ./test.py
-f
client/client.py
python3 ./test.py
-f
client/version.py
...
...
@@ -50,12 +39,10 @@ python3 ./test.py -f client/taoshellCheckCase.py
# python3 ./test.py -f client/change_time_1_2.py
python3 client/twoClients.py
python3 testMinTablesPerVnode.py
# topic
python3 ./test.py
-f
topic/topicQuery.py
#======================p4-end===============
#======================p5-start===============
python3 ./test.py
-f
../system-test/0-management/1-stable/create_col_tag.py
python3 ./test.py
-f
../develop-test/0-management/3-tag/json_tag.py
#======================p5-end===============
tests/pytest/fulltest-query.sh
浏览文件 @
2167d0fc
#!/bin/bash
ulimit
-c
unlimited
#======================p1-start===============
# timezone
python3 ./test.py
-f
TimeZone/TestCaseTimeZone.py
#stable
python3 ./test.py
-f
stable/insert.py
python3 ./test.py
-f
stable/query_after_reset.py
#table
python3 ./test.py
-f
table/alter_wal0.py
python3 ./test.py
-f
table/column_name.py
...
...
@@ -22,7 +19,6 @@ python3 ./test.py -f table/boundary.py
#python3 ./test.py -f table/create.py
python3 ./test.py
-f
table/del_stable.py
python3 ./test.py
-f
table/create_db_from_normal_db.py
# tag
python3 ./test.py
-f
tag_lite/filter.py
python3 ./test.py
-f
tag_lite/create-tags-boundary.py
...
...
@@ -38,10 +34,8 @@ python3 ./test.py -f tag_lite/bool_binary.py
python3 ./test.py
-f
tag_lite/bool_int.py
python3 ./test.py
-f
tag_lite/bool.py
python3 ./test.py
-f
tag_lite/change.py
#======================p1-end===============
#======================p2-start===============
python3 ./test.py
-f
tag_lite/column.py
python3 ./test.py
-f
tag_lite/commit.py
python3 ./test.py
-f
tag_lite/create.py
...
...
@@ -65,10 +59,8 @@ python3 ./test.py -f tag_lite/unsignedTinyint.py
python3 ./test.py
-f
tag_lite/alter_tag.py
python3 ./test.py
-f
tag_lite/drop_auto_create.py
python3 ./test.py
-f
tag_lite/json_tag_extra.py
#======================p2-end===============
#======================p3-start===============
#query
python3 ./test.py
-f
query/distinctOneColTb.py
python3 ./test.py
-f
query/filter.py
...
...
@@ -118,10 +110,8 @@ python3 ./test.py -f query/subqueryFilter.py
python3 ./test.py
-f
query/nestedQuery/queryInterval.py
python3 ./test.py
-f
query/queryStateWindow.py
# python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
#======================p3-end===============
#======================p4-start===============
python3 ./test.py
-f
query/nestquery_last_row.py
python3 ./test.py
-f
query/nestedQuery/nestedQuery.py
python3 ./test.py
-f
query/nestedQuery/nestedQuery_datacheck.py
...
...
@@ -145,7 +135,6 @@ python3 ./test.py -f query/query.py
python3 ./test.py
-f
query/queryDiffColsTagsAndOr.py
python3 ./test.py
-f
query/queryGroupTbname.py
python3 ./test.py
-f
query/queryRegex.py
#stream
python3 ./test.py
-f
stream/metric_1.py
python3 ./test.py
-f
stream/metric_n.py
...
...
@@ -159,18 +148,14 @@ python3 ./test.py -f stream/table_1.py
python3 ./test.py
-f
stream/table_n.py
python3 ./test.py
-f
stream/showStreamExecTimeisNull.py
python3 ./test.py
-f
stream/cqSupportBefore1970.py
python3 ./test.py
-f
query/queryGroupbyWithInterval.py
python3 queryCount.py
# subscribe
python3 test.py
-f
subscribe/singlemeter.py
#python3 test.py -f subscribe/stability.py
python3 test.py
-f
subscribe/supertable.py
#======================p4-end===============
#======================p5-start===============
# functions
python3 ./test.py
-f
functions/all_null_value.py
python3 ./test.py
-f
functions/function_avg.py
-r
1
...
...
@@ -208,12 +193,6 @@ python3 ./test.py -f functions/function_mavg.py
python3 ./test.py
-f
functions/function_csum.py
python3 ./test.py
-f
functions/function_percentile2.py
python3 ./test.py
-f
functions/variable_httpDbNameMandatory.py
######## system-test
#python3 ./test.py -f ../system-test/2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389
#======================p5-end===============
tests/pytest/fulltest-tools.sh
浏览文件 @
2167d0fc
#!/bin/bash
ulimit
-c
unlimited
#======================p1-start===============
# tools
python3 test.py
-f
tools/taosdumpTest.py
python3 test.py
-f
tools/taosdumpTest2.py
python3 test.py
-f
tools/taosdemoTest.py
python3 test.py
-f
tools/taosdemoTestWithoutMetric.py
python3 test.py
-f
tools/taosdemoTestWithJson.py
#======================p1-end===============
#======================p2-start===============
python3 test.py
-f
tools/taosdemoTestLimitOffset.py
python3 test.py
-f
tools/taosdemoTestTblAlt.py
python3 test.py
-f
tools/taosdemoTestSampleData.py
...
...
@@ -21,7 +17,6 @@ python3 test.py -f tools/taosdemoTestQuery.py
python3 ./test.py
-f
tools/taosdemoTestdatatype.py
#======================p2-end===============
#======================p3-start===============
# nano support
python3 test.py
-f
tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
python3 test.py
-f
tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py
...
...
@@ -31,28 +26,14 @@ python3 test.py -f tools/taosdumpTestNanoSupport.py
python3 test.py
-f
tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
#======================p3-end===============
#======================p4-start===============
python3 test.py
-f
tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
python3 test.py
-f
tools/taosdemoAllTest/taosdemoTestInsertAllType.py
python3 test.py
-f
tools/taosdemoAllTest/TD-4985/query-limit-offset.py
python3 test.py
-f
tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
python3 test.py
-f
tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
python3 test.py
-f
tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
python3 test.py
-f
tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py
python3 test.py
-f
tools/taosdemoAllTest/taosdemoTestInsertShell.py
#======================p4-end===============
#======================p5-start===============
#======================p5-end===============
tests/pytest/fulltest.sh
浏览文件 @
2167d0fc
...
...
@@ -287,6 +287,7 @@ python3 ./test.py -f query/queryCnameDisplay.py
python3 test.py
-f
query/nestedQuery/queryWithSpread.py
python3 ./test.py
-f
query/bug6586.py
# python3 ./test.py -f query/bug5903.py
python3 ./test.py
-f
query/queryLimit.py
#stream
python3 ./test.py
-f
stream/metric_1.py
...
...
tests/pytest/query/queryLimit.py
0 → 100644
浏览文件 @
2167d0fc
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import
sys
from
numpy.lib.function_base
import
insert
import
taos
from
util.log
import
*
from
util.cases
import
*
from
util.sql
import
*
import
numpy
as
np
# constant define
WAITS = 5  # wait seconds: upper bound passed to tdSql.waitedQuery() polling
class TDTestCase:
    """Test LIMIT/OFFSET keyword behavior of SELECT statements.

    case1: limit/offset base function test
    case2: limit/offset advanced test (OFFSET over file data merged with
           freshly inserted in-memory data)
    """
    #
    # --------------- main frame -------------------
    #
    def caseDescription(self):
        '''
        limit and offset keyword function test cases;
        case1: limit offset base function test
        case2: limit offset advance test
        '''
        return

    # init: open the connection, create the schema, and remember the base timestamp
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        tdSql.prepare()
        self.create_tables();
        # base timestamp in ms for the first inserted row
        # (1500000000000 -> 2017-07-14; assumes server local timezone matches
        #  the literal timestamps used in the queries below -- TODO confirm)
        self.ts = 1500000000000

    # run case
    def run(self):
        # insert data: 3,000,000 rows, one row per second, flushed in
        # batches of 30,000 rows per INSERT statement
        self.insert_data("t1", self.ts, 300*10000, 30000);
        # test base case
        self.test_case1()
        tdLog.debug(" LIMIT test_case1 ............ [OK]")
        # test advance case
        self.test_case2()
        tdLog.debug(" LIMIT test_case2 ............ [OK]")

    # stop: release the SQL connection
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

    #
    # --------------- case -------------------
    #

    # create table: one super table plus three child tables (only t1 is filled)
    def create_tables(self):
        # super table
        tdSql.execute("create table st(ts timestamp, i1 int) tags(area int)");
        # child table
        tdSql.execute("create table t1 using st tags(1)");
        tdSql.execute("create table t2 using st tags(2)");
        tdSql.execute("create table t3 using st tags(3)");
        return

    # insert data1
    #   tbname   : child table to fill
    #   ts_start : timestamp (ms) of the first row
    #   count    : total rows; row i is (ts_start + i*1000, i)
    #   batch_num: rows accumulated per INSERT statement before flushing
    def insert_data(self, tbname, ts_start, count, batch_num):
        pre_insert = "insert into %s values" % tbname
        sql = pre_insert
        tdLog.debug("doing insert table %s rows=%d ..." % (tbname, count))
        for i in range(count):
            sql += " (%d,%d)" % (ts_start + i * 1000, i)
            # flush a full batch and start a new statement
            if i > 0 and i % batch_num == 0:
                tdSql.execute(sql)
                sql = pre_insert
        # end sql: flush the last (possibly partial) batch
        if sql != pre_insert:
            tdSql.execute(sql)
        tdLog.debug("INSERT TABLE DATA ............ [OK]")
        return

    # test case1 base: LIMIT and OFFSET with/without WHERE, ASC and DESC order
    def test_case1(self):
        #
        # limit base function
        #
        # base no where
        sql = "select * from t1 limit 10"
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 0)
        tdSql.checkData(9, 1, 9)
        sql = "select * from t1 order by ts desc limit 10" # desc
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 2999999)
        tdSql.checkData(9, 1, 2999990)
        # have where
        sql = "select * from t1 where ts>='2017-07-14 10:40:01' and ts<'2017-07-14 10:40:06' limit 10"
        tdSql.waitedQuery(sql, 5, WAITS)
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(4, 1, 5)
        sql = "select * from t1 where ts>='2017-08-18 03:59:52' and ts<'2017-08-18 03:59:57' order by ts desc limit 10" # desc
        tdSql.waitedQuery(sql, 5, WAITS)
        tdSql.checkData(0, 1, 2999996)
        tdSql.checkData(4, 1, 2999992)

        #
        # offset base function
        #
        # no where
        sql = "select * from t1 limit 10 offset 5"
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(9, 1, 14)
        sql = "select * from t1 order by ts desc limit 10 offset 5" # desc
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 2999994)
        tdSql.checkData(9, 1, 2999985)
        # have where only ts
        sql = "select * from t1 where ts>='2017-07-14 10:40:10' and ts<'2017-07-14 10:40:20' limit 10 offset 5"
        tdSql.waitedQuery(sql, 5, WAITS)
        tdSql.checkData(0, 1, 15)
        tdSql.checkData(4, 1, 19)
        sql = "select * from t1 where ts>='2017-08-18 03:59:52' and ts<'2017-08-18 03:59:57' order by ts desc limit 10 offset 4" # desc
        tdSql.waitedQuery(sql, 1, WAITS)
        tdSql.checkData(0, 1, 2999992)
        # have where with other column condition
        sql = "select * from t1 where i1>=1 and i1<11 limit 10 offset 5"
        tdSql.waitedQuery(sql, 5, WAITS)
        tdSql.checkData(0, 1, 6)
        tdSql.checkData(4, 1, 10)
        sql = "select * from t1 where i1>=300000 and i1<=500000 order by ts desc limit 10 offset 100000" # desc
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 400000)
        tdSql.checkData(9, 1, 399991)
        # have where with ts and other column condition
        sql = "select * from t1 where ts>='2017-07-14 10:40:10' and ts<'2017-07-14 10:40:50' and i1>=20 and i1<=25 limit 10 offset 5"
        tdSql.waitedQuery(sql, 1, WAITS)
        tdSql.checkData(0, 1, 25)
        return

    # test advance: OFFSET must stay correct after new rows land in memory
    # on top of already-committed file blocks
    def test_case2(self):
        #
        # OFFSET merge file data with memory data
        #
        # offset
        sql = "select * from t1 limit 10 offset 72000"
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 72000)
        # each insert one row into NO.0 NO.2 NO.7 blocks
        # (timestamps chosen to fall inside three distinct file blocks -- the
        #  +1 ms offsets duplicate no existing per-second timestamps)
        sql = "insert into t1 values (%d, 0) (%d, 2) (%d, 7)" % (self.ts + 1, self.ts + 2 * 3300 * 1000 + 1, self.ts + 7 * 3300 * 1000 + 1)
        tdSql.execute(sql)
        # query result: the 3 extra rows shift the value found at the same offset
        sql = "select * from t1 limit 10 offset 72000"
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 72000 - 3)
        # have where
        sql = "select * from t1 where ts>='2017-07-14 10:40:10' and ts<'2017-07-22 18:40:10' limit 10 offset 72000"
        tdSql.waitedQuery(sql, 10, WAITS)
        tdSql.checkData(0, 1, 72000 - 3 + 10 + 1)
        # have where desc
        sql = "select * from t1 where ts<'2017-07-14 20:40:00' order by ts desc limit 15 offset 36000"
        tdSql.waitedQuery(sql, 3, WAITS)
        tdSql.checkData(0, 1, 1)
#
#
# add case with filename: register this case with the framework for both platforms
#
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tests/script/general/parser/where.sim
浏览文件 @
2167d0fc
...
...
@@ -360,4 +360,12 @@ sql select * from (select * from where_ts) where ts<now;
if $rows != 5 then
return -1
endi
print ======================>td-11169
sql drop table where_ts;
sql create stable m1 (ts timestamp , k int) tags(a binary(15000));
sql create table tm0 using m1 tags('abc');
sql insert into tm0 values(now, 1);
sql select top(k, 100), a from m1;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
tests/script/jenkins/basic.txt
浏览文件 @
2167d0fc
cd ../../../debug; cmake ..
cd ../../../debug; make
#======================b1-start===============
./test.sh -f general/field/2.sim
./test.sh -f general/field/3.sim
./test.sh -f general/field/4.sim
...
...
@@ -14,8 +12,6 @@ cd ../../../debug; make
./test.sh -f general/field/single.sim
./test.sh -f general/field/smallint.sim
./test.sh -f general/field/tinyint.sim
# ./test.sh -f general/http/autocreate.sim
# ./test.sh -f general/http/chunked.sim
# ./test.sh -f general/http/gzip.sim
...
...
@@ -27,7 +23,6 @@ cd ../../../debug; make
# ./test.sh -f general/http/telegraf.sim
# ./test.sh -f general/http/grafana_bug.sim
# ./test.sh -f general/http/grafana.sim
./test.sh -f general/insert/basic.sim
./test.sh -f general/insert/insert_drop.sim
./test.sh -f general/insert/query_block1_memory.sim
...
...
@@ -37,7 +32,6 @@ cd ../../../debug; make
./test.sh -f general/insert/query_file_memory.sim
./test.sh -f general/insert/query_multi_file.sim
./test.sh -f general/insert/tcp.sim
./test.sh -f general/parser/alter.sim
./test.sh -f general/parser/alter1.sim
./test.sh -f general/parser/alter_stable.sim
...
...
@@ -90,30 +84,22 @@ cd ../../../debug; make
./test.sh -f general/db/nosuchfile.sim
./test.sh -f general/parser/function.sim
./test.sh -f unique/cluster/vgroup100.sim
# ./test.sh -f unique/http/admin.sim
# ./test.sh -f unique/http/opentsdb.sim
./test.sh -f unique/import/replica2.sim
./test.sh -f unique/import/replica3.sim
./test.sh -f general/alter/cached_schema_after_alter.sim
#======================b1-end===============
#======================b2-start===============
#./test.sh -f general/wal/sync.sim
./test.sh -f general/wal/kill.sim
./test.sh -f general/wal/maxtables.sim
./test.sh -f general/user/authority.sim
./test.sh -f general/user/monitor.sim
./test.sh -f general/user/pass_alter.sim
./test.sh -f general/user/pass_len.sim
./test.sh -f general/user/user_create.sim
./test.sh -f general/user/user_len.sim
./test.sh -f general/vector/metrics_field.sim
./test.sh -f general/vector/metrics_mix.sim
./test.sh -f general/vector/metrics_query.sim
...
...
@@ -125,7 +111,6 @@ cd ../../../debug; make
./test.sh -f general/vector/table_mix.sim
./test.sh -f general/vector/table_query.sim
./test.sh -f general/vector/table_time.sim
./test.sh -f unique/account/account_create.sim
./test.sh -f unique/account/account_delete.sim
./test.sh -f unique/account/account_len.sim
...
...
@@ -137,24 +122,17 @@ cd ../../../debug; make
./test.sh -f unique/account/usage.sim
./test.sh -f unique/account/user_create.sim
./test.sh -f unique/account/user_len.sim
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim
./test.sh -f unique/cluster/alter.sim
./test.sh -f unique/cluster/cache.sim
#./test.sh -f unique/http/admin.sim
#./test.sh -f unique/http/opentsdb.sim
./test.sh -f unique/import/replica2.sim
./test.sh -f unique/import/replica3.sim
./test.sh -f general/alter/cached_schema_after_alter.sim
#======================b2-end===============
#======================b3-start===============
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
...
...
@@ -175,7 +153,6 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim
./test.sh -f unique/arbitrator/dn3_mn1_vnode_nomaster.sim
./test.sh -f unique/arbitrator/dn3_mn2_killDnode.sim
./test.sh -f unique/arbitrator/offline_replica2_alterTable_online.sim
./test.sh -f unique/arbitrator/offline_replica2_alterTag_online.sim
./test.sh -f unique/arbitrator/offline_replica2_createTable_online.sim
...
...
@@ -189,19 +166,16 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim
./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim
./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim
./test.sh -f unique/arbitrator/sync_replica3_alterTable_drop.sim
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
./test.sh -f unique/stable/balance_replica1.sim
./test.sh -f unique/stable/dnode2_stop.sim
./test.sh -f unique/stable/dnode2.sim
...
...
@@ -210,11 +184,8 @@ cd ../../../debug; make
./test.sh -f unique/stable/replica2_vnode3.sim
./test.sh -f unique/stable/replica3_dnode6.sim
./test.sh -f unique/stable/replica3_vnode3.sim
#======================b3-end===============
#======================b4-start===============
./test.sh -f general/alter/count.sim
./test.sh -f general/alter/dnode.sim
./test.sh -f general/alter/import.sim
...
...
@@ -222,22 +193,17 @@ cd ../../../debug; make
./test.sh -f general/alter/insert2.sim
./test.sh -f general/alter/metrics.sim
./test.sh -f general/alter/table.sim
./test.sh -f general/cache/new_metrics.sim
./test.sh -f general/cache/restart_metrics.sim
./test.sh -f general/cache/restart_table.sim
./test.sh -f general/connection/connection.sim
./test.sh -f general/column/commit.sim
./test.sh -f general/column/metrics.sim
./test.sh -f general/column/table.sim
./test.sh -f general/compress/commitlog.sim
./test.sh -f general/compress/compress.sim
./test.sh -f general/compress/compress2.sim
./test.sh -f general/compress/uncompress.sim
./test.sh -f general/stable/disk.sim
./test.sh -f general/stable/dnode3.sim
./test.sh -f general/stable/metrics.sim
...
...
@@ -245,7 +211,6 @@ cd ../../../debug; make
./test.sh -f general/stable/show.sim
./test.sh -f general/stable/values.sim
./test.sh -f general/stable/vnode3.sim
./test.sh -f unique/column/replica3.sim
./test.sh -f issue/TD-2713.sim
./test.sh -f general/parser/select_distinct_tag.sim
...
...
@@ -253,10 +218,8 @@ cd ../../../debug; make
./test.sh -f issue/TD-2677.sim
./test.sh -f issue/TD-2680.sim
./test.sh -f unique/dnode/lossdata.sim
#======================b4-end===============
#======================b5-start===============
./test.sh -f unique/dnode/alternativeRole.sim
./test.sh -f unique/dnode/balance1.sim
./test.sh -f unique/dnode/balance2.sim
...
...
@@ -264,7 +227,6 @@ cd ../../../debug; make
./test.sh -f unique/dnode/balancex.sim
./test.sh -f unique/dnode/offline1.sim
./test.sh -f unique/dnode/offline2.sim
./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim
...
...
@@ -272,22 +234,18 @@ cd ../../../debug; make
./test.sh -f general/stream/stream_restart.sim
./test.sh -f general/stream/table_del.sim
./test.sh -f general/stream/table_replica1_vnoden.sim
./test.sh -f general/connection/test_old_data.sim
./test.sh -f unique/dnode/datatrans_3node.sim
./test.sh -f unique/dnode/datatrans_3node_2.sim
./test.sh -f general/db/alter_tables_d2.sim
./test.sh -f general/db/alter_tables_v1.sim
./test.sh -f general/db/alter_tables_v4.sim
#======================b5-end===============
#======================b6-start===============
./test.sh -f unique/dnode/reason.sim
./test.sh -f unique/dnode/remove1.sim
./test.sh -f unique/dnode/remove2.sim
./test.sh -f unique/dnode/vnode_clean.sim
./test.sh -f unique/db/commit.sim
./test.sh -f unique/db/delete.sim
./test.sh -f unique/db/delete_part.sim
...
...
@@ -298,14 +256,12 @@ cd ../../../debug; make
./test.sh -f unique/db/replica_reduce32.sim
./test.sh -f unique/db/replica_reduce31.sim
./test.sh -f unique/db/replica_part.sim
./test.sh -f unique/vnode/many.sim
./test.sh -f unique/vnode/replica2_basic2.sim
./test.sh -f unique/vnode/replica2_repeat.sim
./test.sh -f unique/vnode/replica3_basic.sim
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
./test.sh -f unique/dnode/monitor.sim
./test.sh -f unique/dnode/monitor_bug.sim
./test.sh -f unique/dnode/simple.sim
...
...
@@ -315,7 +271,6 @@ cd ../../../debug; make
./test.sh -f unique/dnode/offline3.sim
./test.sh -f general/wal/kill.sim
./test.sh -f general/wal/maxtables.sim
./test.sh -f general/import/basic.sim
./test.sh -f general/import/commit.sim
./test.sh -f general/import/large.sim
...
...
@@ -323,10 +278,8 @@ cd ../../../debug; make
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim
#======================b6-end===============
#======================b7-start===============
./test.sh -f general/compute/avg.sim
./test.sh -f general/compute/bottom.sim
./test.sh -f general/compute/count.sim
...
...
@@ -343,7 +296,6 @@ cd ../../../debug; make
./test.sh -f general/compute/stddev.sim
./test.sh -f general/compute/sum.sim
./test.sh -f general/compute/top.sim
./test.sh -f general/db/alter_option.sim
./test.sh -f general/db/alter_vgroups.sim
./test.sh -f general/db/basic.sim
...
...
@@ -392,7 +344,6 @@ cd ../../../debug; make
./test.sh -f general/table/tinyint.sim
./test.sh -f general/table/vgroup.sim
./test.sh -f general/table/createmulti.sim
./test.sh -f unique/mnode/mgmt20.sim
./test.sh -f unique/mnode/mgmt21.sim
./test.sh -f unique/mnode/mgmt22.sim
...
...
@@ -403,7 +354,6 @@ cd ../../../debug; make
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/mnode/mgmt34.sim
./test.sh -f unique/mnode/mgmtr2.sim
#./test.sh -f unique/arbitrator/insert_duplicationTs.sim
./test.sh -f general/parser/join_manyblocks.sim
./test.sh -f general/parser/stableOp.sim
...
...
@@ -415,9 +365,7 @@ cd ../../../debug; make
./test.sh -f general/parser/last_cache.sim
./test.sh -f unique/big/balance.sim
./test.sh -f general/parser/nestquery.sim
./test.sh -f general/parser/udf.sim
./test.sh -f general/parser/udf_dll.sim
./test.sh -f general/parser/udf_dll_stable.sim
#======================b7-end===============
tests/system-test/5-taos-tools/dump_col_tag.py
浏览文件 @
2167d0fc
此差异已折叠。
点击以展开。
tests/test-CI.sh
浏览文件 @
2167d0fc
...
...
@@ -51,7 +51,52 @@ function dohavecore(){
fi
fi
}
#######################################
# Run this worker's share of the sim cases listed in jenkins/basic.txt.
# Globals:   tests_dir, IN_TDINTERNAL, RED, GREEN, NC (read)
# Arguments: $1 - number of parallel groups the case list is split into
#            $2 - crash-handling flag forwarded to dohavecore; 1 => exit 8 on failure
#            $3 - this worker's group index (runs lines where line_no % $1 == $3)
# Outputs:   per-case pass/fail and timing appended to out.log
#######################################
function runSimCaseOneByOnefq {
  # total number of lines (cases) in the case list
  end=`sed -n '$=' jenkins/basic.txt`
  for ((i=1;i<=$end;i++)) ; do
    # only handle the lines assigned to this group
    if [[ $(($i%$1)) -eq $3 ]];then
      line=`sed -n "$i"p jenkins/basic.txt`
      # only lines that invoke a sim case are executable
      if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then
        case=`echo $line | grep sim$ |awk '{print $NF}'`
        start_time=`date +%s`
        date +%F\ %T | tee -a out.log
        # the sim directory lives one level deeper inside TDinternal
        if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
          echo -n $case
          # pass/fail is decided by grepping the tsim log, not by the exit code alone
          ./test.sh -f $case > case.log 2>&1 && \
          ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
          ( grep -q 'script.*success.*m$' ../../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
          ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log )
        else
          echo -n $case
          ./test.sh -f $case > ../../sim/case.log 2>&1 && \
          ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \
          ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
          ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log )
        fi
        # the last line of out.log carries this case's verdict
        out_log=`tail -1 out.log`
        if [[ $out_log =~ 'failed' ]];then
          rm case.log
          # preserve the whole sim directory for post-mortem analysis
          if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
            cp -r ../../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
          else
            cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
          fi
          dohavecore $2 1
          # fail-fast mode: abort the whole run on the first failed case
          if [[ $2 == 1 ]];then
            exit 8
          fi
        fi
        end_time=`date +%s`
        echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
        # always check for core dumps, even after a passing case
        dohavecore $2 1
      fi
    fi
  done
  # clean up whichever per-case log location was used
  rm -rf ../../../sim/case.log
  rm -rf ../../sim/case.log
}
function
runPyCaseOneByOne
{
while
read
-r
line
;
do
...
...
@@ -173,7 +218,6 @@ if [ "${OS}" == "Linux" ]; then
fi
echo
"### run Python test case ###"
cd
$tests_dir
...
...
@@ -204,6 +248,10 @@ if [ "$1" == "full" ]; then
runPyCaseOneByOne fulltest-other.sh
runPyCaseOneByOne fulltest-insert.sh
runPyCaseOneByOne fulltest-connector.sh
elif
[
"
$1
"
==
"sim"
]
;
then
echo
"### run sim
$2
test ###"
cd
$tests_dir
/script
runSimCaseOneByOnefq
$2
1
$3
else
echo
"### run
$1
$2
test ###"
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录