taosdata / TDengine, commit 3752a81f
Authored on Nov 18, 2021 by Shengliang Guan

    merge from master into develop

Parents: d145eb83, f6c0f746
Showing 31 changed files with 853 additions and 364 deletions (+853 / -364).
.gitignore  (+4, -0)
Jenkinsfile  (+73, -2)
packaging/release.sh  (+1, -0)
src/client/inc/tscUtil.h  (+2, -1)
src/client/src/tscAsync.c  (+1, -1)
src/client/src/tscPrepare.c  (+61, -15)
src/client/src/tscSub.c  (+1, -1)
src/client/src/tscUtil.c  (+27, -15)
src/common/src/tdataformat.c  (+25, -11)
src/common/src/tglobal.c  (+3, -0)
src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java  (+1, -0)
src/mnode/src/mnodeDb.c  (+1, -1)
src/mnode/src/mnodeShow.c  (+1, -1)
src/mnode/src/mnodeVgroup.c  (+0, -24)
src/os/src/detail/osTime.c  (+22, -7)
src/query/inc/sql.y  (+0, -6)
src/query/src/qAggMain.c  (+2, -2)
src/tsdb/src/tsdbMeta.c  (+1, -1)
src/tsdb/src/tsdbRead.c  (+105, -83)
tests/pytest/fulltest.sh  (+1, -0)
tests/pytest/functions/function_derivative.py  (+4, -3)
tests/pytest/query/bug5903.py  (+36, -0)
tests/pytest/query/udf.py  (+368, -123)
tests/pytest/table/create.py  (+2, -0)
tests/script/api/batchprepare.c  (+46, -3)
tests/script/general/parser/udf_dll.sim  (+1, -1)
tests/script/general/parser/udf_dll_stable.sim  (+1, -1)
tests/script/sh/abs_max.c  (+31, -26)
tests/script/sh/add_one.c  (+5, -7)
tests/script/sh/add_one_64232.c  (+1, -3)
tests/script/sh/sum_double.c  (+26, -26)
.gitignore

@@ -82,6 +82,10 @@ tests/comparisonTest/opentsdb/opentsdbtest/.settings/
 tests/examples/JDBC/JDBCDemo/.classpath
 tests/examples/JDBC/JDBCDemo/.project
 tests/examples/JDBC/JDBCDemo/.settings/
+tests/script/api/batchprepare
+tests/script/api/stmt
+tests/script/api/stmtBatchTest
+tests/script/api/stmtTest
 # Emacs
 # -*- mode: gitignore; -*-
Jenkinsfile

@@ -251,6 +251,77 @@ def pre_test_mac(){
     '''
     return 1
 }
+def pre_test_noinstall(){
+    sh 'hostname'
+    sh '''
+    cd ${WKC}
+    git reset --hard HEAD~10 >/dev/null
+    '''
+    script {
+      if (env.CHANGE_TARGET == 'master') {
+        sh '''
+        cd ${WKC}
+        git checkout master
+        '''
+      }
+      else if (env.CHANGE_TARGET == '2.0'){
+        sh '''
+        cd ${WKC}
+        git checkout 2.0
+        '''
+      }
+      else {
+        sh '''
+        cd ${WKC}
+        git checkout develop
+        '''
+      }
+    }
+    sh '''
+    cd ${WKC}
+    git pull >/dev/null
+    git fetch origin +refs/pull/${CHANGE_ID}/merge
+    git checkout -qf FETCH_HEAD
+    git clean -dfx
+    git submodule update --init --recursive
+    cd ${WK}
+    git reset --hard HEAD~10
+    '''
+    script {
+      if (env.CHANGE_TARGET == 'master') {
+        sh '''
+        cd ${WK}
+        git checkout master
+        '''
+      }
+      else if (env.CHANGE_TARGET == '2.0'){
+        sh '''
+        cd ${WK}
+        git checkout 2.0
+        '''
+      }
+      else {
+        sh '''
+        cd ${WK}
+        git checkout develop
+        '''
+      }
+    }
+    sh '''
+    cd ${WK}
+    git pull >/dev/null
+    export TZ=Asia/Harbin
+    date
+    git clean -dfx
+    mkdir debug
+    cd debug
+    cmake .. > /dev/null
+    make
+    '''
+    return 1
+}
 def pre_test_win(){
     bat '''
     taskkill /f /t /im python.exe

@@ -429,7 +500,7 @@ pipeline {
         stage('python_3_s6') {
           agent{label " slave6 || slave16 "}
           steps {
-            timeout(time: 55, unit: 'MINUTES'){
+            timeout(time: 65, unit: 'MINUTES'){
               pre_test()
               sh '''
               date

@@ -575,7 +646,7 @@ pipeline {
         stage('test_b6_s9') {
           agent{label " slave9 || slave19 "}
           steps {
-            timeout(time: 55, unit: 'MINUTES'){
+            timeout(time: 105, unit: 'MINUTES'){
               pre_test()
               sh '''
               date
packaging/release.sh

@@ -194,6 +194,7 @@ fi
 if [[ "$dbName" == "pro" ]]; then
   sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c
+  sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c
 fi

 echo "build ${pagMode} package ..."
src/client/inc/tscUtil.h

@@ -119,7 +119,8 @@ typedef struct SBlockKeyInfo {
 int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len);

 int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
-void tscDestroyDataBlock(SSqlObj* pSql, STableDataBlocks* pDataBlock, bool removeMeta);
+int32_t tscCreateDataBlockData(STableDataBlocks* dataBuf, size_t defaultSize, int32_t rowSize, int32_t startOffset);
+void tscDestroyDataBlock(SSqlObj* pSql, STableDataBlocks* pDataBlock, bool removeMeta);
 void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
 int  tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
 int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
src/client/src/tscAsync.c

@@ -237,7 +237,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
     return;
   }

-  if (pRes->qId == 0) {
+  if (pRes->qId == 0 && pSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
     tscError("qhandle is invalid");
     pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
     tscAsyncResultOnError(pSql);
src/client/src/tscPrepare.c

@@ -48,12 +48,14 @@ typedef struct SMultiTbStmt {
   bool      nameSet;
   bool      tagSet;
   bool      subSet;
+  bool      tagColSet;
   uint64_t  currentUid;
   char     *sqlstr;
   uint32_t  tbNum;
   SStrToken tbname;
   SStrToken stbname;
   SStrToken values;
+  SStrToken tagCols;
   SArray   *tags;
   STableDataBlocks *lastBlock;
   SHashObj *pTableHash;

@@ -1250,6 +1252,12 @@ static void insertBatchClean(STscStmt* pStmt) {
   pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
   pCmd->insertParam.numOfTables = 0;

+  STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
+  while (p) {
+    tfree((*p)->pData);
+    p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
+  }
+
   taosHashClear(pCmd->insertParam.pTableBlockHashList);
   tscFreeSqlResult(pSql);
   tscFreeSubobj(pSql);

@@ -1343,9 +1351,40 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
   pStmt->mtb.stbname = sToken;

   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
-  if (sToken.n <= 0 || sToken.type != TK_TAGS) {
-    tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
-    return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
+  if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) {
+    tscError("invalid token, sql:%s", pCmd->insertParam.sql);
+    return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
+  }
+
+  // ... (tag_col_list) TAGS(tag_val_list) ...
+  int32_t tagColsCnt = 0;
+  if (sToken.type == TK_LP) {
+    pStmt->mtb.tagColSet = true;
+    pStmt->mtb.tagCols = sToken;
+    int32_t tagColsStart = index;
+    while (1) {
+      sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+      if (sToken.type == TK_ILLEGAL) {
+        return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
+      }
+      if (sToken.type == TK_ID) {
+        ++tagColsCnt;
+      }
+      if (sToken.type == TK_RP) {
+        break;
+      }
+    }
+    if (tagColsCnt == 0) {
+      tscError("tag column list expected, sql:%s", pCmd->insertParam.sql);
+      return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql);
+    }
+    pStmt->mtb.tagCols.n = index - tagColsStart + 1;
+
+    sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+    if (sToken.n <= 0 || sToken.type != TK_TAGS) {
+      tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
+      return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
+    }
   }

   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);

@@ -1385,6 +1424,11 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
     return tscSQLSyntaxErrMsg(pCmd->payload, "no tags", pCmd->insertParam.sql);
   }

+  if (tagColsCnt > 0 && taosArrayGetSize(pStmt->mtb.tags) != tagColsCnt) {
+    tscError("not match tags, sql:%s", pCmd->insertParam.sql);
+    return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql);
+  }
+
   sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
   if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
     tscError("sql error, sql:%s", pCmd->insertParam.sql);

@@ -1407,7 +1451,13 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
   int32_t j = 0;

   while (1) {
-    len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z);
+    if (pStmt->mtb.tagColSet) {
+      len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z, pStmt->mtb.tagCols.n, pStmt->mtb.tagCols.z);
+    } else {
+      len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z);
+    }
+
     if (len >= (size - 1)) {
       size *= 2;
       free(str);

@@ -1659,6 +1709,13 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
       STMT_RET(TSDB_CODE_TSC_APP_ERROR);
     }

+    if ((*t1)->pData == NULL) {
+      code = tscCreateDataBlockData(*t1, TSDB_PAYLOAD_SIZE, (*t1)->pTableMeta->tableInfo.rowSize, sizeof(SSubmitBlk));
+      if (code != TSDB_CODE_SUCCESS) {
+        STMT_RET(code);
+      }
+    }
+
     SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
     pCmd->batchSize = pBlk->numOfRows;
     if (pBlk->numOfRows == 0) {

@@ -1784,7 +1841,6 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
   STMT_RET(code);
 }

 int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
   STscStmt* pStmt = (STscStmt*)stmt;
   STMT_CHECK

@@ -1792,8 +1848,6 @@ int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
   return taos_stmt_set_tbname_tags(stmt, name, NULL);
 }

 int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
   STscStmt* pStmt = (STscStmt*)stmt;
   STMT_CHECK

@@ -1801,7 +1855,6 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
   return taos_stmt_set_tbname_tags(stmt, name, NULL);
 }

 int taos_stmt_close(TAOS_STMT* stmt) {
   STscStmt* pStmt = (STscStmt*)stmt;
   if (pStmt == NULL || pStmt->taos == NULL) {

@@ -1868,7 +1921,6 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
   }
 }

 int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
   STscStmt* pStmt = (STscStmt*)stmt;

@@ -1932,8 +1984,6 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
   STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx));
 }

 int taos_stmt_add_batch(TAOS_STMT* stmt) {
   STscStmt* pStmt = (STscStmt*)stmt;
   STMT_CHECK

@@ -2086,7 +2136,6 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
   }
 }

 char *taos_stmt_errstr(TAOS_STMT *stmt) {
   STscStmt* pStmt = (STscStmt*)stmt;

@@ -2097,8 +2146,6 @@ char *taos_stmt_errstr(TAOS_STMT *stmt) {
   return taos_errstr(pStmt->pSql);
 }

 const char *taos_data_type(int type) {
   switch (type) {
     case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL";

@@ -2115,4 +2162,3 @@ const char *taos_data_type(int type) {
     default: return "UNKNOWN";
   }
 }
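The new tagColSet/tagCols fields above let the multi-table bind path accept a prepared statement of the form "insert into ? using <stb> (<tag column list>) tags(?, ...) values(...)", and stmtGenInsertStatement() copies that column list into the SQL it generates for each sub-table. The following is only a rough usage sketch against the public stmt API; the super table st, tag column t1, sub-table d0 and value columns are invented for the illustration, and error handling is omitted, so it should not be read as the repository's own test code.

#include <stdint.h>
#include <string.h>
#include "taos.h"   /* TDengine client header: TAOS, TAOS_STMT, TAOS_BIND */

/* Illustration only: bind one sub-table of a hypothetical super table "st"
 * that has a single INT tag "t1", using the explicit tag column list that
 * the tagColSet branch above parses. */
void insert_with_tag_col_list(TAOS *taos) {
  const char *sql = "insert into ? using st (t1) tags(?) values(?, ?)";
  TAOS_STMT  *stmt = taos_stmt_init(taos);
  taos_stmt_prepare(stmt, sql, 0);

  int32_t   t1val = 7;
  TAOS_BIND tags[1];
  memset(tags, 0, sizeof(tags));
  tags[0].buffer_type   = TSDB_DATA_TYPE_INT;
  tags[0].buffer        = &t1val;
  tags[0].buffer_length = sizeof(t1val);
  tags[0].length        = &tags[0].buffer_length;

  /* the number of tag values bound here has to match the (t1) column list,
   * which is what the new tagColsCnt check in stmtParseInsertTbTags() enforces */
  taos_stmt_set_tbname_tags(stmt, "d0", tags);

  /* ... taos_stmt_bind_param() / taos_stmt_add_batch() once per row ... */
  taos_stmt_execute(stmt);
  taos_stmt_close(stmt);
}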
src/client/src/tscSub.c

@@ -468,7 +468,7 @@ SSqlObj* recreateSqlObj(SSub* pSub) {
   }

   registerSqlObj(pSql);
+  pSql->rootObj = pSql;

   code = tsParseSql(pSql, true);
   if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
     tsem_wait(&pSub->sem);
src/client/src/tscUtil.c

@@ -187,7 +187,7 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
       continue;
     }

-    if (functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_TID_TAG) {
+    if (functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_TID_TAG && functId != TSDB_FUNC_BLKINFO) {
       return false;
     }
   }

@@ -1845,6 +1845,32 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }

+  int32_t code = tscCreateDataBlockData(dataBuf, defaultSize, rowSize, startOffset);
+  if (code != TSDB_CODE_SUCCESS) {
+    tfree(dataBuf);
+    return code;
+  }
+
+  //Here we keep the tableMeta to avoid it to be remove by other threads.
+  dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
+
+  SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
+  SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
+  tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
+
+  dataBuf->vgId = dataBuf->pTableMeta->vgId;
+
+  tNameAssign(&dataBuf->tableName, name);
+
+  assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
+
+  *dataBlocks = dataBuf;
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t tscCreateDataBlockData(STableDataBlocks* dataBuf, size_t defaultSize, int32_t rowSize, int32_t startOffset) {
+  assert(dataBuf != NULL);
+
   dataBuf->nAllocSize = (uint32_t)defaultSize;
   dataBuf->headerSize = startOffset;

@@ -1857,30 +1883,16 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
   dataBuf->pData = malloc(dataBuf->nAllocSize);
   if (dataBuf->pData == NULL) {
     tscError("failed to allocated memory, reason:%s", strerror(errno));
-    tfree(dataBuf);
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
   memset(dataBuf->pData, 0, sizeof(SSubmitBlk));

-  //Here we keep the tableMeta to avoid it to be remove by other threads.
-  dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
-
-  SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
-  SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
-  tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
-
   dataBuf->ordered  = true;
   dataBuf->prevTS   = INT64_MIN;
   dataBuf->rowSize  = rowSize;
   dataBuf->size     = startOffset;
   dataBuf->tsSource = -1;
-  dataBuf->vgId     = dataBuf->pTableMeta->vgId;
-
-  tNameAssign(&dataBuf->tableName, name);
-
-  assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
-
-  *dataBlocks = dataBuf;
   return TSDB_CODE_SUCCESS;
 }
src/common/src/tdataformat.c

@@ -253,9 +253,10 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo
   }

   if (tdAllocMemForCol(pCol, maxPoints) < 0) return -1;

-  if (numOfRows > 0) {
+  if (((rowOffset == 0) && (numOfRows > 0)) || ((rowOffset == -1) && (numOfRows >= 0))) {
     // Find the first not null value, fill all previouse values as NULL
-    dataColSetNEleNull(pCol, numOfRows);
+    dataColSetNEleNull(pCol, numOfRows - rowOffset);
   }
 }

@@ -463,9 +464,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
   int rcol = 0;
   int dcol = 0;

   while (dcol < pCols->numOfCols) {
+    bool setCol = 0;
     SDataCol *pDataCol = &(pCols->cols[dcol]);
     if (rcol >= schemaNCols(pSchema)) {
       dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);

@@ -476,14 +475,22 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
       STColumn *pRowCol = schemaColAt(pSchema, rcol);
       if (pRowCol->colId == pDataCol->colId) {
         void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
+        if (!isNull(value, pDataCol->type)) setCol = 1;
+        if (rowOffset == 0) {
           dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
+        } else if (rowOffset == -1) {
+          // for update 2
+          if (!isNull(value, pDataCol->type)) {
+            dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
+          }
+        } else {
+          ASSERT(0);
+        }
         dcol++;
         rcol++;
       } else if (pRowCol->colId < pDataCol->colId) {
         rcol++;
       } else {
-        if (forceSetNull) {
+        if (forceSetNull || setCol) {
           dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
         }
         dcol++;

@@ -501,7 +508,6 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
   int nRowCols = kvRowNCols(row);

   while (dcol < pCols->numOfCols) {
+    bool setCol = 0;
     SDataCol *pDataCol = &(pCols->cols[dcol]);
     if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
       dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);

@@ -513,14 +519,22 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
       if (colIdx->colId == pDataCol->colId) {
         void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
+        if (!isNull(value, pDataCol->type)) setCol = 1;
+        if (rowOffset == 0) {
           dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
+        } else if (rowOffset == -1) {
+          // for update 2
+          if (!isNull(value, pDataCol->type)) {
+            dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
+          }
+        } else {
+          ASSERT(0);
+        }
         ++dcol;
         ++rcol;
       } else if (colIdx->colId < pDataCol->colId) {
         ++rcol;
       } else {
-        if (forceSetNull) {
+        if (forceSetNull || setCol) {
           dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
         }
         ++dcol;
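For reference, the reworked guard in dataColAppendVal() can be read in isolation: rowOffset == 0 is the normal append path and NULL-fills only when rows already exist, while rowOffset == -1 (the "for update 2" path) also fires for an empty column, and the fill count becomes numOfRows - rowOffset. The following is a tiny stand-alone restatement of just that condition for illustration, not the library function itself.

#include <stdbool.h>
#include <stdio.h>

/* Stand-alone restatement of the new guard shown in the first hunk above. */
static bool needNullFill(int rowOffset, int numOfRows) {
  return ((rowOffset == 0) && (numOfRows > 0)) ||
         ((rowOffset == -1) && (numOfRows >= 0));
}

int main(void) {
  printf("rowOffset=0,  numOfRows=0 -> %d\n", needNullFill(0, 0));           /* 0: nothing to fill   */
  printf("rowOffset=0,  numOfRows=3 -> %d (fill 3)\n", needNullFill(0, 3));  /* numOfRows - 0  = 3   */
  printf("rowOffset=-1, numOfRows=0 -> %d (fill 1)\n", needNullFill(-1, 0)); /* numOfRows - (-1) = 1 */
  return 0;
}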
src/common/src/tglobal.c

@@ -27,6 +27,9 @@
 #include "tulog.h"
 #include "tutil.h"

+// TSDB
+bool tsdbForceKeepFile = false;
+
 // cluster
 char tsFirst[TSDB_EP_LEN] = {0};
 char tsSecond[TSDB_EP_LEN] = {0};
src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java

@@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBErrorNumbers;
 import org.apache.http.HeaderElement;
 import org.apache.http.HeaderElementIterator;
 import org.apache.http.HttpEntity;
+import org.apache.http.NoHttpResponseException;
 import org.apache.http.client.ClientProtocolException;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.methods.*;
src/mnode/src/mnodeDb.c

@@ -1139,7 +1139,7 @@ static int32_t mnodeAlterDbFp(SMnodeMsg *pMsg) {
     return sdbUpdateRow(&row);
   }

-  //bnNotify();
+  bnNotify();

   return TSDB_CODE_MND_ACTION_IN_PROGRESS;
 }
src/mnode/src/mnodeShow.c

@@ -121,7 +121,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
   }

   if (!tsMnodeShowMetaFp[pShowMsg->type] || !tsMnodeShowRetrieveFp[pShowMsg->type]) {
-    mError("show type:%s is not support", mnodeGetShowType(pShowMsg->type));
+    mWarn("show type:%s is not support", mnodeGetShowType(pShowMsg->type));
     return TSDB_CODE_COM_OPS_NOT_SUPPORT;
   }
src/mnode/src/mnodeVgroup.c

@@ -742,19 +742,6 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
   return 0;
 }

-static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) {
-  if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
-    return true;
-  }
-
-  SCTableObj *pCTable = (SCTableObj *)pTable;
-  if (pVgroup->vgId == pCTable->vgId) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
 static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
   int32_t numOfRows = 0;
   SVgObj  *pVgroup = NULL;

@@ -770,11 +757,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
     return 0;
   }

-  STableObj *pTable = NULL;
-  if (pShow->payloadLen > 0) {
-    pTable = mnodeGetTable(pShow->payload);
-  }
-
   while (numOfRows < rows) {
     pShow->pIter = mnodeGetNextVgroup(pShow->pIter, &pVgroup);
     if (pVgroup == NULL) break;

@@ -784,11 +766,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
       continue;
     }

-    if (!mnodeFilterVgroups(pVgroup, pTable)) {
-      mnodeDecVgroupRef(pVgroup);
-      continue;
-    }
-
     cols = 0;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;

@@ -842,7 +819,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
   mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);

   pShow->numOfReads += numOfRows;
-  mnodeDecTableRef(pTable);
   mnodeDecDbRef(pDb);

   return numOfRows;
src/os/src/detail/osTime.c

@@ -424,29 +424,44 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
   }
   } //end switch fromPrecision

 end_:
-  if (tempResult > (double)INT64_MAX) return INT64_MAX;
-  if (tempResult < (double)INT64_MIN) return INT64_MIN + 1;  // INT64_MIN means NULL
+  if (tempResult >= (double)INT64_MAX) return INT64_MAX;
+  if (tempResult <= (double)INT64_MIN) return INT64_MIN + 1;  // INT64_MIN means NULL
   return time;
 }

 static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
   switch (unit) {
-    case 's':
+    case 's': {
+      double temp = ((double)val) * MILLISECOND_PER_SECOND;
+      if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
       (*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
       break;
-    case 'm':
+    }
+    case 'm': {
+      double temp = ((double)val) * MILLISECOND_PER_MINUTE;
+      if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
       (*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
       break;
-    case 'h':
+    }
+    case 'h': {
+      double temp = ((double)val) * MILLISECOND_PER_HOUR;
+      if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
       (*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
       break;
-    case 'd':
+    }
+    case 'd': {
+      double temp = ((double)val) * MILLISECOND_PER_DAY;
+      if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
       (*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
       break;
-    case 'w':
+    }
+    case 'w': {
+      double temp = ((double)val) * MILLISECOND_PER_WEEK;
+      if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
       (*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
       break;
+    }
     case 'a':
       (*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, timePrecision);
       break;
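Every duration unit now goes through the same overflow guard: the product is first computed in double, and a value whose product would not fit in int64_t makes getDuration() return -1 instead of silently wrapping. The following is a self-contained sketch of that pattern for the seconds case only; the constant is redefined locally so the snippet compiles on its own, and it is not the repository function itself.

#include <stdint.h>
#include <stdio.h>

#define MS_PER_SECOND 1000LL   /* local stand-in for MILLISECOND_PER_SECOND */

/* Multiply seconds into milliseconds with the same double-based overflow
 * check that the hunk above adds to getDuration(): reject with -1 if the
 * product cannot be represented as int64_t. */
static int toMillis(int64_t seconds, int64_t *result) {
  double temp = ((double)seconds) * MS_PER_SECOND;
  if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
  *result = seconds * MS_PER_SECOND;
  return 0;
}

int main(void) {
  int64_t ms = 0;
  printf("%d\n", toMillis(60, &ms));              /* 0, ms == 60000          */
  printf("%d\n", toMillis(INT64_MAX / 10, &ms));  /* -1, product would overflow */
  return 0;
}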
src/query/inc/sql.y

@@ -127,12 +127,6 @@ cmd ::= SHOW dbPrefix(X) VGROUPS. {
     setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
 }

-cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
-    SStrToken token;
-    tSetDbName(&token, &X);
-    setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y);
-}
-
 //drop configure for tables
 cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
     X.n += Z.n;
src/query/src/qAggMain.c

@@ -821,7 +821,7 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_
   if (pInfo->hasResult != DATA_SET_FLAG) {
     return BLK_DATA_ALL_NEEDED;
   } else {
-    return (pInfo->ts > w->ekey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
+    return (pInfo->ts >= w->ekey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
   }
 }

@@ -4459,7 +4459,7 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
                "5th=[%d], 10th=[%d], 20th=[%d], 30th=[%d], 40th=[%d], 50th=[%d]\n\t "
                "60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
                "Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
-               "Rows=[%"PRIu64"], Blocks=[%"PRId64"], SmallBlocks=[%d], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
+               "Rows=[%"PRIu64"], Blocks=[%"PRId64"], SmallBlocks=[%d], Size=[%.3f(Kb)] Comp=[%.5g]\n\t "
               "RowsInMem=[%d] \n\t ",
               percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
               percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
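The only format-level change in the second hunk is the compression ratio: %.2f rounds small ratios down to 0.00, while %.5g keeps five significant digits. A two-line illustration with a made-up ratio value:

#include <stdio.h>

int main(void) {
  double comp = 0.0123;               /* example compression ratio */
  printf("Comp=[%.2f]\n", comp);      /* prints Comp=[0.01]   */
  printf("Comp=[%.5g]\n", comp);      /* prints Comp=[0.0123] */
  return 0;
}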
src/tsdb/src/tsdbMeta.c

@@ -342,7 +342,7 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
     tsdbError(
         "vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
         "version %d",
-        REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
+        REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->pSuper->tagSchema));
     terrno = TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
     return -1;
   }
src/tsdb/src/tsdbRead.c

@@ -1377,66 +1377,63 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock,
   return code;
 }

-static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
-  int    firstPos, lastPos, midPos = -1;
-  int    numOfRows;
-  TSKEY* keyList;
-
-  assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
-
-  if (num <= 0) return -1;
-
-  keyList = (TSKEY*)pValue;
-  firstPos = 0;
-  lastPos = num - 1;
-
-  if (order == TSDB_ORDER_DESC) {
-    // find the first position which is smaller than the key
-    while (1) {
-      if (key >= keyList[lastPos]) return lastPos;
-      if (key == keyList[firstPos]) return firstPos;
-      if (key < keyList[firstPos]) return firstPos - 1;
-
-      numOfRows = lastPos - firstPos + 1;
-      midPos = (numOfRows >> 1) + firstPos;
-
-      if (key < keyList[midPos]) {
-        lastPos = midPos - 1;
-      } else if (key > keyList[midPos]) {
-        firstPos = midPos + 1;
-      } else {
-        break;
-      }
-    }
-  } else {
-    // find the first position which is bigger than the key
-    while (1) {
-      if (key <= keyList[firstPos]) return firstPos;
-      if (key == keyList[lastPos]) return lastPos;
-
-      if (key > keyList[lastPos]) {
-        lastPos = lastPos + 1;
-        if (lastPos >= num)
-          return -1;
-        else
-          return lastPos;
-      }
-
-      numOfRows = lastPos - firstPos + 1;
-      midPos = (numOfRows >> 1) + firstPos;
-
-      if (key < keyList[midPos]) {
-        lastPos = midPos - 1;
-      } else if (key > keyList[midPos]) {
-        firstPos = midPos + 1;
-      } else {
-        break;
-      }
-    }
-  }
-
-  return midPos;
-}
+// search last keyList[ret] < key order asc and keyList[ret] > key order desc
+static int doBinarySearchKey(TSKEY* keyList, int num, int pos, TSKEY key, int order) {
+  // start end posistion
+  int s, e;
+  s = pos;
+
+  // check
+  assert(pos >= 0 && pos < num);
+  assert(num > 0);
+
+  if (order == TSDB_ORDER_ASC) {
+    // find the first position which is smaller than the key
+    e = num - 1;
+    if (key < keyList[pos])
+      return -1;
+    while (1) {
+      // check can return
+      if (key >= keyList[e]) return e;
+      if (key <= keyList[s]) return s;
+      if (e - s <= 1) return s;
+
+      // change start or end position
+      int mid = s + (e - s + 1)/2;
+      if (keyList[mid] > key)
+        e = mid;
+      else if (keyList[mid] < key)
+        s = mid;
+      else
+        return mid;
+    }
+  } else { // DESC
+    // find the first position which is bigger than the key
+    e = 0;
+    if (key > keyList[pos])
+      return -1;
+    while (1) {
+      // check can return
+      if (key <= keyList[e]) return e;
+      if (key >= keyList[s]) return s;
+      if (s - e <= 1) return s;
+
+      // change start or end position
+      int mid = s - (s - e + 1)/2;
+      if (keyList[mid] < key)
+        e = mid;
+      else if (keyList[mid] > key)
+        s = mid;
+      else
+        return mid;
+    }
+  }
+}

 static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {

@@ -1844,7 +1841,6 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
 int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) {
   // NOTE: reverse the order to find the end position in data block
   int32_t endPos = -1;
-  int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
   SQueryFilePos* cur = &pQueryHandle->cur;
   SDataCols* pCols = pQueryHandle->rhelper.pDCols[0];

@@ -1857,7 +1853,9 @@ int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBl
     cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
   } else {
     assert(pCols->numOfRows > 0);
-    endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
+    int pos = ASCENDING_TRAVERSE(pQueryHandle->order) ? 0 : pBlockInfo->rows - 1;
+    endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pos, pQueryHandle->window.ekey, pQueryHandle->order);
+    assert(endPos != -1);
     cur->mixBlock = true;
   }

@@ -1877,17 +1875,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
   assert(pCols->cols[0].type == TSDB_DATA_TYPE_TIMESTAMP && pCols->cols[0].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
          cur->pos >= 0 && cur->pos < pBlock->numOfRows);

-  TSKEY* tsArray = pCols->cols[0].pData;
-  assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows-1] == pBlock->keyLast);
-
-  // for search the endPos, so the order needs to reverse
-  int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
-
+  // key read from file
+  TSKEY* keyFile = pCols->cols[0].pData;
+  assert(pCols->numOfRows == pBlock->numOfRows && keyFile[0] == pBlock->keyFirst && keyFile[pBlock->numOfRows-1] == pBlock->keyLast);
+
   int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
   int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));

   STable* pTable = pCheckInfo->pTableObj;
   int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);

   tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
             "end:%d, 0x%"PRIx64,

@@ -1902,6 +1899,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
     STSchema* pSchema1 = NULL;
     STSchema* pSchema2 = NULL;

+    // position in file ->fpos
     int32_t pos = cur->pos;
     cur->win = TSWINDOW_INITIALIZER;

@@ -1924,9 +1922,13 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
         break;
       }

-      if (((pos > endPos || tsArray[pos] > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
-          ((pos < endPos || tsArray[pos] < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
-        break;
+      // break if pos not in this block endPos range. note old code when pos is -1 can crash.
+      if (ASCENDING_TRAVERSE(pQueryHandle->order)) { //ASC
+        if (pos > endPos || keyFile[pos] > pQueryHandle->window.ekey) break;
+      } else { //DESC
+        if (pos < endPos || keyFile[pos] < pQueryHandle->window.ekey) break;
       }

       if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||

@@ -1942,16 +1944,18 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
         mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, true);
         numOfRows += 1;
+        // record start key with memory key if not
         if (cur->win.skey == TSKEY_INITIAL_VAL) {
-          cur->win.skey = key;
+          cur->win.skey = keyMem;
         }

-        cur->win.ekey = key;
-        cur->lastKey  = key + step;
+        cur->win.ekey = keyMem;
+        cur->lastKey  = keyMem + step;
         cur->mixBlock = true;

         moveToNextRowInMem(pCheckInfo);
-      } else if (key == tsArray[pos]) {  // data in buffer has the same timestamp of data in file block, ignore it
+      // same select mem key if update is true
+      } else if (keyMem == keyFile[pos]) {
         if (pCfg->update) {
           if (pCfg->update == TD_ROW_PARTIAL_UPDATE) {
             doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos);

@@ -1969,31 +1973,36 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
           mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, forceSetNull);
           numOfRows += 1;
           if (cur->win.skey == TSKEY_INITIAL_VAL) {
-            cur->win.skey = key;
+            cur->win.skey = keyMem;
           }

-          cur->win.ekey = key;
-          cur->lastKey = key + step;
+          cur->win.ekey = keyMem;
+          cur->lastKey = keyMem + step;
           cur->mixBlock = true;

+          //mem move next
           moveToNextRowInMem(pCheckInfo);
+          //file move next, discard file row
           pos += step;
         } else {
+          // not update, only mem move to next, discard mem row
           moveToNextRowInMem(pCheckInfo);
         }
-      } else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
-                 (key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
+      // put file row
+      } else if ((keyMem > keyFile[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
+                 (keyMem < keyFile[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
         if (cur->win.skey == TSKEY_INITIAL_VAL) {
-          cur->win.skey = tsArray[pos];
+          cur->win.skey = keyFile[pos];
         }

-        int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
+        int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pos, keyMem, pQueryHandle->order);
         assert(end != -1);
         if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
           if (pCfg->update == TD_ROW_DISCARD_UPDATE) {
             moveToNextRowInMem(pCheckInfo);
           } else {
+            // can update, don't copy then deal on next loop with keyMem == keyFile[pos]
             end -= step;
           }
         }

@@ -2001,10 +2010,17 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
         int32_t qstart = 0, qend = 0;
         getQualifiedRowsPos(pQueryHandle, pos, end, numOfRows, &qstart, &qend);

-        numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend);
-        pos += (qend - qstart + 1) * step;
-
-        cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[qend]:tsArray[qstart];
+        if (qend >= qstart) {
+          // copy qend - qstart + 1 rows from file
+          numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend);
+          int32_t num = qend - qstart + 1;
+          pos += num * step;
+        } else {
+          // nothing copy from file
+          pos += step;
+        }
+
+        cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? keyFile[qend]:keyFile[qstart];
         cur->lastKey = cur->win.ekey + step;
       }
     } while (numOfRows < pQueryHandle->outputCapacity);

@@ -2021,7 +2037,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
             !ASCENDING_TRAVERSE(pQueryHandle->order))) {
     // no data in cache or data in cache is greater than the ekey of time window, load data from file block
     if (cur->win.skey == TSKEY_INITIAL_VAL) {
-      cur->win.skey = tsArray[pos];
+      cur->win.skey = keyFile[pos];
     }

     int32_t start = -1, end = -1;

@@ -2030,7 +2046,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
     numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
     pos += (end - start + 1) * step;

-    cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[end]:tsArray[start];
+    cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? keyFile[end]:keyFile[start];
     cur->lastKey = cur->win.ekey + step;
     cur->mixBlock = true;
   }

@@ -2946,6 +2962,9 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
 // handle data in cache situation
 bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
   STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
+  if (pQueryHandle == NULL) {
+    return false;
+  }

   if (emptyQueryTimewindow(pQueryHandle)) {
     tsdbDebug("%p query window not overlaps with the data set, no result returned, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);

@@ -3065,6 +3084,9 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
   pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qId, pMemRef);
   tfree(cond.colList);
+  if (pSecQueryHandle == NULL) {
+    goto out_of_memory;
+  }

   // current table, only one table
   STableCheckInfo* pCurrent = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
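The rewritten doBinarySearchKey() takes the raw TSKEY array plus a starting position and, for ascending order, returns the last index whose key does not exceed the search key (or -1 when even keyList[pos] is already larger), which is why getEndPosInDataBlock() and doMergeTwoLevelData() can now pass pQueryHandle->order directly instead of reversing it. The following is a self-contained sketch of just the ascending branch with a few sample probes; it restates the logic shown in the hunk above for illustration and is not the library function itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* Ascending-order restatement of the new search: starting from `pos`,
 * return the last index whose key is <= `key`, or -1 when keyList[pos]
 * itself is already greater than `key`. */
static int searchLastLE(const TSKEY *keyList, int num, int pos, TSKEY key) {
  assert(num > 0 && pos >= 0 && pos < num);
  int s = pos, e = num - 1;
  if (key < keyList[pos]) return -1;
  while (1) {
    if (key >= keyList[e]) return e;
    if (key <= keyList[s]) return s;
    if (e - s <= 1) return s;
    int mid = s + (e - s + 1) / 2;
    if (keyList[mid] > key)      e = mid;
    else if (keyList[mid] < key) s = mid;
    else                         return mid;
  }
}

int main(void) {
  TSKEY keys[] = {10, 20, 30, 40, 50};
  printf("%d\n", searchLastLE(keys, 5, 0, 35));  /*  2: keys[2] == 30        */
  printf("%d\n", searchLastLE(keys, 5, 0, 50));  /*  4: exact match at end   */
  printf("%d\n", searchLastLE(keys, 5, 0, 5));   /* -1: smaller than keys[0] */
  return 0;
}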
tests/pytest/fulltest.sh

@@ -279,6 +279,7 @@ python3 ./test.py -f query/queryCnameDisplay.py
 # python3 ./test.py -f query/long_where_query.py
 python3 test.py -f query/nestedQuery/queryWithSpread.py
 python3 ./test.py -f query/bug6586.py
+python3 ./test.py -f query/bug5903.py

 #stream
 python3 ./test.py -f stream/metric_1.py
tests/pytest/functions/function_derivative.py

@@ -68,9 +68,9 @@ class TDTestCase:
         tdSql.checkData(0, 0, "2018-09-17 09:00:10.000")
         tdSql.checkData(0, 1, "2018-09-17 09:00:10.000")
         tdSql.checkData(0, 3, "2018-09-17 09:00:10.000")
-        tdSql.checkData(1, 0, "2018-09-17 09:00:20.009")
-        tdSql.checkData(1, 1, "2018-09-17 09:00:20.009")
-        tdSql.checkData(1, 3, "2018-09-17 09:00:20.009")
+        tdSql.checkData(1, 0, "2018-09-17 09:00:20.000")
+        tdSql.checkData(1, 1, "2018-09-17 09:00:20.000")
+        tdSql.checkData(1, 3, "2018-09-17 09:00:20.000")
         tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname)")

@@ -150,6 +150,7 @@ class TDTestCase:
         tdSql.error("select derivative(col, -106752999999999922222d, 0) from stb group by tbname");    #overflow error
         tdSql.error("select derivative(col, 10y, 0) from stb group by tbname")     #TD-10399, DB error: syntax error near '10y, 0) from stb group by tbname;'
         tdSql.error("select derivative(col, -106752d, 0) from stb group by tbname")    #TD-10398 overflow tips
+        tdSql.error("select derivative(col, 106751991168d, 0) from stb group by tbname")   #TD-10398 overflow tips

     def run(self):
         tdSql.prepare()
tests/pytest/query/bug5903.py  0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        # TD-5903 show db.vgroups xxx. xxx is invalid content, but still returns results.
        tdSql.execute("create database if not exists test_5903")
        tdSql.execute("show test_5903.vgroups")
        tdSql.error("show test_5903.vgroups xxx")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/query/udf.py
...
@@ -73,21 +73,18 @@ class TDTestCase:
        tdSql.error(sql)
        sql = 'select abs_max(c2) from db.stb'
        tdSql.query(sql)
-       tdSql.checkData(0, 0, 1410065607)
+       tdSql.checkData(0, 0, 10000000199)

    def test_udf_values(self):
        tdSql.execute("drop function abs_max")
        tdSql.execute("create function add_one as '/tmp/add_one.so' outputtype int")
        tdSql.execute("create aggregate function abs_max as '/tmp/abs_max.so' outputtype bigint;")
-       tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype int bufsize 128;")
+       tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype bigint;")
        # UDF bug no 1 -> follow 3 cases about this bug ;
        # tdSql.error("create aggregate function max as '/tmp/abs_max.so' outputtype bigint ;")
        # tdSql.error("create aggregate function avg as '/tmp/abs_max.so' outputtype bigint ;")
        # tdSql.error("create aggregate function dbs as '/tmp/abs_max.so' outputtype bigint ;")
        tdSql.execute("drop database if exists test")
        tdSql.execute("create database test")
        tdSql.execute("use test")
...
@@ -117,7 +114,7 @@ class TDTestCase:
        tdSql.execute("insert into bound values(%d, %d , %f, %d , %s)" % (epoch_time + 1000, intdata2 + 1, float(intdata2 + 1), bigintdata2 + 1, "'binary" + str(intdata2 + 1) + "'"))
        # check super table calculation results
-       tdSql.query("select add_one(id) from st")
+       tdSql.query("select add_one(id) test from st")
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(1, 0, 2)
        tdSql.checkData(4, 0, 5)
...
@@ -157,29 +154,266 @@ class TDTestCase:
            tdLog.info(" ====== unexpected error occured about UDF function =====")
            sys.exit()
        # UDF bug no 2 -> values of abs_max not inconsistent from common table and stable.
        tdSql.query("select abs_max(val) from st")
        # tdSql.query("select abs_max(val) from st") # result is 0 rows
        tdSql.query("select abs_max(val) from tb1")
        # tdSql.query("select abs_max(val) from tb1")
        tdSql.checkRows(0)
        # tdSql.checkData(0,0,0) # this is error result
        tdSql.query("select sum_double(val) from st")
        # tdSql.query("select sum_double(val) from st") # result is 0 rows
        tdSql.query("select sum_double(val) from tb1")
        # tdSql.query("select sum_double(val) from tb1")
        tdSql.checkRows(0)
        # tdSql.checkData(0,0,0) # this is error result
        # UDF bug no 3 -> values of abs_max will error for boundary number
        # check super table calculation results
        # tdSql.query("select abs_max(number) from st")
        tdSql.query("select abs_max(number) from st")
        # tdSql.checkData(0,0,9223372036854775807)
        tdSql.checkData(0,0,9223372036854775807)
        # check common table calculation results
        tdSql.query("select abs_max(number) from tb1")
        tdSql.checkData(0,0,400)
        tdSql.query("select abs_max(number) from tb2")
        tdSql.checkData(0,0,400)
        tdSql.execute("select add_one(id) from st limit 10 offset 2")
        tdSql.query("select add_one(id) from st where ts > 1604298064000 and ts < 1604298064020 ")
        tdSql.checkData(0,0,1)
        tdSql.checkData(1,0,-2147483644)
        tdSql.query("select add_one(id) from tb1 where ts > 1604298064000 and ts < 1604298064020 ")
        tdSql.checkData(0,0,1)
        tdSql.query("select sum_double(id) from st where ts > 1604298064030 and ts < 1604298064060 ")
        tdSql.checkData(0,0,14)
        tdSql.query("select sum_double(id) from tb2 where ts > 1604298064030 and ts < 1604298064060 ")
        tdSql.checkRows(0)
        tdSql.query("select add_one(id) from st where ts = 1604298064000 ")
        tdSql.checkData(0,0,-2147483645)
        tdSql.query("select add_one(id) from st where ts > 1604298064000 and id in (2,3) and ind =1;")
        tdSql.checkData(0,0,3)
        tdSql.checkData(1,0,4)
        tdSql.query("select id , add_one(id) from tb1 where ts > 1604298064000 and id in (2,3)")
        tdSql.checkData(0,0,2)
        tdSql.checkData(0,1,3)
        tdSql.checkData(1,0,3)
        tdSql.checkData(1,1,4)
        tdSql.query("select sum_double(id) from tb1 where ts > 1604298064000 and id in (2,3)")
        tdSql.checkData(0,0,10)
        tdSql.query("select sum_double(id) from st where ts > 1604298064000 and id in (2,3) and ind =1")
        tdSql.checkData(0,0,10)
        tdSql.query("select abs_max(number) from st where ts > 1604298064000 and id in (2,3) and ind =1")
        tdSql.checkData(0,0,300)
        tdSql.query("select sum_double(id) from st where ts = 1604298064030 ")
        tdSql.checkData(0,0,4)
        tdSql.query("select abs_max(number) from st where ts = 1604298064100 ")
        tdSql.checkData(0,0,9223372036854775806)
        tdSql.query("select abs_max(number) from tb2 where ts = 1604298064100 ")
        tdSql.checkData(0,0,400)
        tdSql.query("select sum_double(id) from tb2 where ts = 1604298064100 ")
        tdSql.checkData(0,0,8)
        tdSql.query("select add_one(id) from st where ts >= 1604298064000 and ts <= 1604298064010")
        tdSql.checkData(0,0,1)
        tdSql.checkData(1,0,-2147483645)
        tdSql.checkData(2,0,-2147483644)
        tdSql.query("select add_one(id) from tb1 where ts >= 1604298064000 and ts <= 1604298064010")
        tdSql.checkData(0,0,1)
        tdSql.query("select sum_double(id) from st where ts >= 1604298064030 and ts <= 1604298064050")
        tdSql.checkData(0,0,18)
        tdSql.query("select sum_double(id) from tb2 where ts >= 1604298064030 and ts <= 1604298064100")
        tdSql.checkData(0,0,20)
        tdSql.query("select abs_max(number) from tb2 where ts >= 1604298064030 and ts <= 1604298064100")
        tdSql.checkData(0,0,400)
        tdSql.query("select abs_max(number) from st where ts >= 1604298064030 and ts <= 1604298064100")
        tdSql.checkData(0,0,9223372036854775806)
        tdSql.query("select id from st where id != 0 and ts >=1604298064070")
        tdSql.checkData(0,0,1)
        tdSql.query("select add_one(id) from st where id != 0 and ts >=1604298064070")
        tdSql.checkData(0,0,2)
        tdSql.query("select add_one(id) from st where id <> 0 and ts >=1604298064010")
        tdSql.checkData(0,0,2)
        tdSql.query("select sum_double(id) from st where id in (2,3,4) and ts >=1604298064070")
        tdSql.checkData(0,0,18)
        tdSql.query("select sum_double(id) from tb2 where id in (2,3,4) and ts >=1604298064070")
        tdSql.checkData(0,0,18)
        tdSql.query("select abs_max(number) from st where id in (2,3,4) and ts >=1604298064070")
        tdSql.checkData(0,0,400)
        tdSql.query("select add_one(id) from st where id = 0 ")
        tdSql.checkData(0,0,1)
        tdSql.checkData(1,0,1)
        tdSql.query("select add_one(id) from tb2 where id = 0 ")
        tdSql.checkData(0,0,1)
        tdSql.query("select sum_double(id) from st where id = 1")
        tdSql.checkData(0,0,4)
        tdSql.query("select sum_double(id) from tb2 where id = 1")
        tdSql.checkData(0,0,2)
        tdSql.query("select add_one(id) from st where id is not null and ts >=1604298065000 ")
        tdSql.checkData(0,0,None)
        tdSql.query("select abs_max(number) from st where id is not null and ts >=1604298065000 ")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select abs_max(number) from bound where id is not null and ts >=1604298065000 ")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select sum_double(id) from st where id is not null and ts >=1604298064000 and ind = 1 ")
        tdSql.checkData(0,0,20)
        tdSql.query("select sum_double(id) from tb1 where id is not null and ts >=1604298064000 ")
        tdSql.checkData(0,0,20)
        tdSql.query("select add_one(id) from st where id is null and ts >=1604298065000 ")
        tdSql.checkRows(0)
        tdSql.query("select abs_max(number) from st where id is null and ts >=1604298065000 ")
        tdSql.checkRows(0)
        tdSql.query("select abs_max(number) from tb1 where id is null and ts >=1604298065000 ")
        tdSql.checkRows(0)
        tdSql.query("select add_one(id) from bound where id is not null and ts >=1604298065000;")
        tdSql.checkData(0,0,None)
        tdSql.query("select id,add_one(id) from bound;")
        tdSql.checkRowCol(4,2)
        tdSql.checkData(3,1,None)
        tdSql.query("select add_one(id) from st where ts between 1604298064000 and 1604298064010")
        tdSql.checkRows(3)
        tdSql.query("select add_one(id) from tb1 where ts between 1604298064000 and 1604298064010")
        tdSql.checkRows(1)
        tdSql.query("select sum_double(id) from st where ts between 1604298064000 and 1604298064010 and id>=0")
        tdSql.checkData(0,0,0)
        tdSql.query("select sum_double(id) from tb1 where ts between 1604298064000 and 1604298064010 and id>=0")
        tdSql.checkData(0,0,0)
        tdSql.query("select add_one(id) from st where id in (1,2)")
        tdSql.checkData(0,0,2)
        tdSql.checkData(1,0,3)
        tdSql.checkData(2,0,2)
        tdSql.checkData(3,0,3)
        tdSql.checkRows(4)
        tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(1s)")
        tdSql.checkData(0,1,20)
        tdSql.error("select sum_double(id) from st where ts < now and ind =1 interval(3s) sliding (1s) fill (NULL) ")
        tdSql.error("select sum_double(id) from st session(ts, 1s)")
        tdSql.query("select sum_double(id) from tb1 session(ts, 1s)")
        tdSql.checkData(0,1,20)
        # intervals sliding values calculation
        tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2")
        tdSql.checkData(0,1,20)
        tdSql.checkData(1,1,20)
        # scalar_function can't work when using interval and sliding =========
        tdSql.error("select add_one(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2 ")
        tdSql.error("select add_one(id) from st order by ts")
        tdSql.error("select ts,id,add_one(id) from st order by ts asc;")
        # # UDF not support order by
        tdSql.error("select ts,id,add_one(id) from st order by ts desc;")
        # UDF function union all
        tdSql.query("select add_one(id) from tb1 union all select add_one(id) from tb2;")
        tdSql.checkRows(10)
        tdSql.checkData(0,0,1)
        tdSql.checkData(5,0,1)
        tdSql.query("select sum_double(id) from tb1 union all select sum_double(id) from tb2;")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,20)
        tdSql.checkData(1,0,20)
        tdSql.query("select abs_max(number) from tb1 union all select abs_max(number) from bound;")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,400)
        tdSql.checkData(1,0,9223372036854775807)
        tdSql.execute("create stable stb (ts timestamp,id int , val double , number bigint, chars binary(200)) tags (ind int)")
        tdSql.execute("create table stb1 using stb tags(3)")
        tdSql.execute("insert into stb1 values(1604298064000 , 1 , 1.0 , 10000 ,'chars')")
        tdSql.query("select add_one(id) from st union all select add_one(id) from stb;")
        tdSql.checkRows(15)
        tdSql.checkData(13,0,None)
        tdSql.checkData(14,0,2)
        tdSql.query("select add_one(id) from st union all select add_one(id) from stb1;")
        tdSql.checkRows(15)
        tdSql.checkData(13,0,None)
        tdSql.checkData(14,0,2)
        tdSql.query("select id ,add_one(id) from tb1 union all select id ,add_one(id) from stb1;")
        tdSql.checkRows(6)
        tdSql.checkData(0,0,0)
        tdSql.checkData(0,1,1)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,2)
        # aggregate union all for different stables
        tdSql.query("select sum_double(id) from st union all select sum_double(id) from stb;")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,44)
        tdSql.checkData(1,0,2)
        tdSql.query("select id from st union all select id from stb1;")
        tdSql.checkRows(15)
        tdSql.query("select id from tb1 union all select id from stb1")
        tdSql.checkRows(6)
        tdSql.query("select sum_double(id) from tb1 union all select sum_double(id) from stb")
        tdSql.checkData(0,0,20)
        tdSql.checkData(1,0,2)
        tdSql.query("select sum_double(id) from st union all select sum_double(id) from stb1;")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,44)
        tdSql.checkData(1,0,2)
        tdSql.query("select abs_max(number) from st union all select abs_max(number) from stb;")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select abs_max(number) from bound union all select abs_max(number) from stb1;")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.checkData(1,0,10000)
        tdSql.query("select abs_max(number) from st union all select abs_max(number) from stb1;")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.checkData(1,0,10000)
        # group by for aggegate function ;
        tdSql.query("select sum_double(id) from st group by tbname;")
        tdSql.checkData(0,0,20)
        tdSql.checkData(0,1,'tb1')
        tdSql.checkData(1,0,20)
        tdSql.checkData(1,1,'tb2')
        tdSql.query("select sum_double(id) from st group by id;")
        tdSql.checkRows(9)
        tdSql.query("select sum_double(id) from st group by ts")
        tdSql.checkRows(12)
        tdSql.query("select sum_double(id) from st group by ind")
        tdSql.checkRows(3)
        tdSql.query("select sum_double(id) from st group by tbname order by ts asc;")
        tdSql.query("select abs_max(number) from st group by id")
        tdSql.checkRows(9)
        tdSql.checkData(0,0,9223372036854775806)
        tdSql.checkData(8,0,9223372036854775807)
        tdSql.query("select abs_max(number) from st group by ts")
        tdSql.checkRows(12)
        tdSql.checkData(11,0,9223372036854775807)
        tdSql.checkData(1,0,9223372036854775805)
        tdSql.query("select abs_max(number) from st group by ind")
        tdSql.checkRows(3)
        tdSql.checkData(0,0,400)
        tdSql.checkData(2,0,9223372036854775807)
        # UDF join
        tdSql.query("select add_one(tb1.id),add_one(bound.id) from tb1,bound where tb1.ts=bound.ts;")
        tdSql.checkData(0,0,1)
        tdSql.checkData(0,1,-2147483644)
        tdSql.query("select stb1.ts,add_one(stb1.id),bound.ts,add_one(bound.id) from stb1,bound where stb1.ts=bound.ts")
        tdSql.checkData(0,1,2)
        tdSql.checkData(0,3,-2147483645)
        tdSql.query("select st.ts,add_one(st.id),stb.ts,add_one(stb.id) from st,stb where st.ts=stb.ts and st.ind=stb.ind")
        tdSql.checkData(0,1,-2147483645)
        tdSql.checkData(0,3,2)
        tdSql.query("select sum_double(tb1.id),sum_double(bound.id) from tb1,bound where tb1.ts=bound.ts;")
        tdSql.checkData(0,0,0)
        tdSql.checkData(0,1,-4294967290)
        tdSql.query("select sum_double(stb1.id),sum_double(bound.id) from stb1,bound where stb1.ts=bound.ts")
        tdSql.checkData(0,0,2)
        tdSql.checkData(0,1,-4294967292)
        #UDF join for stables
        tdSql.query("select sum_double(st.id),sum_double(stb.id) from st,stb where st.ts=stb.ts and st.ind=stb.ind")
        tdSql.checkData(0,0,-4294967292)
        tdSql.checkData(0,1,2)
        tdSql.query("select abs_max(tb1.number),abs_max(bound.number) from tb1,bound where tb1.ts=bound.ts;")
        tdSql.checkData(0,0,0)
        tdSql.checkData(0,1,9223372036854775805)
        tdSql.query("select abs_max(stb1.number),abs_max(bound.number) from stb1,bound where stb1.ts=bound.ts")
        tdSql.checkData(0,0,10000)
        tdSql.checkData(0,1,9223372036854775806)
        tdSql.query("select abs_max(st.number),abs_max(stb.number) from st,stb where st.ts=stb.ts and st.ind=stb.ind")
        tdSql.checkData(0,0,9223372036854775806)
        tdSql.checkData(0,1,10000)
        # check boundary
        # tdSql.query("select abs_max(number) from bound")
        tdSql.query("select abs_max(number) from bound")
        # tdSql.checkData(0,0,9223372036854775807)
        tdSql.checkData(0,0,9223372036854775807)
        tdLog.info("======= UDF function sum_double check =======")
...
@@ -189,14 +423,10 @@ class TDTestCase:
        tdSql.query("select sum_double(id) from tb1")
        tdSql.checkData(0,0,20)
        # UDF bug no 4 -> values error while two function work : it is limit that udf can't work with build-in functions.
        # only one udf function in SQL can use ,follow errors notice.
        # tdSql.query("select sum_double(id) , abs_max(number) from tb1")
        tdSql.error("select sum_double(id) , abs_max(number) from tb1")
        # tdSql.checkData(0,0,20)
        tdSql.error("select sum_double(id) , abs_max(number) from st")
        # tdSql.checkData(0,0,400)
        # tdSql.query("select sum_double(id) , abs_max(number) from st")
        # tdSql.checkData(0,0,44)
        # tdSql.checkData(0,0,9223372036854775807)
        # UDF not support mix up with build-in functions
        # it seems like not support scalar_function mix up with aggregate functions
...
@@ -204,147 +434,162 @@ class TDTestCase:
        tdSql.error("select sum_double(id) ,add_one(id) from tb1")
        tdSql.error("select sum_double(id) ,max(id) from st")
        tdSql.error("select sum_double(id) ,max(id) from tb1")
        tdSql.error("select twa(id),add_one(id) from st")
        tdSql.error("select twa(id),add_one(id) from tb1")
        # UDF function not support Arithmetic ===================
        tdSql.query("select max(id) + 5 from st")
        tdSql.query("select max(id) + 5 from tb1")
        tdSql.query("select max(id) + avg(val) from st")
        tdSql.query("select abs_max(number)*5 from st")
        tdSql.checkData(0,0,46116860184273879040.000000000)
        tdSql.query("select abs_max(number)*5 from tb1")
        tdSql.checkData(0,0,2000.000000000)
        tdSql.query("select max(id) + avg(val) from tb1")
        tdSql.query("select abs_max(number) + 5 from st")
        tdSql.query("select add_one(id) + 5 from st")
        tdSql.checkData(4,0,10.000000000)
        tdSql.query("select add_one(id)/5 from tb1")
        tdSql.checkData(4,0,1.000000000)
        tdSql.query("select sum_double(id)-5 from st")
        tdSql.checkData(0,0,39.000000000)
        tdSql.query("select sum_double(id)*5 from tb1")
        tdSql.checkData(0,0,100.000000000)
        tdSql.query("select abs_max(number) + 5 from tb1")
        tdSql.error("select abs_max(number) + max(id) from st")
        tdSql.query("select abs_max(number)*abs_max(val) from st")
        tdSql.query("select sum_double(id) + sum_double(id) from st")
        tdSql.checkData(0,0,88.000000000)
        tdLog.info("======= UDF Nested query test =======")
        tdSql.query("select sum(id) from (select id from st)")
        tdSql.checkData(0,0,22)
        #UDF bug no 5 -> not support Nested query
        # tdSql.query("select abs_max(number) from (select number from st)")
        #UDF bug -> Nested query
        # tdSql.checkData(0,0,9223372036854775807)
        # outer nest query
        # tdSql.query("select abs_max(number) from (select number from bound)")
        tdSql.query("select abs_max(number) from (select number from st)")
        # tdSql.checkData(0,0,9223372036854775807)
        tdSql.checkData(0,0,9223372036854775807)
        # tdSql.query("select sum_double(id) from (select id from st)")
        tdSql.query("select abs_max(number) from (select number from bound)")
        # tdSql.checkData(0,0,44)
        tdSql.checkData(0,0,9223372036854775807)
        # tdSql.query("select sum_double(id) from (select id from tb1)")
        tdSql.query("select sum_double(id) from (select id from st)")
        # tdSql.checkData(0,0,10)
        tdSql.checkData(0,0,44)
        tdSql.query("select sum_double(id) from (select id from bound)")
        tdSql.checkData(0,0,4)
        tdSql.query("select add_one(id) from (select id from st);")
        tdSql.checkRows(14)
        tdSql.checkData(1,0,2)
        tdSql.query("select add_one(id) from (select id from bound);")
        tdSql.checkRows(4)
        tdSql.checkData(1,0,-2147483644)
        # UDF bug no 6 -> group by work error
        # inner nest query
        tdLog.info("======= UDF work with group by =======")
        tdSql.query("select id from (select add_one(id) id from st)")
        tdSql.checkRows(14)
        tdSql.checkData(13,0,None)
        tdSql.query("select id from (select add_one(id) id from bound)")
        tdSql.checkRows(4)
        tdSql.checkData(3,0,None)
        # tdSql.query("select sum_double(id) from st group by tbname;")
        tdSql.query("select id from (select sum_double(id) id from bound)")
        # tdSql.checkData(0,0,6)
        tdSql.checkData(0,0,4)
        # tdSql.checkData(0,1,'tb1')
        tdSql.query("select id from (select sum_double(id) id from st)")
        # it will crash taos shell
        # tdSql.checkData(1,0,2)
        tdSql.checkData(0,0,44)
        # tdSql.checkData(1,1,'tb2')
        # tdSql.query("select sum_double(id) from st group by id;")
        # tdSql.checkRows(2)
        # tdSql.query("select sum_double(id) from st group by tbname order by ts asc;")
        tdSql.query("select id from (select abs_max(number) id from st)")
        # it will crash taos shell
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select id from (select abs_max(number) id from bound)")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(1s)")
        # inner and outer nest query
        tdSql.checkData(0,1,20)
        tdSql.error("select sum_double(id) from st session(ts, 1s) interval (10s,1s) sliding(10s) fill (NULL) ")
        tdSql.error("select sum_double(id) from st session(ts, 1s)")
        tdSql.query("select sum_double(id) from tb1 session(ts, 1s)")
        tdSql.checkData(0,1,20)
        # UDF -> bug no 7 : intervals sliding values calculation error
        tdSql.query("select add_one(id) from (select add_one(id) id from st)")
        # tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2")
        tdSql.checkRows(14)
        # tdSql.checkData(0,1,20)
        tdSql.checkData(0,0,2)
        # tdSql.checkData(1,1,20)
        tdSql.checkData(1,0,3)
        tdSql.query("select add_one(id) from (select add_one(id) id from tb1)")
        tdSql.checkRows(5)
        tdSql.checkData(0,0,2)
        tdSql.checkData(1,0,3)
        tdSql.query("select sum_double(sumdb) from (select sum_double(id) sumdb from st)")
        tdSql.query("select sum_double(sumdb) from (select sum_double(id) sumdb from tb1)")
        # scalar_function can't work when using interval and sliding =========
        tdSql.query("select abs_max(number) from (select abs_max(number) number from st)")
        tdSql.error("select add_one(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2 ")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select abs_max(number) from (select abs_max(number) number from bound)")
        tdSql.checkData(0,0,9223372036854775807)
        # nest inner and outer with build-in func
        tdSql.query("select max(number) from (select abs_max(number) number from st)")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select max(number) from (select abs_max(number) number from bound)")
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select sum_double(sumdb) from (select sum_double(id) sumdb from st)")
        tdSql.query("select sum(sumdb) from (select sum_double(id) sumdb from tb1)")
        tdSql.checkData(0,0,20)
        tdLog.info(" =====================test illegal creation method =====================")
        tdSql.execute("drop function add_one")
        # tdSql.execute("drop function add_one")
        tdSql.execute("drop function abs_max")
        tdSql.execute("drop function sum_double")
        tdSql.execute("create aggregate function error_use1 as '/tmp/abs_max.so' outputtype bigint ")
        tdSql.error("select error_use1(number) from st")
        # UDF -> bug no 8: error return values when create aggregate functions as an scalar_function
        # illega UDF create aggregate functions as an scalar_function
        # with no aggregate
        # tdSql.execute("create function abs_max as '/tmp/abs_max.so' outputtype bigint bufsize 128")
        tdSql.execute("create function abs_max as '/tmp/abs_max.so' outputtype bigint bufsize 128")
        # tdSql.query("select abs_max(number) from st") # this bug will return 3 rows
        tdSql.error("select abs_max(number) from st")
        # tdSql.checkRows(1)
        tdSql.execute("create function sum_double as '/tmp/sum_double.so' outputtype bigint bufsize 128")
        # tdSql.execute("create function sum_double as '/tmp/sum_double.so' outputtype bigint bufsize 128")
        tdSql.error("select sum_double(id) from st")
        # tdSql.execute("select sum_double(id) from st")
        # tdSql.checkRows(1)
        # UDF -> bug no 9: give bufsize for scalar_function add_one;
        # UDF -> need improve : when outputtype is not match datatype which is defined in function codes
        tdSql.execute("create function add_one as '/tmp/add_one.so' outputtype bigint bufsize 128")
        # tdSql.error("select add_one(val) from st") # it should return error not [] for not match col datatype
        # tdSql.query("select add_one(id) from st") # return error query result
        # tdSql.checkData(0,0,1)
        # tdSql.checkData(1,0,2)
        # tdSql.checkData(5,0,1)
        # tdSql.checkData(10,0,-2147483645)
        # tdSql.checkData(13,0,None)
        # UDF -> improve : aggregate function with no bufsize : it seems with no affect
        # tdSql.execute("drop function abs_max")
        tdSql.execute("drop function abs_max")
        # tdSql.execute("drop function sum_double")
        tdSql.execute("drop function sum_double")
        tdSql.execute("create aggregate function abs_max as '/tmp/abs_max.so' outputtype bigint ")
        tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype int ")
        tdSql.query("select sum_double(id) from st")
        tdSql.checkData(0,0,44)
        tdSql.query("select sum_double(id) from tb1")
        tdSql.checkData(0,0,20)
        # tdSql.query("select abs_max(number) from st")
        tdSql.query("select abs_max(number) from st")
        # tdSql.checkData(0,0,9223372036854775807)
        tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select abs_max(number) from tb1")
        tdSql.checkData(0,0,400)
        #UDF bug no 10 -> create function datatype of outputtype not match col datatype
        tdSql.execute("drop function abs_max")
        tdSql.execute("drop function sum_double")
        tdSql.execute("drop function add_one")
        tdSql.execute("create function add_one as '/tmp/add_one.so' outputtype bigint;")
        tdSql.execute("create aggregate function abs_max as '/tmp/abs_max.so' outputtype int bufsize 128;")
        tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype double bufsize 128;")
        # tdSql.query("select sum_double(id) from st") this bug will return 0.000000
        # tdSql.checkData(0,0,44)
        # tdSql.query("select sum_double(id) from tb1")
        # tdSql.checkData(0,0,20) this bug will return 0.000000
        # tdSql.query("select add_one(id) from st") this bug will return series error values
        # tdSql.checkData(0,0,1)
        # tdSql.checkData(1,0,2)
        # tdSql.checkData(5,0,1)
        # tdSql.checkData(10,0,-2147483645)
        # tdSql.checkData(13,0,None)
        # tdSql.query("select add_one(id) from tb1") this bug will return series error values
        # tdSql.checkData(0,0,1)
        # tdSql.checkData(2,0,3)
        # tdSql.query("select abs_max(id) from st")
        # tdSql.checkData(0,0,9223372036854775807)
        tdSql.query("select abs_max(number) from tb1")  # it seems work well
        tdSql.checkData(0,0,400)
        # UDF scalar function not support group by
        tdSql.error("select add_one(id) from st group by tbname")
        # UDF bug no 11 -> follow test case will coredump for taosd and let data lost
        # UDF : give aggregate for scalar_function add_one ,it can't work well
        # tdSql.query("select add_one(id) from st group by tbname")
        tdSql.execute("drop function add_one")
        tdSql.execute("create aggregate function add_one as '/tmp/add_one.so' outputtype bigint bufsize 128")
        # UDF -> bug no 12: give aggregate for scalar_function add_one ,it will let taosd coredump as data lost
        tdSql.error("select add_one(id) from st")
        # tdSql.execute("drop function add_one")
        # tdSql.execute("create aggregate function add_one as '/tmp/add_one.so' outputtype bigint bufsize 128")
        # udf must give col list
        # tdSql.query("select add_one(id) from st")
        tdSql.error("select add_one(*) from st ")
        tdSql.error("select add_one(*) from tb1 ")
        # UDF bug no 13 -> follow test case will coredump for taosc
        # tdSql.query("select add_one(*) from st ")
        # one udf function can multi use
        # tdSql.query("select add_one(*) from tb1 ")
        tdSql.query("select abs_max(id),abs_max(number) from st ")
        tdSql.query("select abs_max(number),abs_max(number)*3 from st ")
        # UDF bug no 14 -> follow test case will coredump for taosc
        tdSql.query("select abs_max(number),abs_max(number)*3 from tb1 ")
        # tdSql.query("select abs_max(id),abs_max(number) from st ")
        tdSql.query("select sum_double(id),sum_double(id) from st ")
        # tdSql.query("select abs_max(number),abs_max(number) from st ")
        # tdSql.query("select sum_double(id),sum_double(id) from st ")
    def run(self):
        tdSql.prepare()
...
@@ -366,4 +611,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tests/pytest/table/create.py
...
@@ -178,6 +178,8 @@ class TDTestCase:
        tdSql.checkRows(1)
        self.tb193new = "table_193~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST123"
        tdSql.error("create table db.`%s` using db.`%s` tags(1)" % (self.tb193new, self.stb1))
        # case for TD-10691
        tdSql.error("create table ttb1(ts timestamp, file int )")
...
tests/script/api/batchprepare.c
...
@@ -1765,7 +1765,7 @@ int stmt_funcb_autoctb_e1(TAOS_STMT *stmt) {
  int code = taos_stmt_prepare(stmt, sql, 0);
  if (code != 0){
    printf("failed to execute taos_stmt_prepare. error:%s\n", taos_stmt_errstr(stmt));
-   return -1;
+   exit(1);
  }

  int id = 0;
...
@@ -1801,9 +1801,44 @@ int stmt_funcb_autoctb_e1(TAOS_STMT *stmt) {
  return 0;
}

int stmt_multi_insert_check(TAOS_STMT *stmt) {
  char *sql;

  // The number of tag column list is not equal to the number of tag value list
  sql = "insert into ? using stb1 (id1) tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
  if (0 == taos_stmt_prepare(stmt, sql, 0)) {
    printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
    exit(1);
  }

  // The number of column list is not equal to the number of value list
  sql = "insert into ? using stb1 tags(1,?,2,?,4,?,6.0,?,'b') "
        "(ts, b, v1, v2, v4, v8, f4, f8, bin) values(?,?,?,?,?,?,?,?,?,?)";
  if (0 == taos_stmt_prepare(stmt, sql, 0)) {
    printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
    exit(1);
  }

  sql = "insert into ? using stb1 () tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
  if (0 == taos_stmt_prepare(stmt, sql, 0)) {
    printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
    exit(1);
  }

  sql = "insert into ? using stb1 ( tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
  if (0 == taos_stmt_prepare(stmt, sql, 0)) {
    printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
    exit(1);
  }

  sql = "insert into ? using stb1 ) tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
  if (0 == taos_stmt_prepare(stmt, sql, 0)) {
    printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
    exit(1);
  }

  return 0;
}

//1 tables 10 records
int stmt_funcb_autoctb_e2(TAOS_STMT *stmt) {
...
@@ -4509,7 +4544,6 @@ void* runcase(void *par) {
  (void)idx;

#if 1
  prepare(taos, 1, 1);
...
@@ -4823,6 +4857,16 @@ void* runcase(void *par) {
#endif

#if 1
  prepare(taos, 1, 0);

  stmt = taos_stmt_init(taos);

  printf("stmt_multi_insert_check start\n");
  stmt_multi_insert_check(stmt);
  printf("stmt_multi_insert_check end\n");
  taos_stmt_close(stmt);
#endif

#if 1
  prepare(taos, 1, 1);
...
@@ -5011,7 +5055,6 @@ void* runcase(void *par) {
  printf("check result end\n");
#endif

#if 1
  preparem(taos, 0, idx);
...
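The new stmt_multi_insert_check case treats a successful taos_stmt_prepare call on a malformed multi-insert statement as a failure. A minimal stand-alone sketch of the same negative-test pattern follows; the connection parameters and the helper name expect_prepare_failure are illustrative assumptions, while taos_stmt_init, taos_stmt_prepare, taos_stmt_errstr and taos_stmt_close are the TDengine C client calls already used in the test above.

#include <stdio.h>
#include <stdlib.h>
#include <taos.h>

/* Placeholder helper (assumption, not part of the commit): a malformed
 * statement must make taos_stmt_prepare return a non-zero code. */
static void expect_prepare_failure(TAOS *taos, const char *sql) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  if (stmt == NULL) {
    printf("taos_stmt_init failed\n");
    exit(1);
  }
  if (0 == taos_stmt_prepare(stmt, sql, 0)) {
    /* prepare accepted a statement that should have been rejected */
    printf("unexpected success for sql:%s\n", sql);
    exit(1);
  }
  printf("rejected as expected (%s): %s\n", taos_stmt_errstr(stmt), sql);
  taos_stmt_close(stmt);
}

int main(void) {
  /* connection parameters are placeholders for illustration only */
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) {
    printf("failed to connect\n");
    return 1;
  }
  /* tag column list and tag value list lengths do not match, as in the first case above */
  expect_prepare_failure(taos, "insert into ? using stb1 (id1) tags(1,?) values(?,?,?,?,?,?,?,?,?,?)");
  taos_close(taos);
  return 0;
}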
tests/script/general/parser/udf_dll.sim
...
@@ -10,7 +10,7 @@ sql connect
print ======================== dnode1 start
sql create function add_one as '/tmp/add_one.so' outputtype int;
-sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype int;
+sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype bigint;
sql show functions;
if $rows != 2 then
  return -1
...
tests/script/general/parser/udf_dll_stable.sim
...
@@ -11,7 +11,7 @@ print ======================== dnode1 start
sql create function add_one as '/tmp/add_one.so' outputtype int;
sql create function add_one_64232 as '/tmp/add_one_64232.so' outputtype int;
-sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype int;
+sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype bigint;
sql show functions;
if $rows != 3 then
  return -1
...
tests/script/sh/abs_max.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <inttypes.h>
typedef struct SUdfInit{
   int maybe_null;       /* 1 if function can return NULL */
   int decimals;         /* for real functions */
-  long long length;     /* For string functions */
+  int64_t length;       /* For string functions */
   char *ptr;            /* free pointer for function data */
   int const_item;       /* 0 if result is independent of arguments */
} SUdfInit;
...
@@ -14,31 +15,36 @@ typedef struct SUdfInit{
#define TSDB_DATA_INT_NULL    0x80000000L
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L

-void abs_max(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
+void abs_max(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput,
             int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
   int i;
-  long r = 0;
+  int64_t r = 0;
-  printf("abs_max input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
+  // printf("abs_max input data:%p, type:%d, rows:%d, ts:%p, %" PRId64 ", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
   if (itype == 5) {
-    r=*(long *)dataOutput;
+    r=*(int64_t *)dataOutput;
     *numOfOutput=0;
     for(i=0;i<numOfRows;++i) {
-      if (*((long *)data + i) == TSDB_DATA_BIGINT_NULL) {
+      if (*((int64_t *)data + i) == TSDB_DATA_BIGINT_NULL) {
        continue;
      }
      *numOfOutput=1;
-     long v = labs(*((long *)data + i));
+     //int64_t v = abs(*((int64_t *)data + i));
+     int64_t v = *((int64_t *)data + i);
+     if (v < 0) {
+       v = 0 - v;
+     }
      if (v > r) {
        r = v;
      }
    }
-   *(long *)dataOutput=r;
+   *(int64_t *)dataOutput=r;
-   printf("abs_max out, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
+   // printf("abs_max out, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
  }else {
    *numOfOutput=0;
  }
}
...
@@ -47,44 +53,43 @@ void abs_max(char* data, short itype, short ibytes, int numOfRows, long long* ts
void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
   int i;
-  int r = 0;
+  //int64_t r = 0;
-  printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
+  // printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
   // *numOfOutput=1;
-  printf("abs_max finalize, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
+  // printf("abs_max finalize, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}

void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
-  int r = 0;
+  int64_t r = 0;
   if (numOfRows > 0) {
-    r = *((long *)data);
+    r = *((int64_t *)data);
   }
-  printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
+  // printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
   for (int i = 1; i < numOfRows; ++i) {
-    printf("abs_max_merge %d - %ld\n", i, *((long *)data + i));
+    // printf("abs_max_merge %d - %" PRId64"\n", i, *((int64_t *)data + i));
-    if (*((long*)data + i) > r) {
+    if (*((int64_t*)data + i) > r) {
-      r = *((long *)data + i);
+      r = *((int64_t *)data + i);
    }
  }
-  *(long *)dataOutput=r;
+  *(int64_t *)dataOutput=r;
  if (numOfRows > 0) {
    *numOfOutput=1;
  } else {
    *numOfOutput=0;
  }
-  printf("abs_max_merge, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
+  // printf("abs_max_merge, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}

int abs_max_init(SUdfInit* buf) {
-  printf("abs_max init\n");
+  // printf("abs_max init\n");
  return 0;
}

void abs_max_destroy(SUdfInit* buf) {
-  printf("abs_max destroy\n");
+  // printf("abs_max destroy\n");
}
\ No newline at end of file
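The abs_max.c and sum_double.c rewrites replace long/%lld with int64_t/PRId64 because a BIGINT cell handed to a UDF is always 8 bytes, while the width of long differs between LP64 Linux and LLP64 platforms such as 64-bit Windows. A small stand-alone illustration (not part of the commit; the cell value is just the boundary number used in udf.py):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  /* long is 8 bytes on LP64 Linux but 4 bytes on LLP64 platforms,
   * while int64_t is 8 bytes everywhere. */
  printf("sizeof(long)    = %zu\n", sizeof(long));
  printf("sizeof(int64_t) = %zu\n", sizeof(int64_t));

  char cell[8];                              /* one BIGINT column value as raw bytes */
  int64_t v = INT64_C(9223372036854775806);  /* boundary value also checked in udf.py */
  memcpy(cell, &v, sizeof(v));

  int64_t r;                                 /* portable read and print of the cell */
  memcpy(&r, cell, sizeof(r));
  printf("value = %" PRId64 "\n", r);
  return 0;
}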
tests/script/sh/add_one.c
...
@@ -14,20 +14,18 @@ void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts
             int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
   int i;
   int r = 0;
-  printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
+  // printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
   if (itype == 4) {
     for(i=0;i<numOfRows;++i) {
-      printf("input %d - %d", i, *((int *)data + i));
+      // printf("input %d - %d", i, *((int *)data + i));
       *((int *)dataOutput+i)=*((int *)data + i) + 1;
-      printf(", output %d\n", *((int *)dataOutput+i));
+      // printf(", output %d\n", *((int *)dataOutput+i));
       if (tsOutput) {
         *(long long*)tsOutput=1000000;
       }
     }
     *numOfOutput=numOfRows;
-    printf("add_one out, numOfOutput:%d\n", *numOfOutput);
+    // printf("add_one out, numOfOutput:%d\n", *numOfOutput);
  }
}
\ No newline at end of file
tests/script/sh/add_one_64232.c
...
@@ -28,6 +28,4 @@ void add_one_64232(char* data, short itype, short ibytes, int numOfRows, long lo
    printf("add_one_64232 out, numOfOutput:%d\n", *numOfOutput);
  }
}
\ No newline at end of file
tests/script/sh/sum_double.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <inttypes.h>
typedef struct SUdfInit{
   int maybe_null;       /* 1 if function can return NULL */
   int decimals;         /* for real functions */
-  long long length;     /* For string functions */
+  int64_t length;       /* For string functions */
   char *ptr;            /* free pointer for function data */
   int const_item;       /* 0 if result is independent of arguments */
} SUdfInit;
...
@@ -13,13 +14,13 @@ typedef struct SUdfInit{
#define TSDB_DATA_INT_NULL 0x80000000L

-void sum_double(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
+void sum_double(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput,
             int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
   int i;
-  int r = 0;
+  int64_t r = 0;
-  printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
+  printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%" PRId64 ", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
   if (itype == 4) {
-    r=*(int *)dataOutput;
+    r=*(int64_t *)dataOutput;
     *numOfOutput=0;
     for(i=0;i<numOfRows;++i) {
...
@@ -29,10 +30,10 @@ void sum_double(char* data, short itype, short ibytes, int numOfRows, long long*
       *numOfOutput=1;
       r+=*((int *)data + i);
-      *(int *)dataOutput=r;
+      *(int64_t *)dataOutput=r;
     }
-    printf("sum_double out, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
+    // printf("sum_double out, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
  }
}
...
@@ -40,45 +41,44 @@ void sum_double(char* data, short itype, short ibytes, int numOfRows, long long*
void sum_double_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
   int i;
-  int r = 0;
+  int64_t r = 0;
-  printf("sum_double_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
+  // printf("sum_double_finalize dataoutput:%p:%"PRId64", numOfOutput:%d, buf:%p\n", dataOutput, *(int64_t*)dataOutput, *numOfOutput, buf);
-  *numOfOutput=1;
+  // *numOfOutput=1;
-  *(int *)(buf->ptr)=*(int *)dataOutput*2;
+  *(int64_t*)(buf->ptr)=*(int64_t*)dataOutput*2;
-  *(int *)dataOutput=*(int *)(buf->ptr);
+  *(int64_t*)dataOutput=*(int64_t*)(buf->ptr);
-  printf("sum_double finalize, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
+  // printf("sum_double finalize, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}

-void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
+void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int* numOfOutput, SUdfInit* buf) {
   int r = 0;
-  int sum = 0;
+  int64_t sum = 0;
-  printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
+  // printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
   for (int i = 0; i < numOfRows; ++i) {
-     printf("sum_double_merge %d - %d\n", i, *((int *)data + i));
+     // printf("sum_double_merge %d - %"PRId64"\n", i, *((int64_t*)data + i));
-     sum +=*((int *)data + i);
+     sum +=*((int64_t *)data + i);
   }
-  *(int *)dataOutput+=sum;
+  *(int64_t *)dataOutput+=sum;
  if (numOfRows > 0) {
    *numOfOutput=1;
  } else {
    *numOfOutput=0;
  }
-  printf("sum_double_merge, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
+  // printf("sum_double_merge, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}

int sum_double_init(SUdfInit* buf) {
   buf->maybe_null=1;
-  buf->ptr=malloc(sizeof(int));
+  buf->ptr=malloc(sizeof(int64_t));
-  printf("sum_double init\n");
+  // printf("sum_double init\n");
   return 0;
}

void sum_double_destroy(SUdfInit* buf) {
   free(buf->ptr);
-  printf("sum_double destroy\n");
+  // printf("sum_double destroy\n");
}
\ No newline at end of file