taosdata/TDengine · Commit 62927b93 (unverified)

Merge branch '2.4' into hmoem

Authored by Zhiyu Yang on Apr 28, 2022; committed via GitHub on Apr 28, 2022.
Parents: d4963023, 98846d56
Showing 31 changed files with 2,329 additions and 2,136 deletions (+2329 / -2136).
Changed files:

Jenkinsfile2                                              +0     -14
examples/go/taosdemo.go                                   +1     -1
packaging/cfg/taos.cfg                                    +3     -3
packaging/deb/makedeb.sh                                  +1     -1
packaging/rpm/tdengine.spec                               +1     -1
packaging/tools/install.sh                                +8     -3
packaging/tools/install_client.sh                         +10    -7
packaging/tools/makeclient.sh                             +31    -14
packaging/tools/makepkg.sh                                +39    -14
src/client/src/tscSQLParser.c                             +60    -14
src/client/src/tscSql.c                                   +2     -0
src/client/src/tscSubquery.c                              +3     -3
src/common/src/tarithoperator.c                           +273   -1
src/common/src/texpr.c                                    +182   -5
src/inc/taosdef.h                                         +2     -1
src/kit/taos-tools                                        +1     -1
src/os/src/detail/osFile.c                                +2     -2
src/query/inc/qExecutor.h                                 +9     -2
src/query/inc/sql.y                                       +1     -0
src/query/src/qExecutor.c                                 +156   -68
src/query/src/qTsbuf.c                                    +8     -1
src/query/src/sql.c                                       +1277  -1826
tests/perftest-scripts/perftest-query.sh                  +16    -32
tests/pytest/perfbenchmark/insertFromCSVPerformance.py    +4     -3
tests/pytest/perfbenchmark/queryPerformance.py            +15    -14
tests/pytest/perfbenchmark/taosdemoPerformance.py         +6     -5
tests/pytest/query/queryGroupbySort.py                    +32    -0
tests/pytest/tools/taosdumpTest2.py                       +1     -1
tests/pytest/util/dnodes.py                               +169   -81
tests/script/general/parser/groupby.sim                   +5     -5
tests/system-test/5-taos-tools/basic.py                   +11    -13
Jenkinsfile2

@@ -275,20 +275,6 @@ pipeline {
         }
       }
       parallel {
-        stage ('build worker08_arm32') {
-          agent {label " worker08_arm32"}
-          steps {
-            timeout(time: 20, unit: 'MINUTES') {
-              pre_test()
-              script {
-                sh '''
-                echo "worker08_arm32 build done"
-                date
-                '''
-              }
-            }
-          }
-        }
         stage ('build worker07_arm64') {
           agent {label " worker07_arm64 "}
           steps {
examples/go/taosdemo.go

@@ -26,7 +26,7 @@ import (
 	"sync"
 	"time"

-	_ "github.com/taosdata/driver-go/taosSql"
+	_ "github.com/taosdata/driver-go/v2/taosSql"
 )

 const (
packaging/cfg/taos.cfg

@@ -174,13 +174,13 @@ keepColumnName 1
 # maxConnections        5000

 # stop writing logs when the disk size of the log folder is less than this value
-# minimalLogDirGB       0.1
+# minimalLogDirGB       1.0

 # stop writing temporary files when the disk size of the tmp folder is less than this value
-# minimalTmpDirGB       0.1
+# minimalTmpDirGB       1.0

 # if disk free space is less than this value, taosd service exit directly within startup process
-# minimalDataDirGB      0.1
+# minimalDataDirGB      2.0

 # One mnode is equal to the number of vnode consumed
 # mnodeEqualVnodeNum    4
packaging/deb/makedeb.sh

@@ -59,7 +59,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat
 cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb   ${pkg_dir}${install_home_path}/bin
 cp ${compile_dir}/build/bin/taosd                         ${pkg_dir}${install_home_path}/bin
-cp ${compile_dir}/build/bin/taosBenchmark                 ${pkg_dir}${install_home_path}/bin
+#cp ${compile_dir}/build/bin/taosBenchmark                ${pkg_dir}${install_home_path}/bin

 if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
     cp ${compile_dir}/build/bin/taosadapter               ${pkg_dir}${install_home_path}/bin ||:
packaging/rpm/tdengine.spec

@@ -68,7 +68,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
 cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb   %{buildroot}%{homepath}/bin
 cp %{_compiledir}/build/bin/taos                          %{buildroot}%{homepath}/bin
 cp %{_compiledir}/build/bin/taosd                         %{buildroot}%{homepath}/bin
-cp %{_compiledir}/build/bin/taosBenchmark                 %{buildroot}%{homepath}/bin
+#cp %{_compiledir}/build/bin/taosBenchmark                %{buildroot}%{homepath}/bin

 if [ -f %{_compiledir}/build/bin/taosadapter ]; then
     cp %{_compiledir}/build/bin/taosadapter               %{buildroot}%{homepath}/bin ||:
packaging/tools/install.sh

@@ -808,6 +808,11 @@ function updateProduct() {
   install_log
   install_header
   install_lib
+
+  if [ "$verMode" == "cluster" ]; then
+    install_connector
+  fi
+  install_examples

   if [ -z $1 ]; then
     install_bin

@@ -888,9 +893,9 @@ function installProduct() {
   #install_avro lib
   #install_avro lib64

-  # if [ "$pagMode" != "lite" ]; then
-  #   install_connector
-  # fi
+  if [ "$verMode" == "cluster" ]; then
+    install_connector
+  fi
   install_examples

   if [ -z $1 ]; then
     # install service and client
packaging/tools/install_client.sh

@@ -21,6 +21,7 @@ tarName="taos.tar.gz"
 osType=Linux
 pagMode=full
+verMode=edge

 if [ "$osType" != "Darwin" ]; then
     script_dir=$(dirname $(readlink -f "$0"))

@@ -85,10 +86,12 @@ function install_main_path() {
     ${csudo} mkdir -p ${install_main_dir}/bin
     ${csudo} mkdir -p ${install_main_dir}/driver
     if [ $productName == "TDengine" ]; then
-        ${csudo} mkdir -p ${install_main_dir}/connector
         ${csudo} mkdir -p ${install_main_dir}/examples
     fi
     ${csudo} mkdir -p ${install_main_dir}/include
+    if [ "$verMode" == "cluster" ]; then
+        ${csudo} mkdir -p ${install_main_dir}/connector
+    fi
 }

 function install_bin() {

@@ -251,9 +254,9 @@ function update_TDengine() {
   install_header
   install_lib
   install_jemalloc
-  # if [ "$pagMode" != "lite" ]; then
-  #   install_connector
-  # fi
+  if [ "$verMode" == "cluster" ]; then
+    install_connector
+  fi
   install_examples
   install_bin
   install_config

@@ -279,9 +282,9 @@ function install_TDengine() {
   install_header
   install_lib
   install_jemalloc
-  # if [ "$pagMode" != "lite" ]; then
-  #   install_connector
-  # fi
+  if [ "$verMode" == "cluster" ]; then
+    install_connector
+  fi
   install_examples
   install_bin
   install_config
packaging/tools/makeclient.sh

@@ -126,6 +126,12 @@ if [ "$osType" == "Darwin" ]; then
   sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh
   mv install_client_temp.sh ${install_dir}/install_client.sh
 fi

+if [ "$verMode" == "cluster" ]; then
+  sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_client.sh >> install_client_temp.sh
+  mv install_client_temp.sh ${install_dir}/install_client.sh
+fi
+
 if [ "$pagMode" == "lite" ]; then
   sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >> install_client_temp.sh
   mv install_client_temp.sh ${install_dir}/install_client.sh

@@ -148,20 +154,31 @@ if [[ $productName == "TDengine" ]]; then
   mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../src/kit/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
 fi

-# Copy connector
-connector_dir="${code_dir}/connector"
-mkdir -p ${install_dir}/connector
-if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
-  if [ "$osType" != "Darwin" ]; then
-    cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
-  fi
-  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
-    cp -r ${connector_dir}/go ${install_dir}/connector
-  else
-    echo "WARNING: go connector not found, please check if want to use it!"
-  fi
-  cp -r ${connector_dir}/python ${install_dir}/connector
-  cp -r ${connector_dir}/nodejs ${install_dir}/connector
-fi
+if [ "$verMode" == "cluster" ]; then
+  # Copy connector
+  connector_dir="${code_dir}/connector"
+  mkdir -p ${install_dir}/connector
+  if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+    if [ "$osType" != "Darwin" ]; then
+      cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
+    fi
+    if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+      cp -r ${connector_dir}/go ${install_dir}/connector
+    else
+      echo "WARNING: go connector not found, please check if want to use it!"
+    fi
+    git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
+    rm -rf ${install_dir}/connector/python/.git ||:
+    # cp -r ${connector_dir}/python ${install_dir}/connector
+    git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
+    rm -rf ${install_dir}/connector/nodejs/.git ||:
+    git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
+    rm -rf ${install_dir}/connector/dotnet/.git ||:
+    # cp -r ${connector_dir}/nodejs ${install_dir}/connector
+    git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
+    rm -rf ${install_dir}/connector/rust/.git ||:
+  fi
+fi
packaging/tools/makepkg.sh

@@ -64,13 +64,24 @@ if [ "$pagMode" == "lite" ]; then
   taostools_bin_files=""
 else
   wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
       && echo "TDinsight.sh downloaded!" \
       || echo "failed to download TDinsight.sh"
+  # download TDinsight caches
+  orig_pwd=$(pwd)
+  tdinsight_caches=""
+  cd ${build_dir}/bin/ && \
+    chmod +x TDinsight.sh
+  tdinsight_caches=$(./TDinsight.sh --download-only | xargs -i printf "${build_dir}/bin/{} ")
+  cd $orig_pwd
+  echo "TDinsight caches: $tdinsight_caches"
+
   taostools_bin_files=" ${build_dir}/bin/${dumpName} \
           ${build_dir}/bin/${benchmarkName} \
-          ${build_dir}/bin/TDinsight.sh "
+          ${build_dir}/bin/TDinsight.sh \
+          $tdinsight_caches"

 #!!! do not change taosadapter here
 bin_files="${build_dir}/bin/${serverName} \

@@ -291,20 +302,34 @@ fi
 mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt

 # Copy connector
-#connector_dir="${code_dir}/connector"
-#mkdir -p ${install_dir}/connector
-#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
-#  cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
-#  if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
-#    cp -r ${connector_dir}/go ${install_dir}/connector
-#  else
-#    echo "WARNING: go connector not found, please check if want to use it!"
-#  fi
-#  cp -r ${connector_dir}/python ${install_dir}/connector
-#  cp -r ${connector_dir}/nodejs ${install_dir}/connector
-#fi
+if [ "$verMode" == "cluster" ]; then
+  connector_dir="${code_dir}/connector"
+  mkdir -p ${install_dir}/connector
+  if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+    cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
+    if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+      cp -r ${connector_dir}/go ${install_dir}/connector
+    else
+      echo "WARNING: go connector not found, please check if want to use it!"
+    fi
+    git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
+    rm -rf ${install_dir}/connector/python/.git ||:
+    git clone --depth 1 https://github.com/taosdata/taos-connector-node ${install_dir}/connector/nodejs
+    rm -rf ${install_dir}/connector/nodejs/.git ||:
+    git clone --depth 1 https://github.com/taosdata/taos-connector-dotnet ${install_dir}/connector/dotnet
+    rm -rf ${install_dir}/connector/dotnet/.git ||:
+    git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
+    rm -rf ${install_dir}/connector/rust/.git ||:
+    # cp -r ${connector_dir}/python ${install_dir}/connector
+    # cp -r ${connector_dir}/nodejs ${install_dir}/connector
+  fi
+fi

 # Copy release note
-# cp ${script_dir}/release_note ${install_dir}
+cp ${script_dir}/release_note ${install_dir}

 # exit 1
src/client/src/tscSQLParser.c

@@ -282,6 +282,8 @@ static uint8_t convertRelationalOperator(SStrToken *pToken) {
       return TSDB_BINARY_OP_DIVIDE;
     case TK_REM:
       return TSDB_BINARY_OP_REMAINDER;
+    case TK_BITAND:
+      return TSDB_BINARY_OP_BITAND;
     case TK_LIKE:
       return TSDB_RELATION_LIKE;
     case TK_MATCH:

@@ -3958,8 +3960,8 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
   const char* msg4 = "join query does not support group by";
   const char* msg5 = "not allowed column type for group by";
   const char* msg6 = "tags not allowed for table query";
-  const char* msg7 = "not support group by expression";
-  const char* msg8 = "normal column can only locate at the end of group by clause";
+  //const char* msg7 = "not support group by primary key";
+  //const char* msg8 = "normal column can only locate at the end of group by clause";
   const char* msg9 = "json tag must be use ->'key'";
   const char* msg10 = "non json column can not use ->'key'";
   const char* msg11 = "group by json->'key' is too long";

@@ -4070,7 +4072,10 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
       // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by
       if (pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
       }
     }
+    /*
+    if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+    }*/

     tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);

@@ -4085,14 +4090,14 @@ int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd
   // 1. only one normal column allowed in the group by clause
   // 2. the normal column in the group by clause can only located in the end position
-  if (numOfGroupCols > 1) {
-    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
-  }
+  // if (numOfGroupCols > 1) {
+  //   return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+  // }

   for (int32_t i = 0; i < num; ++i) {
     SColIndex* pIndex = taosArrayGet(pGroupExpr->columnInfo, i);
-    if (TSDB_COL_IS_NORMAL_COL(pIndex->flag) && i != num - 1) {
-      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
+    if (!TSDB_COL_IS_NORMAL_COL(pIndex->flag)) {
+      // return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
     }
   }

@@ -4358,7 +4363,7 @@ static int32_t getColQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlEx
   };

   if (pQueryInfo->colCond == NULL) {
-    pQueryInfo->colCond = taosArrayInit(2, sizeof(SCond));
+    pQueryInfo->colCond = taosArrayInit(2, sizeof(STblCond));
   }

   taosArrayPush(pQueryInfo->colCond, &cond);

@@ -7866,6 +7871,25 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
   }
 }

+/*
+retrun false : expr is not in groupbu column.
+return true  : expr is in groupby column.
+*/
+static bool check_expr_in_groupby_colum(SGroupbyExpr* pGroupbyExpr, SExprInfo* pExpr){
+  SColIndex* pIndex = NULL;
+  assert(pExpr);
+  if (NULL == pGroupbyExpr)
+    return false;
+  for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
+    pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
+    if (!strcmp(pIndex->name, &pExpr->base.colInfo.name[1])){ // notes:first char is dot, skip one char.
+      return true;
+    }
+  }
+  return false;
+}
+
 /*
  * check for selectivity function + tags column function both exist.
  * 1. tagprj functions are not compatible with aggregated function when missing "group by" clause

@@ -7882,12 +7906,15 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
   int16_t numOfAggregation = 0;

   size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
+  SGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
   for (int32_t i = 0; i < numOfExprs; ++i) {
     SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
     if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ ||
         (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) {
-      tagTsColExists = true;  // selectivity + ts/tag column
-      break;
+      if (false == check_expr_in_groupby_colum(pGroupbyExpr, pExpr)){
+        tagTsColExists = true;  // selectivity + ts/tag column
+        break;
+      }
     }
   }

@@ -10248,6 +10275,23 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
     }
   }

+  if (pSqlExpr->tokenId == TK_BITAND && pSqlExpr->pLeft != NULL && pSqlExpr->pRight != NULL) {
+    // for example: col type is "bool" but expr "col & 1" received
+    uint8_t colType = pLeft->pSchema->type;
+    SStrToken* exprToken = &pSqlExpr->pRight->exprToken;
+    if (pSqlExpr->pLeft->type == SQL_NODE_TABLE_COLUMN && pSqlExpr->pRight->type == SQL_NODE_VALUE) {
+      if (colType == TSDB_DATA_TYPE_BOOL) {
+        if ((exprToken->n != 4 || strncasecmp(exprToken->z, "true", 4)) &&
+            (exprToken->n != 5 || strncasecmp(exprToken->z, "false", 5))) {
+          return TSDB_CODE_TSC_INVALID_OPERATION;
+        }
+      } else if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_UNSIGNED_NUMERIC_TYPE(colType)) {
+        if ((exprToken->n == 4 && strncasecmp(exprToken->z, "true", 4) == 0) ||
+            (exprToken->n == 5 || strncasecmp(exprToken->z, "false", 5) == 0)) {
+          return TSDB_CODE_TSC_INVALID_OPERATION;
+        }
+      }
+    }
+  }
+
   if (pSqlExpr->pRight != NULL) {
     int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pQueryInfo, pCols, uid);
     if (ret != TSDB_CODE_SUCCESS) {

@@ -10285,9 +10329,11 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
     if (pLeft->_node.optr == TSDB_RELATION_ARROW){
       pLeft = pLeft->_node.pLeft;
     }
-    if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL && pLeft->nodeType == TSQL_NODE_COL &&
-        (pLeft->pSchema->type == TSDB_DATA_TYPE_BOOL || pLeft->pSchema->type == TSDB_DATA_TYPE_JSON)) {
-      return TSDB_CODE_TSC_INVALID_OPERATION;
+    if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL && pLeft->nodeType == TSQL_NODE_COL) {
+      if (((*pExpr)->_node.optr != TSDB_BINARY_OP_BITAND && pLeft->pSchema->type == TSDB_DATA_TYPE_BOOL) ||
+          pLeft->pSchema->type == TSDB_DATA_TYPE_JSON) {
+        return TSDB_CODE_TSC_INVALID_OPERATION;
+      }
     }
   }
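The parser hunk above only accepts a bitwise AND between a BOOL column and a boolean literal, or between an integer column and a non-boolean literal. The following is a minimal, self-contained C sketch of that token check (illustrative only; Token, is_bool_literal and bitand_operands_ok are invented names, not the client's SStrToken API):

#include <stdbool.h>
#include <stdio.h>
#include <strings.h>   /* strncasecmp */

/* Hypothetical literal token: 'z' points at its text, 'n' is its length. */
typedef struct { const char *z; int n; } Token;

/* True when the literal is the keyword "true" or "false". */
static bool is_bool_literal(const Token *t) {
  return (t->n == 4 && strncasecmp(t->z, "true", 4) == 0) ||
         (t->n == 5 && strncasecmp(t->z, "false", 5) == 0);
}

/* A BOOL column may only be AND-ed with a boolean literal;
 * an integer column must not be AND-ed with one. */
static bool bitand_operands_ok(bool column_is_bool, const Token *literal) {
  return column_is_bool ? is_bool_literal(literal) : !is_bool_literal(literal);
}

int main(void) {
  Token t_true = { "true", 4 }, t_num = { "1", 1 };
  printf("bool & true -> %d\n", bitand_operands_ok(true, &t_true));  /* 1: accepted */
  printf("bool & 1    -> %d\n", bitand_operands_ok(true, &t_num));   /* 0: rejected */
  printf("int  & 1    -> %d\n", bitand_operands_ok(false, &t_num));  /* 1: accepted */
  return 0;
}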
src/client/src/tscSql.c

@@ -224,6 +224,8 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,
     }
   }

+  printf("connect failed, reason: %s\n\n", taos_errstr(pSql));
+
   return NULL;
 }
src/client/src/tscSubquery.c

@@ -1672,7 +1672,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
     if (pRes1->row > 0 && pRes1->numOfRows > 0) {
       tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
                pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
-      assert(pRes1->row < pRes1->numOfRows);
+      assert(pRes1->row < pRes1->numOfRows || (pRes1->row == pRes1->numOfRows && pRes1->completed));
     } else {
       if (!stableQuery) {
         pRes1->numOfClauseTotal += pRes1->numOfRows;

@@ -1841,7 +1841,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
       SSqlRes* pRes1 = &pSql1->res;

-      if (pRes1->row >= pRes1->numOfRows) {
+      if (pRes1->row >= pRes1->numOfRows && !pRes1->completed) {
         subquerySetState(pSql1, &pSql->subState, i, 0);
       }
     }

@@ -1863,7 +1863,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
       STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

-      if (pRes1->row >= pRes1->numOfRows) {
+      if (pRes1->row >= pRes1->numOfRows && !pRes1->completed) {
         tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self,
                  pSql1->self, pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex);
src/common/src/tarithoperator.c

@@ -123,6 +123,9 @@ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) {
 typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t index);

+void* getVectorValueAddr_BOOL(void *src, int32_t index) {
+  return (void*)((bool *)src + index);
+}
 void* getVectorValueAddr_TINYINT(void *src, int32_t index) {
   return (void*)((int8_t *)src + index);
 }

@@ -156,7 +159,9 @@ void* getVectorValueAddr_DOUBLE(void *src, int32_t index) {
 _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) {
     _arithmetic_getVectorValueAddr_fn_t p = NULL;
-    if (srcType == TSDB_DATA_TYPE_TINYINT) {
+    if (srcType == TSDB_DATA_TYPE_BOOL) {
+        p = getVectorValueAddr_BOOL;
+    } else if (srcType == TSDB_DATA_TYPE_TINYINT) {
         p = getVectorValueAddr_TINYINT;
     } else if (srcType == TSDB_DATA_TYPE_UTINYINT) {
         p = getVectorValueAddr_UTINYINT;

@@ -398,6 +403,271 @@ void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right,
   }
 }

+void vectorBitand(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) {
+  int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1;
+  int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1;
+
+  char *output = out;
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft  = getVectorValueAddrFn(_left_type);
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type);
+
+  if (len1 == (len2)) {
+    for (; i >= 0 && i < (len2); i += step) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        switch (_left_type) {
+          case TSDB_DATA_TYPE_BOOL:      *(bool *)output      = TSDB_DATA_BOOL_NULL;      output += sizeof(bool);    break;
+          case TSDB_DATA_TYPE_TINYINT:   *(int8_t *)output    = TSDB_DATA_TINYINT_NULL;   output += sizeof(int8_t);  break;
+          case TSDB_DATA_TYPE_SMALLINT:  *(int16_t *)output   = TSDB_DATA_SMALLINT_NULL;  output += sizeof(int16_t); break;
+          case TSDB_DATA_TYPE_INT:       *(int32_t *)output   = TSDB_DATA_INT_NULL;       output += sizeof(int32_t); break;
+          case TSDB_DATA_TYPE_BIGINT:    *(int64_t *)output   = TSDB_DATA_BIGINT_NULL;    output += sizeof(int64_t); break;
+          case TSDB_DATA_TYPE_UTINYINT:  *(uint8_t *)output   = TSDB_DATA_UTINYINT_NULL;  output += sizeof(int8_t);  break;
+          case TSDB_DATA_TYPE_USMALLINT: *(uint16_t *)output  = TSDB_DATA_USMALLINT_NULL; output += sizeof(int16_t); break;
+          case TSDB_DATA_TYPE_UINT:      *(uint32_t *)output  = TSDB_DATA_UINT_NULL;      output += sizeof(int32_t); break;
+          case TSDB_DATA_TYPE_UBIGINT:   *(uint64_t *)output  = TSDB_DATA_UBIGINT_NULL;   output += sizeof(int64_t); break;
+        }
+        continue;
+      }
+      switch (_left_type) {
+        case TSDB_DATA_TYPE_BOOL:      *(bool *)output      = (*((bool *)left + i))     & (*((bool *)right + i));     output += sizeof(bool);    break;
+        case TSDB_DATA_TYPE_TINYINT:   *(int8_t *)output    = (*((int8_t *)left + i))   & (*((int8_t *)right + i));   output += sizeof(int8_t);  break;
+        case TSDB_DATA_TYPE_SMALLINT:  *(int16_t *)output   = (*((int16_t *)left + i))  & (*((int16_t *)right + i));  output += sizeof(int16_t); break;
+        case TSDB_DATA_TYPE_INT:       *(int32_t *)output   = (*((int32_t *)left + i))  & (*((int32_t *)right + i));  output += sizeof(int32_t); break;
+        case TSDB_DATA_TYPE_BIGINT:    *(int64_t *)output   = (*((int64_t *)left + i))  & (*((int64_t *)right + i));  output += sizeof(int64_t); break;
+        case TSDB_DATA_TYPE_UTINYINT:  *(uint8_t *)output   = (*((uint8_t *)left + i))  & (*((uint8_t *)right + i));  output += sizeof(int8_t);  break;
+        case TSDB_DATA_TYPE_USMALLINT: *(uint16_t *)output  = (*((uint16_t *)left + i)) & (*((uint16_t *)right + i)); output += sizeof(int16_t); break;
+        case TSDB_DATA_TYPE_UINT:      *(uint32_t *)output  = (*((uint32_t *)left + i)) & (*((uint32_t *)right + i)); output += sizeof(int32_t); break;
+        case TSDB_DATA_TYPE_UBIGINT:   *(uint64_t *)output  = (*((uint64_t *)left + i)) & (*((uint64_t *)right + i)); output += sizeof(int64_t); break;
+      }
+    }
+  } else if (len1 == 1) {
+    for (; i >= 0 && i < (len2); i += step) {
+      if (isNull(getVectorValueAddrFnLeft(left, 0), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        switch (_left_type) {
+          case TSDB_DATA_TYPE_BOOL:      *(bool *)output      = TSDB_DATA_BOOL_NULL;      output += sizeof(bool);    break;
+          case TSDB_DATA_TYPE_TINYINT:   *(int8_t *)output    = TSDB_DATA_TINYINT_NULL;   output += sizeof(int8_t);  break;
+          case TSDB_DATA_TYPE_SMALLINT:  *(int16_t *)output   = TSDB_DATA_SMALLINT_NULL;  output += sizeof(int16_t); break;
+          case TSDB_DATA_TYPE_INT:       *(int32_t *)output   = TSDB_DATA_INT_NULL;       output += sizeof(int32_t); break;
+          case TSDB_DATA_TYPE_BIGINT:    *(int64_t *)output   = TSDB_DATA_BIGINT_NULL;    output += sizeof(int64_t); break;
+          case TSDB_DATA_TYPE_UTINYINT:  *(uint8_t *)output   = TSDB_DATA_UTINYINT_NULL;  output += sizeof(int8_t);  break;
+          case TSDB_DATA_TYPE_USMALLINT: *(uint16_t *)output  = TSDB_DATA_USMALLINT_NULL; output += sizeof(int16_t); break;
+          case TSDB_DATA_TYPE_UINT:      *(uint32_t *)output  = TSDB_DATA_UINT_NULL;      output += sizeof(int32_t); break;
+          case TSDB_DATA_TYPE_UBIGINT:   *(uint64_t *)output  = TSDB_DATA_UBIGINT_NULL;   output += sizeof(int64_t); break;
+        }
+        continue;
+      }
+      switch (_left_type) {
+        case TSDB_DATA_TYPE_BOOL:      *(bool *)output      = (*(bool *)left)     & (*((bool *)right + i));     output += sizeof(bool);    break;
+        case TSDB_DATA_TYPE_TINYINT:   *(int8_t *)output    = (*(int8_t *)left)   & (*((int8_t *)right + i));   output += sizeof(int8_t);  break;
+        case TSDB_DATA_TYPE_SMALLINT:  *(int16_t *)output   = (*(int16_t *)left)  & (*((int16_t *)right + i));  output += sizeof(int16_t); break;
+        case TSDB_DATA_TYPE_INT:       *(int32_t *)output   = (*(int32_t *)left)  & (*((int32_t *)right + i));  output += sizeof(int32_t); break;
+        case TSDB_DATA_TYPE_BIGINT:    *(int64_t *)output   = (*(int64_t *)left)  & (*((int64_t *)right + i));  output += sizeof(int64_t); break;
+        case TSDB_DATA_TYPE_UTINYINT:  *(uint8_t *)output   = (*(uint8_t *)left)  & (*((uint8_t *)right + i));  output += sizeof(int8_t);  break;
+        case TSDB_DATA_TYPE_USMALLINT: *(uint16_t *)output  = (*(uint16_t *)left) & (*((uint16_t *)right + i)); output += sizeof(int16_t); break;
+        case TSDB_DATA_TYPE_UINT:      *(uint32_t *)output  = (*(uint32_t *)left) & (*((uint32_t *)right + i)); output += sizeof(int32_t); break;
+        case TSDB_DATA_TYPE_UBIGINT:   *(uint64_t *)output  = (*(uint64_t *)left) & (*((uint64_t *)right + i)); output += sizeof(int64_t); break;
+      }
+    }
+  } else if ((len2) == 1) {
+    for (; i >= 0 && i < len1; i += step) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, 0), _right_type)) {
+        switch (_left_type) {
+          case TSDB_DATA_TYPE_BOOL:      *(bool *)output      = TSDB_DATA_BOOL_NULL;      output += sizeof(bool);    break;
+          case TSDB_DATA_TYPE_TINYINT:   *(int8_t *)output    = TSDB_DATA_TINYINT_NULL;   output += sizeof(int8_t);  break;
+          case TSDB_DATA_TYPE_SMALLINT:  *(int16_t *)output   = TSDB_DATA_SMALLINT_NULL;  output += sizeof(int16_t); break;
+          case TSDB_DATA_TYPE_INT:       *(int32_t *)output   = TSDB_DATA_INT_NULL;       output += sizeof(int32_t); break;
+          case TSDB_DATA_TYPE_BIGINT:    *(int64_t *)output   = TSDB_DATA_BIGINT_NULL;    output += sizeof(int64_t); break;
+          case TSDB_DATA_TYPE_UTINYINT:  *(uint8_t *)output   = TSDB_DATA_UTINYINT_NULL;  output += sizeof(int8_t);  break;
+          case TSDB_DATA_TYPE_USMALLINT: *(uint16_t *)output  = TSDB_DATA_USMALLINT_NULL; output += sizeof(int16_t); break;
+          case TSDB_DATA_TYPE_UINT:      *(uint32_t *)output  = TSDB_DATA_UINT_NULL;      output += sizeof(int32_t); break;
+          case TSDB_DATA_TYPE_UBIGINT:   *(uint64_t *)output  = TSDB_DATA_UBIGINT_NULL;   output += sizeof(int64_t); break;
+        }
+        continue;
+      }
+      switch (_left_type) {
+        case TSDB_DATA_TYPE_BOOL:      *(bool *)output      = (*((bool *)left + i))     & (*(bool *)right);     output += sizeof(bool);    break;
+        case TSDB_DATA_TYPE_TINYINT:   *(int8_t *)output    = (*((int8_t *)left + i))   & (*(int8_t *)right);   output += sizeof(int8_t);  break;
+        case TSDB_DATA_TYPE_SMALLINT:  *(int16_t *)output   = (*((int16_t *)left + i))  & (*(int16_t *)right);  output += sizeof(int16_t); break;
+        case TSDB_DATA_TYPE_INT:       *(int32_t *)output   = (*((int32_t *)left + i))  & (*(int32_t *)right);  output += sizeof(int32_t); break;
+        case TSDB_DATA_TYPE_BIGINT:    *(int64_t *)output   = (*((int64_t *)left + i))  & (*(int64_t *)right);  output += sizeof(int64_t); break;
+        case TSDB_DATA_TYPE_UTINYINT:  *(uint8_t *)output   = (*((uint8_t *)left + i))  & (*(uint8_t *)right);  output += sizeof(int8_t);  break;
+        case TSDB_DATA_TYPE_USMALLINT: *(uint16_t *)output  = (*((uint16_t *)left + i)) & (*(uint16_t *)right); output += sizeof(int16_t); break;
+        case TSDB_DATA_TYPE_UINT:      *(uint32_t *)output  = (*((uint32_t *)left + i)) & (*(uint32_t *)right); output += sizeof(int32_t); break;
+        case TSDB_DATA_TYPE_UBIGINT:   *(uint64_t *)output  = (*((uint64_t *)left + i)) & (*(uint64_t *)right); output += sizeof(int64_t); break;
+      }
+    }
+  }
+}
+
 _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) {
   switch (arithmeticOptr) {
     case TSDB_BINARY_OP_ADD:

@@ -410,6 +680,8 @@ _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) {
       return vectorDivide;
     case TSDB_BINARY_OP_REMAINDER:
       return vectorRemainder;
+    case TSDB_BINARY_OP_BITAND:
+      return vectorBitand;
     default:
       assert(0);
       return NULL;
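The new vectorBitand above walks the two input columns, writes a type-specific NULL sentinel when either operand is NULL, and otherwise stores the bitwise AND of the pair. A minimal self-contained C sketch of the same idea for a single int32 column (DEMO_INT_NULL and bitand_int32 are stand-in names, not engine constants):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in NULL sentinel; the engine uses TSDB_DATA_INT_NULL (INT32_MIN). */
#define DEMO_INT_NULL INT32_MIN

/* Element-wise bitwise AND over two equally sized int32 columns.
 * If either input element is the NULL sentinel, the output is NULL too. */
static void bitand_int32(const int32_t *left, const int32_t *right, int32_t *out, int n) {
  for (int i = 0; i < n; ++i) {
    bool is_null = (left[i] == DEMO_INT_NULL) || (right[i] == DEMO_INT_NULL);
    out[i] = is_null ? DEMO_INT_NULL : (left[i] & right[i]);
  }
}

int main(void) {
  int32_t l[] = { 6, 3, DEMO_INT_NULL, 12 };
  int32_t r[] = { 3, 1, 7,             10 };
  int32_t o[4];
  bitand_int32(l, r, o, 4);
  for (int i = 0; i < 4; ++i) {
    if (o[i] == DEMO_INT_NULL) printf("NULL ");
    else                       printf("%d ", o[i]);   /* prints: 2 1 NULL 8 */
  }
  printf("\n");
  return 0;
}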
src/common/src/texpr.c

@@ -23,6 +23,7 @@
 #include "tarray.h"
 #include "tbuffer.h"
 #include "tcompare.h"
+#include "tglobal.h"
 #include "tsdb.h"
 #include "tskiplist.h"
 #include "texpr.h"

@@ -47,7 +48,7 @@ static int32_t exprInvalidOperationMsg(char *msgbuf, const char *msg) {
 int32_t exprTreeValidateFunctionNode(char* msgbuf, tExprNode *pExpr) {
   int32_t code = TSDB_CODE_SUCCESS;
-  //TODO: check childs for every function
+  //TODO: check children for every function
   switch (pExpr->_func.functionId) {
     case TSDB_FUNC_SCALAR_POW:
     case TSDB_FUNC_SCALAR_LOG:

@@ -85,17 +86,184 @@ int32_t exprTreeValidateFunctionNode(char* msgbuf, tExprNode *pExpr) {
 }

 int32_t exprTreeValidateExprNode(tExprNode *pExpr) {
+  int16_t leftType = pExpr->_node.pLeft->resultType;
+  int16_t rightType = pExpr->_node.pRight->resultType;
+  int16_t resultType = leftType;
   if (pExpr->_node.optr == TSDB_BINARY_OP_ADD || pExpr->_node.optr == TSDB_BINARY_OP_SUBTRACT ||
       pExpr->_node.optr == TSDB_BINARY_OP_MULTIPLY || pExpr->_node.optr == TSDB_BINARY_OP_DIVIDE ||
       pExpr->_node.optr == TSDB_BINARY_OP_REMAINDER) {
-    int16_t leftType = pExpr->_node.pLeft->resultType;
-    int16_t rightType = pExpr->_node.pRight->resultType;
     if (!IS_NUMERIC_TYPE(leftType) || !IS_NUMERIC_TYPE(rightType)) {
       return TSDB_CODE_TSC_INVALID_OPERATION;
     }
     pExpr->resultType = TSDB_DATA_TYPE_DOUBLE;
     pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes;
     return TSDB_CODE_SUCCESS;
+  } else if (pExpr->_node.optr == TSDB_BINARY_OP_BITAND) {
+    if ((leftType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(leftType) && !IS_UNSIGNED_NUMERIC_TYPE(leftType)) ||
+        (rightType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(rightType) && !IS_UNSIGNED_NUMERIC_TYPE(rightType))) {
+      return TSDB_CODE_TSC_INVALID_OPERATION;
+    }
+    uint8_t schemaType;
+    // now leftType and rightType are both numeric
+    if (pExpr->_node.pLeft->nodeType == TSQL_NODE_COL && pExpr->_node.pRight->nodeType == TSQL_NODE_COL) {
+      if (leftType != rightType) {
+        return TSDB_CODE_TSC_INVALID_OPERATION;
+      }
+    } else if (pExpr->_node.pLeft->nodeType == TSQL_NODE_COL) {
+      if (pExpr->_node.pRight->nodeType != TSQL_NODE_VALUE) {
+        return TSDB_CODE_TSC_INVALID_OPERATION;
+      } else {
+        schemaType = pExpr->_node.pLeft->pSchema->type;
+        int64_t sVal = pExpr->_node.pRight->pVal->i64;
+        uint64_t uVal = pExpr->_node.pRight->pVal->u64;
+        switch (schemaType) {
+          case TSDB_DATA_TYPE_BOOL:
+            if ((pExpr->_node.pRight->pVal->nType != TSDB_DATA_TYPE_BOOL) ||
+                (pExpr->_node.pRight->pVal->i64 != 0 && pExpr->_node.pRight->pVal->i64 != 1 &&
+                 pExpr->_node.pRight->pVal->i64 != TSDB_DATA_BOOL_NULL)) {
+              return TSDB_CODE_TSC_INVALID_OPERATION;
+            }
+            break;
+          case TSDB_DATA_TYPE_TINYINT:   if (sVal < -128 || sVal > 127) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_SMALLINT:  if (sVal < -32768 || sVal > 32767) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_INT:       if (sVal < INT32_MIN || sVal > INT32_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_BIGINT:    if (sVal < INT64_MIN || sVal > INT64_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_UTINYINT:  if (uVal > 255) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_USMALLINT: if (uVal > 65535) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_UINT:      if (uVal > UINT32_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+          case TSDB_DATA_TYPE_UBIGINT:   if (uVal > UINT64_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } break;
+        }
+        pExpr->_node.pRight->pSchema->type = schemaType;
+        pExpr->_node.pRight->pVal->nType   = schemaType;
+        pExpr->_node.pRight->resultType    = schemaType;
+        pExpr->_node.pRight->resultBytes   = tDataTypes[schemaType].bytes;
+      }
+    } else {
+      if (pExpr->_node.pLeft->nodeType != TSQL_NODE_VALUE) {
+        return TSDB_CODE_TSC_INVALID_OPERATION;
+      } else {
+        schemaType = pExpr->_node.pRight->pSchema->type;
+        int64_t sVal = pExpr->_node.pLeft->pVal->i64;
+        uint64_t uVal = pExpr->_node.pLeft->pVal->u64;
+        switch (schemaType) {
+          case TSDB_DATA_TYPE_BOOL:
+            if ((pExpr->_node.pLeft->pVal->nType != TSDB_DATA_TYPE_BOOL) ||
+                (pExpr->_node.pLeft->pVal->i64 != 0 && pExpr->_node.pLeft->pVal->i64 != 1 &&
+                 pExpr->_node.pLeft->pVal->i64 != TSDB_DATA_BOOL_NULL)) {
+              return TSDB_CODE_TSC_INVALID_OPERATION;
+            }
+            pExpr->_node.pLeft->pVal->nLen = 1;
+            break;
+          case TSDB_DATA_TYPE_TINYINT:   if (sVal < -128 || sVal > 127) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 1; break;
+          case TSDB_DATA_TYPE_SMALLINT:  if (sVal < -32768 || sVal > 32767) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 2; break;
+          case TSDB_DATA_TYPE_INT:       if (sVal < INT32_MIN || sVal > INT32_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 4; break;
+          case TSDB_DATA_TYPE_BIGINT:    if (sVal < INT64_MIN || sVal > INT64_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 8; break;
+          case TSDB_DATA_TYPE_UTINYINT:  if (uVal > 255) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 1; break;
+          case TSDB_DATA_TYPE_USMALLINT: if (uVal > 65535) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 2; break;
+          case TSDB_DATA_TYPE_UINT:      if (uVal > UINT32_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 4; break;
+          case TSDB_DATA_TYPE_UBIGINT:   if (uVal > UINT64_MAX) { return TSDB_CODE_TSC_INVALID_OPERATION; } pExpr->_node.pLeft->pVal->nLen = 8; break;
+        }
+        pExpr->_node.pLeft->pSchema->type = schemaType;
+        pExpr->_node.pLeft->pVal->nType   = schemaType;
+        pExpr->_node.pLeft->resultType    = schemaType;
+        pExpr->_node.pLeft->resultBytes   = tDataTypes[schemaType].bytes;
+      }
+      resultType = schemaType;
+    }
+    if (resultType == TSDB_DATA_TYPE_BOOL) {
+      pExpr->resultType  = TSDB_DATA_TYPE_BOOL;
+      pExpr->resultBytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
+    } else {
+      pExpr->resultType  = resultType;
+      pExpr->resultBytes = tDataTypes[resultType].bytes;
+    }
+    return TSDB_CODE_SUCCESS;
   } else {
     return TSDB_CODE_SUCCESS;
   }

@@ -488,9 +656,17 @@ void exprTreeExprNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandI
   _arithmetic_operator_fn_t OperatorFn = getArithmeticOperatorFn(pExpr->_node.optr);

   OperatorFn(leftIn, leftNum, leftType, rightIn, rightNum, rightType, output->data, fnOrder);
   output->numOfRows = MAX(leftNum, rightNum);
-  output->type = TSDB_DATA_TYPE_DOUBLE;
   if (leftType == TSDB_DATA_TYPE_TIMESTAMP || rightType == TSDB_DATA_TYPE_TIMESTAMP) {
     output->type = TSDB_DATA_TYPE_BIGINT;
+  } else {
+    if (pExpr->_node.optr == TSDB_BINARY_OP_BITAND) {
+      output->type = leftType;  // rightType must be the same as leftType
+    } else {
+      output->type = TSDB_DATA_TYPE_DOUBLE;
+    }
   }
   output->bytes = tDataTypes[output->type].bytes;

   tfree(ltmp);

@@ -1724,4 +1900,5 @@ tScalarFunctionInfo aScalarFunctions[] = {
     "cast",
     vectorMathFunc
   },
 };
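The validation added above only folds a literal operand of `&` into the column's schema type after checking that the value fits that type's range. A hedged C sketch of just the signed-range part of that check (T_* tags and literal_fits are invented here; the engine uses TSDB_DATA_TYPE_* and returns TSDB_CODE_TSC_INVALID_OPERATION instead of false):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical simplified type tags. */
enum { T_TINYINT, T_SMALLINT, T_INT, T_BIGINT };

/* Return true if the signed literal 'v' fits in the column type 'type',
 * mirroring the bounds used in the validation hunk above. */
static bool literal_fits(int type, int64_t v) {
  switch (type) {
    case T_TINYINT:  return v >= -128      && v <= 127;
    case T_SMALLINT: return v >= -32768    && v <= 32767;
    case T_INT:      return v >= INT32_MIN && v <= INT32_MAX;
    case T_BIGINT:   return true;          /* any int64 fits */
    default:         return false;
  }
}

int main(void) {
  printf("%d\n", literal_fits(T_TINYINT, 200));   /* 0: 200 overflows TINYINT, expression rejected */
  printf("%d\n", literal_fits(T_SMALLINT, 200));  /* 1: accepted */
  return 0;
}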
src/inc/taosdef.h

@@ -176,10 +176,11 @@ do { \
 #define TSDB_BINARY_OP_MULTIPLY   32
 #define TSDB_BINARY_OP_DIVIDE     33
 #define TSDB_BINARY_OP_REMAINDER  34
+#define TSDB_BINARY_OP_BITAND     35

 #define IS_RELATION_OPTR(op)   (((op) >= TSDB_RELATION_LESS) && ((op) < TSDB_RELATION_IN))
-#define IS_ARITHMETIC_OPTR(op) (((op) >= TSDB_BINARY_OP_ADD) && ((op) <= TSDB_BINARY_OP_REMAINDER))
+#define IS_ARITHMETIC_OPTR(op) (((op) >= TSDB_BINARY_OP_ADD) && ((op) <= TSDB_BINARY_OP_BITAND))

 #define TS_PATH_DELIMITER_LEN  1
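With TSDB_BINARY_OP_BITAND defined as 35 and the upper bound of IS_ARITHMETIC_OPTR moved from REMAINDER to BITAND, the operator-class test now classifies `&` as arithmetic. A minimal sketch of that effect, with the macros copied from the hunk (the value of TSDB_BINARY_OP_ADD is assumed from the visible numbering, not shown in the diff):

#include <stdio.h>

#define TSDB_BINARY_OP_ADD        30   /* assumed from the numbering in the hunk */
#define TSDB_BINARY_OP_REMAINDER  34
#define TSDB_BINARY_OP_BITAND     35

/* New upper bound includes the bitwise-AND operator. */
#define IS_ARITHMETIC_OPTR(op) (((op) >= TSDB_BINARY_OP_ADD) && ((op) <= TSDB_BINARY_OP_BITAND))

int main(void) {
  printf("%d\n", IS_ARITHMETIC_OPTR(TSDB_BINARY_OP_BITAND));    /* 1 with the new bound; 0 before */
  printf("%d\n", IS_ARITHMETIC_OPTR(TSDB_BINARY_OP_REMAINDER)); /* 1, unchanged */
  return 0;
}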
src/kit/taos-tools (submodule) @ 9815e306, compared with d1c2c640

-Subproject commit d1c2c6404e10d303117caa02b746665974e0e264
+Subproject commit 9815e306333f4b0d8cf31499ba6c1e3a21fe104c
src/os/src/detail/osFile.c

@@ -179,7 +179,7 @@ int64_t taosFSendFile(FILE *out_file, FILE *in_file, int64_t *offset, int64_t co
       fwrite(buffer, 1, rlen, out_file);
       return (int64_t)(writeLen + rlen);
     } else {
-      fwrite(buffer, 1, _SEND_FILE_STEP_, in_file);
+      fwrite(buffer, 1, _SEND_FILE_STEP_, out_file);
       writeLen += _SEND_FILE_STEP_;
     }
   }

@@ -223,7 +223,7 @@ int64_t taosSendFile(SocketFd dfd, FileFd sfd, int64_t *offset, int64_t count) {
     if (rlen <= 0) {
       return writeLen;
     } else {
-      taosWriteSocket(sfd, buffer, (int32_t)remain);
+      taosWriteSocket(dfd, buffer, (int32_t)remain);
       writeLen += remain;
     }
   }
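Both one-word fixes above redirect the write to the destination handle (out_file, or the destination socket dfd) instead of writing back to the source. A minimal C sketch of the corrected chunked copy loop (copy_chunked, STEP, and the file names in main are hypothetical, not the osFile.c API):

#include <stdio.h>

#define STEP 4096   /* stand-in for _SEND_FILE_STEP_ */

/* Copy 'count' bytes from in_file to out_file in STEP-sized chunks.
 * Every chunk read from the source must be written to the destination,
 * which is the behaviour the fix restores. */
static long copy_chunked(FILE *in_file, FILE *out_file, long count) {
  char buffer[STEP];
  long writeLen = 0;
  while (writeLen < count) {
    long   remain = count - writeLen;
    size_t want   = (remain < STEP) ? (size_t)remain : (size_t)STEP;
    size_t rlen   = fread(buffer, 1, want, in_file);
    if (rlen == 0) break;               /* EOF or read error */
    fwrite(buffer, 1, rlen, out_file);  /* write to the destination, not back to the source */
    writeLen += (long)rlen;
  }
  return writeLen;
}

int main(void) {
  FILE *in  = fopen("in.bin",  "rb");   /* hypothetical input path  */
  FILE *out = fopen("out.bin", "wb");   /* hypothetical output path */
  if (in && out) printf("copied %ld bytes\n", copy_chunked(in, out, 1L << 20));
  if (in)  fclose(in);
  if (out) fclose(out);
  return 0;
}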
src/query/inc/qExecutor.h

@@ -546,10 +546,17 @@ typedef struct SFillOperatorInfo {
   bool    multigroupResult;
 } SFillOperatorInfo;

+typedef struct SGroupbyDataInfo {
+  int32_t index;   // index of col in dataBlock
+  int32_t type;
+  int32_t bytes;
+} SGroupbyDataInfo;
+
 typedef struct SGroupbyOperatorInfo {
   SOptrBasicInfo binfo;
   int32_t        colIndex;
-  char          *prevData;          // previous group by value
+  SArray        *pGroupbyDataInfo;
+  int32_t        totalBytes;
+  char          *prevData;          // previous data buf
 } SGroupbyOperatorInfo;

 typedef struct SSWindowOperatorInfo {
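SGroupbyOperatorInfo now carries one SGroupbyDataInfo per group-by column plus the total key size, so the operator can concatenate every group-by value of a row into a single key buffer. A simplified C sketch of that key layout (fixed-width columns only; ColInfo, KEY_DELIM and build_key are invented names standing in for SGroupbyDataInfo, MULTI_KEY_DELIM and buildGroupbyKeyBuf):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define KEY_DELIM "-"   /* stand-in for MULTI_KEY_DELIM */

/* Simplified per-column descriptor. */
typedef struct { const void *data; int bytes; } ColInfo;

/* Concatenate row 'rowId' of every group-by column into one key buffer.
 * Returns a calloc'ed buffer of sum(bytes) + delimiters, or NULL on OOM. */
static char *build_key(const ColInfo *cols, int numCols, int rowId, int *totalBytes) {
  int size = 0;
  for (int i = 0; i < numCols; ++i) size += cols[i].bytes + (int)strlen(KEY_DELIM);
  char *buf = calloc(1, (size_t)size);
  if (buf == NULL) return NULL;
  char *p = buf;
  for (int i = 0; i < numCols; ++i) {
    memcpy(p, (const char *)cols[i].data + (size_t)cols[i].bytes * rowId, (size_t)cols[i].bytes);
    p += cols[i].bytes;
    memcpy(p, KEY_DELIM, strlen(KEY_DELIM));
    p += strlen(KEY_DELIM);
  }
  *totalBytes = size;
  return buf;
}

int main(void) {
  int32_t c1[] = { 1, 1, 2 };
  int8_t  c2[] = { 7, 8, 7 };
  ColInfo cols[] = { { c1, sizeof(int32_t) }, { c2, sizeof(int8_t) } };
  int total = 0;
  char *k0 = build_key(cols, 2, 0, &total);
  char *k1 = build_key(cols, 2, 1, &total);
  if (k0 && k1) {
    /* rows 0 and 1 share c1 but differ in c2, so the keys differ: prints 0 */
    printf("row0 and row1 in same group: %d\n", memcmp(k0, k1, (size_t)total) == 0);
  }
  free(k0); free(k1);
  return 0;
}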
src/query/inc/sql.y

@@ -786,6 +786,7 @@ expr(A) ::= expr(X) MINUS expr(Y).  {A = tSqlExprCreate(X, Y, TK_MINUS); }
 expr(A) ::= expr(X) STAR  expr(Y).  {A = tSqlExprCreate(X, Y, TK_STAR);  }
 expr(A) ::= expr(X) SLASH expr(Y).  {A = tSqlExprCreate(X, Y, TK_DIVIDE);}
 expr(A) ::= expr(X) REM   expr(Y).  {A = tSqlExprCreate(X, Y, TK_REM);   }
+expr(A) ::= expr(X) BITAND expr(Y). {A = tSqlExprCreate(X, Y, TK_BITAND);}

 // like expression
 expr(A) ::= expr(X) LIKE expr(Y).   {A = tSqlExprCreate(X, Y, TK_LIKE);  }
src/query/src/qExecutor.c

@@ -258,7 +258,7 @@ static void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t
 static void setResultBufSize(SQueryAttr* pQueryAttr, SRspResultInfo* pResultInfo);
 static void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, SExprInfo* pExprInfo, void* pTable);
 static void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr);
-static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes);
+static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, SGroupbyOperatorInfo *pInfo);
 static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo,
                                      SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput,
                                      int32_t tableGroupId);

@@ -1569,56 +1569,132 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
   updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey);
 }

+static bool initGroupbyInfo(const SSDataBlock *pSDataBlock, const SGroupbyExpr *pGroupbyExpr, SGroupbyOperatorInfo *pInfo) {
+  if (pInfo->pGroupbyDataInfo != NULL) {
+    // no need build group-by info
+    return true;
+  }
+  pInfo->pGroupbyDataInfo = taosArrayInit(pGroupbyExpr->numOfGroupCols, sizeof(SGroupbyDataInfo));
+  for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
+    SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
+    if (TSDB_COL_IS_TAG(pColIndex->flag)) {
+      continue;
+    }
+    for (int32_t i = 0; i < pSDataBlock->info.numOfCols; ++i) {
+      SColumnInfoData *pColInfo = taosArrayGet(pSDataBlock->pDataBlock, i);
+      if (pColInfo->info.colId == pColIndex->colId) {
+        int32_t type = pColInfo->info.type;
+        if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
+          return false;
+        }
+        pInfo->totalBytes += pColInfo->info.bytes;
+        SGroupbyDataInfo info = {.index = i, .type = pColInfo->info.type, .bytes = pColInfo->info.bytes};
+        taosArrayInsert(pInfo->pGroupbyDataInfo, k, &info);
+        break;
+      }
+      if (i == pSDataBlock->info.numOfCols - 1) {
+        // not found groupby col in dataBlock, error
+        return false;
+      }
+    }
+  }
+  pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * pGroupbyExpr->numOfGroupCols;
+  return true;
+}
+
+static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorInfo *pInfo, int32_t rowId, char **buf) {
+  char *p = calloc(1, pInfo->totalBytes);
+  if (p == NULL) {
+    *buf = NULL;
+    return;
+  }
+  *buf = p;
+  for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
+    SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
+    SColumnInfoData  *pColData  = taosArrayGet(pSDataBlock->pDataBlock, pDataInfo->index);
+    //TODO(yihaoDeng): handle float & double
+    char *val = ((char *)pColData->pData) + pDataInfo->bytes * rowId;
+    if (isNull(val, pDataInfo->type)) {
+      p += pDataInfo->bytes;
+      continue;
+    }
+    if (IS_VAR_DATA_TYPE(pDataInfo->type)) {
+      memcpy(p, varDataVal(val), varDataLen(val));
+      p += varDataLen(val);
+    } else {
+      memcpy(p, val, pDataInfo->bytes);
+      p += pDataInfo->bytes;
+    }
+    memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
+    p += strlen(MULTI_KEY_DELIM);
+  }
+}
+
+static bool isGroupbyKeyEqual(void *a, void *b, void *ext) {
+  SGroupbyOperatorInfo *pInfo = (SGroupbyOperatorInfo *)ext;
+  if (memcmp(a, b, pInfo->totalBytes) == 0) {
+    return true;
+  }
+  int32_t offset = 0;
+  for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
+    SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
+    char *k1 = (char *)a + offset;
+    char *k2 = (char *)b + offset;
+    if (getComparFunc(pDataInfo->type, 0)(k1, k2) != 0) {
+      return false;
+    }
+    offset += pDataInfo->bytes;
+    offset += (int32_t)strlen(MULTI_KEY_DELIM);
+  }
+  return true;
+}
+
 static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
   SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
   STableQueryInfo*  item = pRuntimeEnv->current;

-  SColumnInfoData* pColInfoData = taosArrayGet(pSDataBlock->pDataBlock, pInfo->colIndex);
-
   SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
-  int16_t     bytes = pColInfoData->info.bytes;
-  int16_t     type  = pColInfoData->info.type;
-  if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
+  if (!initGroupbyInfo(pSDataBlock, pRuntimeEnv->pQueryAttr->pGroupbyExpr, pInfo)) {
     qError("QInfo:0x%"PRIx64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv));
     return;
   }
+  //realloc pRuntimeEnv->keyBuf
+  pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, pInfo->totalBytes + sizeof(int64_t) + POINTER_BYTES);
+
+  SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0);
+  int64_t* tsList = (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) ? (int64_t*)pFirstColData->pData : NULL;

   STimeWindow w = TSWINDOW_INITIALIZER;

-  int16_t num = 0;
+  int32_t num = 0;
+  char*   key = NULL;
+  int32_t type = 0;

   for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
-    char* val = ((char*)pColInfoData->pData) + bytes * j;
-
     // Compare with the previous row of this column, and do not set the output buffer again if they are identical.
+    buildGroupbyKeyBuf(pSDataBlock, pInfo, j, &key);
+    if (!key) {
+      continue;
+    }
+
     if (pInfo->prevData == NULL) {
-      pInfo->prevData = malloc(bytes);
-      memcpy(pInfo->prevData, val, bytes);
+      // first row of
+      pInfo->prevData = key;
       num++;
       continue;
-    }
-
-    if (IS_VAR_DATA_TYPE(type)) {
-      int32_t len = varDataLen(val);
-      if (len == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), len) == 0) {
-        num++;
-        continue;
-      }
-    } else {
-      if (memcmp(pInfo->prevData, val, bytes) == 0) {
-        num++;
-        continue;
-      }
+    } else if (isGroupbyKeyEqual(pInfo->prevData, key, pInfo)) {
+      num++;
+      tfree(key);
+      continue;
     }

     if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
-      setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, bytes);
+      setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
     }

-    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, item->groupIndex);
+    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
     if (ret != TSDB_CODE_SUCCESS) {  // null data, too many state code
       longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
     }

@@ -1626,23 +1702,25 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
     doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);

     num = 1;
-    memcpy(pInfo->prevData, val, bytes);
+    tfree(pInfo->prevData);
+    pInfo->prevData = key;
   }

   if (num > 0) {
-    char* val = ((char*)pColInfoData->pData) + bytes * (pSDataBlock->info.rows - num);
-    memcpy(pInfo->prevData, val, bytes);
-
-    if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
-      setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes);
-    }
-
-    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex);
-    if (ret != TSDB_CODE_SUCCESS) {  // null data, too many state code
-      longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+    buildGroupbyKeyBuf(pSDataBlock, pInfo, pSDataBlock->info.rows - num, &key);
+    if (key) {
+      tfree(pInfo->prevData);
+      pInfo->prevData = key;
+      if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
+        setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
+      }
+      int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
+      if (ret != TSDB_CODE_SUCCESS) {  // null data, too many state code
+        longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
+      }
+      doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
     }
-    doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput);
   }

   tfree(pInfo->prevData);

@@ -1717,22 +1795,22 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
                    pSDataBlock->info.rows, pOperator->numOfOutput);
 }

-static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
-  if (IS_VAR_DATA_TYPE(type)) {
-    if (pResultRow->key == NULL) {
-      pResultRow->key = malloc(varDataTLen(pData));
-      varDataCopy(pResultRow->key, pData);
-    } else {
-      assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
-    }
-  } else {
-    int64_t v = -1;
-    GET_TYPED_DATA(v, int64_t, type, pData);
-
-    pResultRow->win.skey = v;
-    pResultRow->win.ekey = v;
-  }
-}
+// static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) {
+//   if (IS_VAR_DATA_TYPE(type)) {
+//     if (pResultRow->key == NULL) {
+//       pResultRow->key = malloc(varDataTLen(pData));
+//       varDataCopy(pResultRow->key, pData);
+//     } else {
+//       assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
+//     }
+//   } else {
+//     int64_t v = -1;
+//     GET_TYPED_DATA(v, int64_t, type, pData);
+//
+//     pResultRow->win.skey = v;
+//     pResultRow->win.ekey = v;
+//   }
+// }

 static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *binfo, int32_t numOfCols, char *pData, int16_t type, int16_t bytes, int32_t groupIndex) {
   SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;

@@ -1744,16 +1822,16 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic
   // not assign result buffer yet, add new result buffer, TODO remove it
   char* d = pData;
   int16_t len = bytes;
-  if (IS_VAR_DATA_TYPE(type)) {
-    d = varDataVal(pData);
-    len = varDataLen(pData);
-  }
+  // if (IS_VAR_DATA_TYPE(type)) {
+  //   d = varDataVal(pData);
+  //   len = varDataLen(pData);
+  // }

   int64_t tid = 0;
   SResultRow *pResultRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, d, len, true, groupIndex);
   assert (pResultRow != NULL);

-  setResultRowKey(pResultRow, pData, type);
+  // setResultRowKey(pResultRow, pData, type);

   if (pResultRow->pageId == -1) {
     int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, groupIndex, pRuntimeEnv->pQueryAttr->resultRowSize);
     if (ret != 0) {

@@ -4161,7 +4239,7 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
 }

-void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes) {
+void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, SGroupbyOperatorInfo *pInfo) {
   SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;

   int32_t numOfExprs = pQueryAttr->numOfOutput;

@@ -4174,6 +4252,20 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction
       pCtx[i].param[0].arr = NULL;
       pCtx[i].param[0].nType = TSDB_DATA_TYPE_INT;  // avoid freeing the memory by setting the type to be int

+      // find colid in dataBlock
+      int32_t bytes, offset = 0;
+      char   *val = NULL;
+      for (int32_t idx = 0; idx < taosArrayGetSize(pInfo->pGroupbyDataInfo); idx++) {
+        SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, idx);
+        if (pDataInfo->index == pExpr1->colInfo.colId) {
+          bytes = pDataInfo->bytes;
+          val   = pInfo->prevData + offset;
+          break;
+        }
+        offset += pDataInfo->bytes;
+      }
+      if (val == NULL) {
+        continue;
+      }
+
       // TODO use hash to speedup this loop
       int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult);
       for (int32_t j = 0; j < numOfGroup; ++j) {

@@ -7074,10 +7166,6 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
     // the pDataBlock are always the same one, no need to call this again
     setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, pRuntimeEnv->pQueryAttr->order.order);
     setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->binfo.pCtx, pOperator->numOfOutput);
-    if (pInfo->colIndex == -1) {
-      pInfo->colIndex = getGroupbyColumnIndex(pRuntimeEnv->pQueryAttr->pGroupbyExpr, pBlock);
-    }
-
     doHashGroupbyAgg(pOperator, pInfo, pBlock);
   }

@@ -7352,7 +7440,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) {
 static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) {
   SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*) param;
   doDestroyBasicInfo(&pInfo->binfo, numOfOutput);
+  taosArrayDestroy(&pInfo->pGroupbyDataInfo);
   if (pInfo->prevData) {
     tfree(pInfo->prevData);
   }

@@ -7829,7 +7917,7 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
     return NULL;
   }

-  pInfo->colIndex = -1;  // group by column index
+  // pInfo->colIndex = -1;  // group by column index
   pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);

   SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
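The reworked doHashGroupbyAgg above still follows a "compare each row's key with the previous row, flush the accumulated run when the key changes" scan, only now the key covers several columns. A tiny C sketch of that run-detection pattern over a single int32 key column (scan_groups is an invented name; the real operator applies aggregation functions per run instead of printing):

#include <stdio.h>
#include <stdint.h>

/* Report each run of equal keys as one group: [runStart, j). */
static void scan_groups(const int32_t *keys, int rows) {
  int runStart = 0;
  for (int j = 1; j <= rows; ++j) {
    if (j == rows || keys[j] != keys[runStart]) {
      printf("group key=%d rows=[%d,%d)\n", keys[runStart], runStart, j);
      runStart = j;   /* start the next run */
    }
  }
}

int main(void) {
  int32_t keys[] = { 4, 4, 4, 9, 9, 2 };
  scan_groups(keys, 6);   /* three groups: key 4 (3 rows), key 9 (2 rows), key 2 (1 row) */
  return 0;
}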
src/query/src/qTsbuf.c

@@ -125,7 +125,14 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
   ret = fseek(pTSBuf->f, 0, SEEK_END);
   UNUSED(ret);

+  // file meta data may be cached, close and reopen the file for accurate file size.
+  fclose(pTSBuf->f);
+  pTSBuf->f = fopen(pTSBuf->path, "rb+");
+  if (pTSBuf->f == NULL) {
+    return pTSBuf;
+  }
+
   struct stat fileStat;
   if (fstat(fileno(pTSBuf->f), &fileStat) != 0) {
     tsBufDestroy(pTSBuf);
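The fix above closes and reopens the ts-buffer file before calling fstat, so the reported size reflects what is actually on disk rather than possibly stale, cached metadata. A minimal C sketch of that reopen-then-fstat sequence using POSIX fileno/fstat (file_size_reopened and the "ts.buf" path are hypothetical):

#include <stdio.h>
#include <sys/stat.h>

/* Reopen 'path' into *fp and return its current size via fstat, or -1 on error.
 * Closing and reopening before fstat mirrors the qTsbuf.c fix above. */
static long file_size_reopened(FILE **fp, const char *path) {
  if (*fp) fclose(*fp);
  *fp = fopen(path, "rb+");
  if (*fp == NULL) return -1;

  struct stat st;
  if (fstat(fileno(*fp), &st) != 0) return -1;
  return (long)st.st_size;
}

int main(void) {
  FILE *f = fopen("ts.buf", "rb+");              /* hypothetical buffer file */
  long  sz = file_size_reopened(&f, "ts.buf");
  printf("size: %ld\n", sz);                     /* -1 if the file does not exist */
  if (f) fclose(f);
  return 0;
}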
src/query/src/sql.c

(Generated parser source; diff collapsed in the web view: +1277 / -1826.)
tests/perftest-scripts/perftest-query.sh
Browse file @ 62927b93
...
...
@@ -57,35 +57,21 @@ function stopTaosd {
function buildTDengine {
	echoInfo "Build TDinternal"
	cd $WORK_DIR/TDinternal

	git reset --hard HEAD~3
	git fetch
	git checkout $branch
	git pull
	git remote update > /dev/null
	cd community
	git reset --hard HEAD
	git fetch
	git checkout $branch
	REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch`
	git pull > /dev/null
	LOCAL_COMMIT=`git rev-parse --short @`
	echo " LOCAL: $LOCAL_COMMIT"
	echo "REMOTE: $REMOTE_COMMIT"
	if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
		echo "repo up-to-date"
	fi
	cd community
	git reset --hard HEAD
	cd ..
	echo "git submodule update --init --recursive"
	git submodule update --init --recursive
	git pull > /dev/null 2>&1
	cd community
	git remote update > /dev/null
	git reset --hard HEAD
	git fetch
	git checkout $branch
	REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch`
	LOCAL_COMMIT=`git rev-parse --short @`

	cd ../debug
	rm -rf *
	if [ $type = "jemalloc" ]; then
...
...
@@ -94,36 +80,34 @@ function buildTDengine {
	else
		cmake .. > /dev/null
	fi
	#cp $WORK_DIR/taosdemoPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/tools/
	#cp $WORK_DIR/insertFromCSVPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/insert/
	#cp $WORK_DIR/queryPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/query/
	rm -rf $WORK_DIR/TDinternal/community/tests/pytest/query/operator.py
	make > /dev/null 2>&1
	make install > /dev/null 2>&1
	echo "Build TDengine on remote server"
	ssh perftest "./buildTDengine.sh $branch > /dev/null"
}

function runQueryPerfTest {
	[ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT
	nohup $WORK_DIR/TDinternal/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 &
	nohup $WORK_DIR/TDinternal/debug/build/bin/taosd -c /etc/$branch > /dev/null 2>&1 &
	echoInfo "Wait TDengine to start"
	sleep 60
	echoInfo "Run Performance Test"
	cd $WORK_DIR/TDinternal/community/tests/pytest
	cd $WORK_DIR/TDinternal/community/tests/pytest

	python3 query/queryPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -d perf2 | tee -a $PERFORMANCE_TEST_REPORT
	python3 perfbenchmark/queryPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT

	python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
	python3 perfbenchmark/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT

	echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
	python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
	python3 perfbenchmark/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT

	echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 10 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
	python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT
	python3 perfbenchmark/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT

	echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 10 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
	python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT
	python3 perfbenchmark/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT
}
...
...
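After this change the three performance drivers live under tests/pytest/perfbenchmark and taosd is started with a branch-named config directory (/etc/$branch). A small Python wrapper that mirrors what runQueryPerfTest invokes, shown only as a sketch (the commit, branch and build-type values below are placeholders; the shell script derives its own):

import subprocess

local_commit = "62927b93"   # placeholder; the script uses `git rev-parse --short @`
branch = "2.4"
build_type = "glibc"        # assumed value for -T; the script passes its own $type

for script in ("perfbenchmark/queryPerformance.py",
               "perfbenchmark/insertFromCSVPerformance.py",
               "perfbenchmark/taosdemoPerformance.py"):
    # Same flags the shell function passes: commit id, branch, build type.
    subprocess.run(["python3", script, "-c", local_commit, "-b", branch, "-T", build_type],
                   check=False)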
tests/pytest/insert/insertFromCSVPerformance.py → tests/pytest/perfbenchmark/insertFromCSVPerformance.py
Browse file @ 62927b93
...
...
@@ -32,7 +32,7 @@ class insertFromCSVPerformace:
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/perf"
        self.config = "/etc/%s" % self.branchName
        self.conn = taos.connect(
            self.host,
            self.user,
...
...
@@ -95,6 +95,7 @@ class insertFromCSVPerformace:
            in_order_time = (float)(totalTime / 10)
            print("In order - Insert time: %f" % in_order_time)

        cursor.execute("drop database if exists %s" % self.dbName)
        cursor.close()
...
...
@@ -133,9 +134,9 @@ if __name__ == '__main__':
        '-b',
        '--branch-name',
        action='store',
        default='develop',
        default='2.4',
        type=str,
        help='branch name (default: develop)')
        help='branch name (default: 2.4)')
    parser.add_argument(
        '-T',
        '--build-type',
...
...
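The behavioural changes in this file are the branch-keyed client configuration directory ("/etc/%s" % branchName), a cleanup of the test database after the run, and a default branch of 2.4. A sketch of how a connection is formed under that convention; the remaining connect arguments are truncated in the hunk above, so passing the directory through a config keyword is an assumption here, and all values are placeholders:

import taos

branch_name = "2.4"                      # matches the new --branch-name default
config_dir = "/etc/%s" % branch_name     # per-branch taos.cfg directory, e.g. /etc/2.4

# Assumed connector signature with host/user/password/config keyword arguments.
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config=config_dir)
conn.close()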
tests/pytest/query/queryPerformance.py → tests/pytest/perfbenchmark/queryPerformance.py
Browse file @ 62927b93
...
...
@@ -32,7 +32,7 @@ class taosdemoQueryPerformace:
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/perf"
        self.config = "/etc/%s" % self.branch
        self.conn = taos.connect(
            self.host,
            self.user,
...
...
@@ -55,35 +55,35 @@ class taosdemoQueryPerformace:
        tableid = 1
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select avg(f1), max(f2), min(f3) from test.meters"
        sql = "select avg(current), max(voltage), min(phase) from test.meters"
        tableid = 2
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select count(*) from test.meters where loc='beijing'"
        sql = "select count(*) from test.meters where location='beijing'"
        tableid = 3
        cursor2.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select avg(f1), max(f2), min(f3) from test.meters where areaid=10"
        sql = "select avg(current), max(voltage), min(phase) from test.meters where groupid=10"
        tableid = 4
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select avg(f1), max(f2), min(f3) from test.t10 interval(10s)"
        sql = "select avg(current), max(voltage), min(phase) from test.d10 interval(10s)"
        tableid = 5
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select last_row(*) from meters"
        sql = "select last_row(*) from test.meters"
        tableid = 6
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select * from meters limit 10000"
        sql = "select * from test.meters limit 10000"
        tableid = 7
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'"
        sql = "select avg(current), max(voltage), min(phase) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'"
        tableid = 8
        cursor2.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql))

        sql = "select last(*) from meters"
        sql = "select last(*) from test.meters"
        tableid = 9
        cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
...
...
@@ -106,7 +106,7 @@ class taosdemoQueryPerformace:
        query_data = {
            "filetype": "query",
            "cfgdir": "/etc/perf",
            "cfgdir": "/etc/%s" % self.branch,
            "host": "127.0.0.1",
            "port": 6030,
            "user": "root",
...
...
@@ -126,6 +126,7 @@ class taosdemoQueryPerformace:
        return query_json_file

    def getBuildPath(self):
        buildPath = ""
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
...
...
@@ -134,7 +135,7 @@ class taosdemoQueryPerformace:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosdemo" in files):
            if ("perfMonitor" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
...
...
@@ -150,7 +151,7 @@ class taosdemoQueryPerformace:
    def query(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            print("taosdemo not found!")
            print("perfMonitor not found!")
            sys.exit(1)

        binPath = buildPath + "/build/bin/"
...
...
@@ -220,9 +221,9 @@ if __name__ == '__main__':
        '-b',
        '--git-branch',
        action='store',
        default='master',
        default='2.4',
        type=str,
        help='git branch (default: master)')
        help='git branch (default: 2.4)')
    parser.add_argument(
        '-T',
        '--build-type',
...
...
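The benchmark statements were rewritten against the schema that taosBenchmark actually creates (current/voltage/phase columns, location and groupid tags, child tables named d0, d1, ...), and the binary looked up is now perfMonitor rather than taosdemo. A quick way to sanity-check one of the updated statements by hand, assuming a local server whose test database has already been populated:

import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
cur = conn.cursor()
cur.execute("select avg(current), max(voltage), min(phase) from test.meters")
print(cur.fetchall())   # one row: (avg, max, min)
cur.close()
conn.close()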
tests/pytest/tools/taosdemoPerformance.py → tests/pytest/perfbenchmark/taosdemoPerformance.py
Browse file @ 62927b93
...
...
@@ -32,7 +32,7 @@ class taosdemoPerformace:
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/perf"
        self.config = "/etc/%s" % self.branch
        self.conn = taos.connect(
            self.host,
            self.user,
...
...
@@ -88,7 +88,7 @@ class taosdemoPerformace:
        insert_data = {
            "filetype": "insert",
            "cfgdir": "/etc/perf",
            "cfgdir": "/etc/%s" % self.branch,
            "host": "127.0.0.1",
            "port": 6030,
            "user": "root",
...
...
@@ -112,6 +112,7 @@ class taosdemoPerformace:
        return output

    def getBuildPath(self):
        buildPath = ""
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
...
...
@@ -130,7 +131,7 @@ class taosdemoPerformace:
    def insertData(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            print("taosdemo not found!")
            print("perfMonitor not found!")
            sys.exit(1)

        binPath = buildPath + "/build/bin/"
...
...
@@ -198,9 +199,9 @@ if __name__ == '__main__':
        '-b',
        '--git-branch',
        action='store',
        default='master',
        default='2.4',
        type=str,
        help='git branch (default: master)')
        help='git branch (default: 2.4)')
    parser.add_argument(
        '-T',
        '--build-type',
...
...
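Here too the cfgdir written into the generated insert JSON now points at the branch-named directory. A minimal sketch of how such a config fragment could be written out; only the keys visible in the hunk above are shown, a real file needs the remaining insert parameters, and the output path is hypothetical:

import json

branch = "2.4"
insert_data = {
    "filetype": "insert",
    "cfgdir": "/etc/%s" % branch,   # branch-named config dir, as in the diff
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    # password, databases and the super-table definition are omitted in this sketch
}

with open("/tmp/insert.json", "w") as f:   # hypothetical output path
    json.dump(insert_data, f, indent=4)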
tests/pytest/query/queryGroupbySort.py
Browse file @ 62927b93
...
...
@@ -16,6 +16,7 @@ import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import random


class TDTestCase:
...
...
@@ -56,6 +57,37 @@ class TDTestCase:
        tdSql.checkData(0, 0, 11.6)
        tdSql.query("select avg(current) from meters group by t4;")
        tdSql.query("select avg(current) from meters group by t3,t4;")

        # TS-899
        tdSql.execute("create table stb(ts timestamp, c0 int, c1 double, c2 binary(20), c3 int) tags(t1 int, t2 nchar(20))")
        tags = ["beijing", "shanghai"]
        cols = ["chaoyang", "haidian", "shunyi"]
        for i in range(10):
            tdSql.execute("create table t%d using stb tags(%d, '%s')" % (i, i, tags[i % 2]))
            sql = "insert into t%d values" % i
            for j in range(100):
                sql += "(%d, %d, %f, '%s', %d)" % (self.ts + j, j % 10 + 1, random.uniform(1, 10), cols[j % 3], j % 5 + 1)
            tdSql.execute(sql)

        tdSql.error("select count(*) from stb group by c1")
        tdSql.query("select count(*), c0, c2, c3 from stb group by c0, c2, c3")
        tdSql.checkRows(30)

        tdSql.query("select count(*), c0, c2, c3 from stb group by c0, c2, c3, t1, t2")
        tdSql.checkRows(300)

        tdSql.query("select count(*), c0 from t0 group by c0")
        tdSql.checkRows(10)
        tdSql.checkData(0, 0, 10)

        # corner cases
        tdSql.execute("create table tb(ts timestamp, c0 int, c1 bool, c2 float, c3 double)")
        tdSql.error("select count(*) from tb group by c2")
        tdSql.error("select count(*) from tb group by c3")
        tdSql.error("select count(*) from tb group by c2, c3")
        tdSql.query("select count(*) from tb group by c1")
        tdSql.checkRows(0)

    def stop(self):
        tdSql.close()
...
...
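The TS-899 case above is easier to read once its expected row counts are justified: the inserted values cycle with periods 10 (c0), 3 (c2) and 5 (c3), so the composite key repeats every lcm(10, 3, 5) = 30 rows, giving 30 groups over the super table and 300 once the ten child tables' tags (t1, t2) join the key. A two-line check of that arithmetic:

cols = ["chaoyang", "haidian", "shunyi"]
combos = {(j % 10 + 1, cols[j % 3], j % 5 + 1) for j in range(100)}
print(len(combos), len(combos) * 10)   # 30 300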
tests/pytest/tools/taosdumpTest2.py
Browse file @ 62927b93
...
...
@@ -30,7 +30,7 @@ class TDTestCase:
        self.ts = 1601481600000
        self.numberOfTables = 1
        self.numberOfRecords = 15000
        self.numberOfRecords = 150

    def getPath(self, tool="taosdump"):
        selfPath = os.path.dirname(os.path.realpath(__file__))
...
...
tests/pytest/util/dnodes.py
Browse file @ 62927b93
...
...
@@ -65,10 +65,12 @@ class TDSimClient:
        cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def os_string(self, path):
        os_path = path.replace("/", os.sep)
    def os_string(self, path):
        os_path = path.replace("/", os.sep)
        return os_path

    def deploy(self):
    def deploy(self, *updatecfgDict):
        self.logDir = self.os_string("%s/sim/psim/log" % (self.path))
        self.cfgDir = self.os_string("%s/sim/psim/cfg" % (self.path))
        self.cfgPath = self.os_string("%s/sim/psim/cfg/taos.cfg" % (self.path))
...
...
@@ -76,11 +78,11 @@ class TDSimClient:
        # cmd = "rm -rf " + self.logDir
        # if os.system(cmd) != 0:
        #     tdLog.exit(cmd)
        if os.path.exists(self.logDir):
        if os.path.exists(self.logDir):
            try:
                shutil.rmtree(self.logDir)
            except:
                tdLog.exit("del %s failed" % self.logDir)
            except BaseException:
                tdLog.exit("del %s failed" % self.logDir)
        # cmd = "mkdir -p " + self.logDir
        # if os.system(cmd) != 0:
        #     tdLog.exit(cmd)
...
...
@@ -88,11 +90,11 @@ class TDSimClient:
        # cmd = "rm -rf " + self.cfgDir
        # if os.system(cmd) != 0:
        #     tdLog.exit(cmd)
        if os.path.exists(self.cfgDir):
        if os.path.exists(self.cfgDir):
            try:
                shutil.rmtree(self.cfgDir)
            except:
                tdLog.exit("del %s failed" % self.cfgDir)
            except BaseException:
                tdLog.exit("del %s failed" % self.cfgDir)
        # cmd = "mkdir -p " + self.cfgDir
        # if os.system(cmd) != 0:
        #     tdLog.exit(cmd)
...
...
@@ -102,8 +104,8 @@ class TDSimClient:
        #     tdLog.exit(cmd)
        try:
            pathlib.Path(self.cfgPath).touch()
        except:
            tdLog.exit("create %s failed" % self.cfgPath)
        except BaseException:
            tdLog.exit("create %s failed" % self.cfgPath)

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
...
...
@@ -111,6 +113,15 @@ class TDSimClient:
        for key, value in self.cfgDict.items():
            self.cfg(key, value)

        try:
            if updatecfgDict and updatecfgDict[0] and updatecfgDict[0][0]:
                clientCfg = dict(updatecfgDict[0][0].get('clientCfg'))
                if clientCfg is not None:
                    for key, value in clientCfg.items():
                        self.cfg(key, value)
        except Exception as e:
            pass

        tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
...
...
@@ -123,36 +134,36 @@ class TDDnode:
        self.testCluster = False
        self.valgrind = 0
        self.cfgDict = {
            "numOfLogLines": "100000000",
            "mnodeEqualVnodeNum": "0",
            "walLevel": "2",
            "fsync": "1000",
            "statusInterval": "1",
            "numOfMnodes": "3",
            "numOfThreadsPerCore": "2.0",
            "monitor": "0",
            "maxVnodeConnections": "30000",
            "maxMgmtConnections": "30000",
            "maxMeterConnections": "30000",
            "maxShellConns": "30000",
            "locale": "en_US.UTF-8",
            "charset": "UTF-8",
            "asyncLog": "0",
            "anyIp": "0",
            "telemetryReporting": "0",
            "dDebugFlag": "135",
            "tsdbDebugFlag": "135",
            "mDebugFlag": "135",
            "sdbDebugFlag": "135",
            "rpcDebugFlag": "135",
            "tmrDebugFlag": "131",
            "cDebugFlag": "135",
            "httpDebugFlag": "135",
            "monitorDebugFlag": "135",
            "udebugFlag": "135",
            "jnidebugFlag": "135",
            "qdebugFlag": "135",
            "maxSQLLength": "1048576",
            "numOfLogLines": "100000000",
            "mnodeEqualVnodeNum": "0",
            "walLevel": "2",
            "fsync": "1000",
            "statusInterval": "1",
            "numOfMnodes": "3",
            "numOfThreadsPerCore": "2.0",
            "monitor": "0",
            "maxVnodeConnections": "30000",
            "maxMgmtConnections": "30000",
            "maxMeterConnections": "30000",
            "maxShellConns": "30000",
            "locale": "en_US.UTF-8",
            "charset": "UTF-8",
            "asyncLog": "0",
            "anyIp": "0",
            "telemetryReporting": "0",
            "dDebugFlag": "135",
            "tsdbDebugFlag": "135",
            "mDebugFlag": "135",
            "sdbDebugFlag": "135",
            "rpcDebugFlag": "135",
            "tmrDebugFlag": "131",
            "cDebugFlag": "135",
            "httpDebugFlag": "135",
            "monitorDebugFlag": "135",
            "udebugFlag": "135",
            "jnidebugFlag": "135",
            "qdebugFlag": "135",
            "maxSQLLength": "1048576",
            "enableCoreFile": "1",
        }
...
...
@@ -200,17 +211,11 @@ class TDDnode:
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "mkdir -p " + self.dataDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        os.makedirs(self.dataDir, exist_ok=True)  # like "mkdir -p"

        cmd = "mkdir -p " + self.logDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        os.makedirs(self.logDir, exist_ok=True)  # like "mkdir -p"

        cmd = "mkdir -p " + self.cfgDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        os.makedirs(self.cfgDir, exist_ok=True)  # like "mkdir -p"

        cmd = "touch " + self.cfgPath
        if os.system(cmd) != 0:
...
...
@@ -233,16 +238,18 @@ class TDDnode:
        isFirstDir = 1
        if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
            print(updatecfgDict[0][0])
            for key, value in updatecfgDict[0][0].items():
                if value == 'dataDir':
            for key, value in updatecfgDict[0][0].items():
                if key == "clientCfg":
                    continue
                if value == 'dataDir':
                    if isFirstDir:
                        self.cfgDict.pop('dataDir')
                        self.cfg(value, key)
                        self.cfg(value, key)
                        isFirstDir = 0
                    else:
                        self.cfg(value, key)
                        self.cfg(value, key)
                else:
                    self.addExtraCfg(key, value)
                    self.addExtraCfg(key, value)

        for key, value in self.cfgDict.items():
            self.cfg(key, value)
...
...
@@ -251,8 +258,7 @@ class TDDnode:
            "dnode:%d is deployed and configured by %s" %
            (self.index, self.cfgPath))

    def getBuildPath(self, tool="taosd"):
        buildPath = ""
    def getPath(self, tool="taosd"):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
...
...
@@ -260,24 +266,30 @@ class TDDnode:
        else:
            projPath = selfPath[:selfPath.find("tests")]

        paths = []
        for root, dirs, files in os.walk(projPath):
            if ((tool) in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    paths.append(os.path.join(root, tool))
                    break
        return buildPath
        if (len(paths) == 0):
            return ""
        return paths[0]

    def start(self):
        buildPath = self.getBuildPath()
        binPath = self.getPath()

        if (buildPath == ""):
        if (binPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
            tdLog.info("taosd found: %s" % binPath)

        binPath = buildPath + "/build/bin/taosd"
        taosadapterBinPath = buildPath + "/build/bin/taosadapter"
        taosadapterBinPath = self.getPath("taosadapter")
        if (taosadapterBinPath == ""):
            tdLog.info("taosAdapter not found!")
        else:
            tdLog.info("taosAdapter found: %s" % taosadapterBinPath)

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))
...
...
@@ -294,7 +306,8 @@ class TDDnode:
            print(cmd)

            taosadapterCmd = "nohup %s --opentsdb_telnet.enable=true > /dev/null 2>&1 & " % (
                taosadapterBinPath)
                taosadapterBinPath)
            tdLog.info(taosadapterCmd)
            if os.system(taosadapterCmd) != 0:
                tdLog.exit(taosadapterCmd)
...
...
@@ -306,18 +319,22 @@ class TDDnode:
            if self.valgrind == 0:
                time.sleep(0.1)
                key = 'from offline to online'
                bkey = bytes(key, encoding="utf8")
                bkey = bytes(key, encoding="utf8")
                logFile = self.logDir + "/taosdlog.0"
                i = 0
                while not os.path.exists(logFile):
                    sleep(0.1)
                    i += 1
                    if i > 50:
                    if i > 50:
                        break
                popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                pid = popen.pid
                # print('Popen.pid:' + str(pid))
                timeout = time.time() + 60 * 2
                timeout = time.time() + 60 * 2
                while True:
                    line = popen.stdout.readline().strip()
                    if bkey in line:
...
...
@@ -327,22 +344,89 @@ class TDDnode:
                        tdLog.exit('wait too long for taosd start')
                tdLog.debug("the dnode:%d has been started." % (self.index))
            else:
                tdLog.debug("wait 10 seconds for the dnode:%d to start." % (self.index))
                tdLog.debug("wait 10 seconds for the dnode:%d to start." % (self.index))
                time.sleep(10)

        # time.sleep(5)

    def startWin(self):
        binPath = self.getPath("taosd.exe")

        if (binPath == ""):
            tdLog.exit("taosd.exe not found!")
        else:
            tdLog.info("taosd.exe found: %s" % binPath)

        taosadapterBinPath = self.getPath("taosadapter.exe")
        if (taosadapterBinPath == ""):
            tdLog.info("taosAdapter.exe not found!")
        else:
            tdLog.info("taosAdapter.exe found in %s" % taosadapterBuildPath)

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))

        cmd = "mintty -h never -w hide %s -c %s" % (binPath, self.cfgDir)

        taosadapterCmd = "mintty -h never -w hide %s " % (taosadapterBinPath)
        if os.system(taosadapterCmd) != 0:
            tdLog.exit(taosadapterCmd)

        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        self.running = 1
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
        if self.valgrind == 0:
            time.sleep(0.1)
            key = 'from offline to online'
            bkey = bytes(key, encoding="utf8")
            logFile = self.logDir + "/taosdlog.0"
            i = 0
            while not os.path.exists(logFile):
                sleep(0.1)
                i += 1
                if i > 50:
                    break
            popen = subprocess.Popen('tail -n +0 -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            pid = popen.pid
            # print('Popen.pid:' + str(pid))
            timeout = time.time() + 60 * 2
            while True:
                line = popen.stdout.readline().strip()
                if bkey in line:
                    popen.kill()
                    break
                if time.time() > timeout:
                    tdLog.exit('wait too long for taosd start')
            tdLog.debug("the dnode:%d has been started." % (self.index))
        else:
            tdLog.debug("wait 10 seconds for the dnode:%d to start." % (self.index))
            time.sleep(10)

    def startWithoutSleep(self):
        buildPath = self.getBuildPath()
        binPath = self.getPath()

        if (buildPath == ""):
        if (binPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
            tdLog.info("taosd found: %s" % binPath)

        binPath = buildPath + "/build/bin/taosd"
        taosadapterBinPath = buildPath + "/build/bin/taosadapter"
        taosadapterBinPath = self.getPath("taosadapter")
        if (taosadapterBinPath == ""):
            tdLog.exit("taosAdapter not found!")
        else:
            tdLog.info("taosAdapter found: %s" % taosadapterBinPath)

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))
...
...
@@ -372,14 +456,14 @@ class TDDnode:
        taosadapterPsCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % taosadapterToBeKilled
        taosadapterProcessID = subprocess.check_output(
            taosadapterPsCmd, shell=True).decode("utf-8")
            taosadapterPsCmd, shell=True).decode("utf-8")

        while(taosadapterProcessID):
            taosadapterKillCmd = "kill -INT %s > /dev/null 2>&1" % taosadapterProcessID
            os.system(taosadapterKillCmd)
            time.sleep(1)
            taosadapterProcessID = subprocess.check_output(
                taosadapterPsCmd, shell=True).decode("utf-8")
                taosadapterPsCmd, shell=True).decode("utf-8")

        if self.valgrind == 0:
            toBeKilled = "taosd"
...
...
@@ -532,7 +616,7 @@ class TDDnodes:
        self.sim.setTestCluster(self.testCluster)

        if (self.simDeployed == False):
            self.sim.deploy()
            self.sim.deploy(updatecfgDict)
            self.simDeployed = True

        self.check(index)
...
...
@@ -547,7 +631,11 @@ class TDDnodes:
    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()

    def startWin(self, index):
        self.check(index)
        self.dnodes[index - 1].startWin()

    def startWithoutSleep(self, index):
        self.check(index)
        self.dnodes[index - 1].startWithoutSleep()
...
...
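The net effect of the dnodes.py changes: TDSimClient.deploy() now accepts the updatecfgDict a test case declares and forwards its optional 'clientCfg' sub-dict into the simulated client's taos.cfg, while TDDnode.deploy() skips that key when writing the dnode config; binary discovery goes through getPath(tool), which returns the full executable path instead of a build directory. A sketch of the dict shape a test case would declare under this scheme; the concrete option names are illustrative, not mandated by the diff:

# Declared in a test case and picked up via tdDnodes.deploy(updatecfgDict):
updatecfgDict = {
    "rpcDebugFlag": "143",            # written into the dnode's taos.cfg (assumed option for illustration)
    "clientCfg": {                    # forwarded only to the simulated client's taos.cfg
        "charset": "UTF-8",
    },
}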
tests/script/general/parser/groupby.sim
Browse file @ 62927b93
...
...
@@ -783,13 +783,13 @@ if $data11 != 2 then
  return -1
endi

sql_error select count(*) from m1 group by tbname,k,f1;
sql_error select count(*) from m1 group by tbname,k,a;
sql_error select count(*) from m1 group by k, tbname;
sql_error select count(*) from m1 group by k,f1;
#sql_error select count(*) from m1 group by tbname,k,f1;
#sql_error select count(*) from m1 group by tbname,k,a;
#sql_error select count(*) from m1 group by k, tbname;
#sql_error select count(*) from m1 group by k,f1;
sql_error select count(*) from tm0 group by tbname;
sql_error select count(*) from tm0 group by a;
sql_error select count(*) from tm0 group by k,f1;
#sql_error select count(*) from tm0 group by k,f1;
sql_error select count(*),f1 from m1 group by tbname,k;
...
...
tests/system-test/5-taos-tools/basic.py
Browse file @ 62927b93
...
...
@@ -23,33 +23,31 @@ import subprocess
class TDTestCase:
    def caseDescription(self):
        '''
        case1<pxiao>: [TD-11977] start taosdump without taosd
        case1<pxiao>: [TD-11977] start taosdump without taosd
        case1<pxiao>: [TD-11977] start taosBenchmark without taosd
        case1<pxiao>: [TD-11977] start taosAdaptor without taosd
        '''
        '''
        return

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tools = ["taosdump", "taosBenchmark", "taosAdaptor"]
        tdSql.prepare()
        tools = ["taosdump", "taosBenchmark", "taosadaptor"]
        tdDnodes.stop(1)

        for tool in tools:
            path = tdDnodes.dnodes[1].getBuildPath(tool)
        for tool in tools:
            path = tdDnodes.dnodes[1].getPath(tool)
            try:
                path += "/build/bin/"
                print(f"{path}{tool}")
                if tool == "taosBenchmark":
                    os.system(f"{path}{tool} -y")
                    os.system(f"{path} -y")
                else:
                    os.system(f"{path}{tool}")
            except:
                    os.system(f"{path}")
            except BaseException:
                pass

    def stop(self):
...
...