diff --git a/Jenkinsfile b/Jenkinsfile index 8689e19274fd87e85e386e4d597a762309d8fcb4..9fd44923c02875fa5f6483d37e10eaf1807cac12 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -46,6 +46,7 @@ def pre_test(){ killall -9 gdb || echo "no gdb running" killall -9 python3.8 || echo "no python program running" cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git reset --hard HEAD~10 >/dev/null ''' script { @@ -77,6 +78,8 @@ def pre_test(){ git checkout -qf FETCH_HEAD git clean -dfx git submodule update --init --recursive + cd src/kit/taos-tools/deps/avro + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -121,6 +124,7 @@ def pre_test_noinstall(){ sh'hostname' sh''' cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git reset --hard HEAD~10 >/dev/null ''' script { @@ -152,6 +156,8 @@ def pre_test_noinstall(){ git checkout -qf FETCH_HEAD git clean -dfx git submodule update --init --recursive + cd src/kit/taos-tools/deps/avro + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -184,7 +190,7 @@ def pre_test_noinstall(){ git clean -dfx mkdir debug cd debug - cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=false > /dev/null + cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null make ''' return 1 @@ -193,6 +199,7 @@ def pre_test_mac(){ sh'hostname' sh''' cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git reset --hard HEAD~10 >/dev/null ''' script { @@ -224,6 +231,8 @@ def pre_test_mac(){ git checkout -qf FETCH_HEAD git clean -dfx git submodule update --init --recursive + cd src/kit/taos-tools/deps/avro + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -352,7 +361,7 @@ pipeline { } stages { stage('pre_build'){ - agent{label 'catalina'} + agent{label 'master'} options { skipDefaultCheckout() } when { changeRequest() @@ -361,37 +370,13 @@ pipeline { script{ abort_previous() abortPreviousBuilds() - println env.CHANGE_BRANCH - if(env.CHANGE_FORK){ - scope = ['connector','query','insert','other','tools','taosAdapter'] - } - else{ - sh''' - cd ${WKC} - git reset --hard HEAD~10 - git fetch - git checkout ${CHANGE_BRANCH} - git pull - ''' - dir('/var/lib/jenkins/workspace/TDinternal/community'){ - gitlog = sh(script: "git log -1 --pretty=%B ", returnStdout:true) - println gitlog - if (!(gitlog =~ /\((.*?)\)/)){ - autoCancelled = true - error('Please fill in the scope information correctly.\neg. 
[TD-xxxx](query,insert):xxxxxxxxxxxxxxxxxx ') - } - temp = (gitlog =~ /\((.*?)\)/) - temp = temp[0].remove(1) - scope = temp.split(",") - scope = ['connector','query','insert','other','tools','taosAdapter'] - Collections.shuffle mod - Collections.shuffle sim_mod - } + scope = ['connector','query','insert','other','tools','taosAdapter'] + Collections.shuffle mod + Collections.shuffle sim_mod + } - } - } } - } + } stage('Parallel test stage') { //only build pr options { skipDefaultCheckout() } diff --git a/cmake/define.inc b/cmake/define.inc index 36df42dd8c9be3980398540c97d5ef879597a8d1..a15a0725ebcc04683cee3559e69cf667a060fc7d 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -110,10 +110,10 @@ IF (TD_MIPS_32) ENDIF () IF (TD_ALPINE) - SET(COMMON_FLAGS "${COMMON_FLAGS} -largp") - link_libraries(/usr/lib/libargp.a) + SET(COMMON_FLAGS "${COMMON_FLAGS} -Wl,-z,stack-size=2097152") + link_libraries(argp) ADD_DEFINITIONS(-D_ALPINE) - MESSAGE(STATUS "aplhine is defined") + MESSAGE(STATUS "alpine is defined") ENDIF () IF ("${BUILD_HTTP}" STREQUAL "") diff --git a/cmake/platform.inc b/cmake/platform.inc index 2a0aace8d08e9dba1451daa051df4b614a21d398..b0e463026ef64d3ce662911001daa17488dfe321 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -96,10 +96,12 @@ IF ("${CPUTYPE}" STREQUAL "") MESSAGE(STATUS "The current platform is amd64") MESSAGE(STATUS "Set CPUTYPE to x64") SET(CPUTYPE "x64") + SET(PLATFORM_ARCH_STR "amd64") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)") MESSAGE(STATUS "The current platform is x86") MESSAGE(STATUS "Set CPUTYPE to x86") SET(CPUTYPE "x32") + SET(PLATFORM_ARCH_STR "i386") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l") MESSAGE(STATUS "Set CPUTYPE to aarch32") SET(CPUTYPE "aarch32") @@ -107,12 +109,14 @@ IF ("${CPUTYPE}" STREQUAL "") SET(TD_LINUX TRUE) SET(TD_LINUX_32 FALSE) SET(TD_ARM_32 TRUE) + SET(PLATFORM_ARCH_STR "arm") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") SET(CPUTYPE "aarch64") MESSAGE(STATUS "Set CPUTYPE 
to aarch64") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_ARM_64 TRUE) + SET(PLATFORM_ARCH_STR "arm64") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64") SET(CPUTYPE "mips64") MESSAGE(STATUS "Set CPUTYPE to mips64") @@ -124,7 +128,6 @@ IF ("${CPUTYPE}" STREQUAL "") MESSAGE(STATUS "Set CPUTYPE to apple silicon m1") SET(TD_ARM_64 TRUE) ENDIF () - ELSE () # if generate ARM version: # cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64 @@ -132,27 +135,33 @@ ELSE () SET(TD_LINUX TRUE) SET(TD_LINUX_32 FALSE) SET(TD_ARM_32 TRUE) + SET(PLATFORM_ARCH_STR "arm") MESSAGE(STATUS "input cpuType: aarch32") ELSEIF (${CPUTYPE} MATCHES "aarch64") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_ARM_64 TRUE) + SET(PLATFORM_ARCH_STR "arm64") MESSAGE(STATUS "input cpuType: aarch64") ELSEIF (${CPUTYPE} MATCHES "mips64") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_MIPS_64 TRUE) + SET(PLATFORM_ARCH_STR "mips") MESSAGE(STATUS "input cpuType: mips64") ELSEIF (${CPUTYPE} MATCHES "x64") + SET(PLATFORM_ARCH_STR "amd64") MESSAGE(STATUS "input cpuType: x64") ELSEIF (${CPUTYPE} MATCHES "x86") + SET(PLATFORM_ARCH_STR "i386") MESSAGE(STATUS "input cpuType: x86") ELSE () MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE}) ENDIF () - ENDIF () +MESSAGE(STATUS "platform arch:" ${PLATFORM_ARCH_STR}) + # cmake -DOSTYPE=Ningsi IF (${OSTYPE} MATCHES "Ningsi60") SET(TD_NINGSI TRUE) diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md index 4ac6d96ec1de161d3259c5246e78565ec2cfc726..c546248acba4c8ffbd9eb3e8b6b86d00059973bd 100644 --- a/documentation20/cn/02.getting-started/01.docker/docs.md +++ b/documentation20/cn/02.getting-started/01.docker/docs.md @@ -49,7 +49,7 @@ c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes · ```bash $ docker exec -it tdengine /bin/bash -root@c452519b0f9b:~/TDengine-server-2.0.20.13# +root@tdengine-server:~/TDengine-server-2.4.0.4# ``` - **docker exec**:通过 docker exec 
命令进入容器,如果退出,容器不会停止。 @@ -61,38 +61,245 @@ root@c452519b0f9b:~/TDengine-server-2.0.20.13# 4,进入容器后,执行 taos shell 客户端程序。 ```bash -$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos +$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. -taos> +taos> ``` TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 在 TDengine 终端中,可以通过 SQL 命令来创建/删除数据库、表、超级表等,并可以进行插入和查询操作。具体可以参考 [TAOS SQL 说明文档](https://www.taosdata.com/cn/documentation/taos-sql)。 -## 通过 taosdemo 进一步了解 TDengine +## 通过 taosBenchmark 进一步了解 TDengine -1,接上面的步骤,先退出 TDengine 终端程序。 +### 在宿主机访问 Docker 容器中的 TDengine server + +在使用了 -p 命令行参数映射了正确的端口启动了 TDengine Docker 容器后,就在宿主机使用 taos shell 命令即可访问运行在 Docker 容器中的 TDengine。 + +``` +$ taos + +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. 
+ +taos> +``` + +也可以在宿主机使用 curl 通过 RESTful 端口访问 Docker 容器内的 TDengine server。 + +``` +$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} +``` + +这条命令,通过 RESTful 接口访问 TDengine server,这时连接的是本机的 6041 端口,可见连接成功。 + +TDengine RESTful 接口详情请参考[官方文档](https://www.taosdata.com/cn/documentation/connector#restful)。 + + +### 使用 Docker 容器运行 TDengine server 和 taosAdapter + +在 TDegnine 2.4.0.0 之后版本的 Docker 容器,开始提供一个独立运行的组件 taosAdapter,代替之前版本 TDengine 中 taosd 进程中内置的 http server。taosAdapter 支持通过 RESTful 接口对 TDengine server 的数据写入和查询能力,并提供和 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。在新版本 Docker 镜像中,默认运行 taosd,而 taosAdapter 没有运行;也可以使用 docker run 命令中指定 taosadapter 的方式来运行 taosadapter,而 taosd 不会运行;或者在 docker run 命令中指定运行 run_taosd_and_taosadapter.sh 来同时运行 taosd 和 taosAdapter。 + +注意:如果容器中运行 taosAdapter,需要根据需要增加映射其他端口,具体端口默认配置和修改方法请参考[taosAdapter文档](https://github.com/taosdata/taosadapter/blob/develop/README-CN.md)。 + +使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosd): + +``` +$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 +``` + +使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosAdapter): + +``` +$ docker run -d --name tdengine-taosa -p 
6041-6049:6041-6049 -p 6041-6049:6041-6049/udp tdengine/tdengine:2.4.0.4 taosadapter +``` + +使用 docker 运行 TDengine 2.4.0.4 版本镜像(同时运行 taosd 和 taosAdapter): + +``` +$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 run_taosd_and_taosadapter.sh +``` + + +使用 curl 命令验证 RESTful 接口可以正常工作: +``` +$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql + +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} +``` + +taosAdapter 支持多个数据收集代理软件(如 Telegraf、StatsD、collectd 等),这里仅模拟 StasD 写入数据,在宿主机执行命令如下: +``` +$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 +``` + +然后可以使用 taos shell 查询 taosAdapter 自动创建的数据库 statsd 和 超级表 foo 中的内容: +``` +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | +==================================================================================================================================================================================================================================================================================== + log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | + statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 
4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | +Query OK, 2 row(s) in set (0.002112s) + +taos> use statsd; +Database changed. + +taos> show stables; + name | created_time | columns | tags | tables | +============================================================================================ + foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 | +Query OK, 1 row(s) in set (0.001160s) + +taos> select * from foo; + ts | value | metric_type | +======================================================================================= + 2021-12-28 09:21:48.840820836 | 1 | counter | +Query OK, 1 row(s) in set (0.001639s) + +taos> +``` + +可以看到模拟数据已经被写入到 TDengine 中。 + + +### 应用示例:在宿主机使用 taosBenchmark 写入数据到 Docker 容器中的 TDengine server + +1,在宿主机命令行界面执行 taosBenchmark (曾命名为 taosdemo)写入数据到 Docker 容器中的 TDengine server + +```bash +$ taosBenchmark + +taosBenchmark is simulating data generated by power equipments monitoring... + +host: 127.0.0.1:6030 +user: root +password: taosdata +configDir: +resultFile: ./output.txt +thread num of insert data: 10 +thread num of create table: 10 +top insert interval: 0 +``` + +使用 curl 命令验证 RESTful 接口可以正常工作: +$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql + +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} +``` + +taosAdapter 支持多个数据收集代理软件(如 Telegraf、StatsD、collectd 等),这里仅模拟 StasD 写入数据,在宿主机执行命令如下: +``` +$ echo "foo:1|c" | nc -u -w0 
127.0.0.1 6044 +``` + +然后可以使用 taos shell 查询 taosAdapter 自动创建的数据库 statsd 和 超级表 foo 中的内容: +``` +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | +==================================================================================================================================================================================================================================================================================== + log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | + statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | +Query OK, 2 row(s) in set (0.002112s) + +taos> use statsd; +Database changed. + +taos> show stables; + name | created_time | columns | tags | tables | +============================================================================================ + foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 | +Query OK, 1 row(s) in set (0.001160s) + +taos> select * from foo; + ts | value | metric_type | +======================================================================================= + 2021-12-28 09:21:48.840820836 | 1 | counter | +Query OK, 1 row(s) in set (0.001639s) + +taos> +``` + +可以看到模拟数据已经被写入到 TDengine 中。 + + +### 应用示例:在宿主机使用 taosBenchmark 写入数据到 Docker 容器中的 TDengine server + +1,在宿主机命令行界面执行 taosBenchmark 写入数据到 Docker 容器中的 TDengine server ```bash -$ taos> q -root@c452519b0f9b:~/TDengine-server-2.0.20.13# +$ taosBenchmark + +taosBenchmark is simulating data generated by power equipments monitoring... 
+ +host: 127.0.0.1:6030 +user: root +password: taosdata +configDir: +resultFile: ./output.txt +thread num of insert data: 10 +thread num of create table: 10 +top insert interval: 0 + +使用 curl 命令验证 RESTful 接口可以正常工作: +``` +$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql + +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} +``` + +taosAdapter 支持多个数据收集代理软件(如 Telegraf、StatsD、collectd 等),这里仅模拟 StasD 写入数据,在宿主机执行命令如下: ``` +$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 +``` + +然后可以使用 taos shell 查询 taosAdapter 自动创建的数据库 statsd 和 超级表 foo 中的内容: +``` +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | +==================================================================================================================================================================================================================================================================================== + log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | + statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | +Query OK, 2 row(s) in set (0.002112s) + +taos> use statsd; +Database changed. 
+ +taos> show stables; + name | created_time | columns | tags | tables | +============================================================================================ + foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 | +Query OK, 1 row(s) in set (0.001160s) + +taos> select * from foo; + ts | value | metric_type | +======================================================================================= + 2021-12-28 09:21:48.840820836 | 1 | counter | +Query OK, 1 row(s) in set (0.001639s) + +taos> +``` + +可以看到模拟数据已经被写入到 TDengine 中。 + + +### 应用示例:在宿主机使用 taosBenchmark 写入数据到 Docker 容器中的 TDengine server -2,在命令行界面执行 taosdemo。 +1,在宿主机命令行界面执行 taosBenchmark 写入数据到 Docker 容器中的 TDengine server ```bash -root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo +$ taosBenchmark -taosdemo is simulating data generated by power equipments monitoring... +taosBenchmark is simulating data generated by power equipments monitoring... host: 127.0.0.1:6030 user: root password: taosdata -configDir: +configDir: resultFile: ./output.txt thread num of insert data: 10 thread num of create table: 10 @@ -121,13 +328,13 @@ database[0]: maxSqlLen: 1048576 timeStampStep: 1 startTimestamp: 2017-07-14 10:40:00.000 - sampleFormat: - sampleFile: - tagsFile: + sampleFormat: + sampleFile: + tagsFile: columnCount: 3 -column[0]:FLOAT column[1]:INT column[2]:FLOAT +column[0]:FLOAT column[1]:INT column[2]:FLOAT tagCount: 2 - tag[0]:INT tag[1]:BINARY(16) + tag[0]:INT tag[1]:BINARY(16) Press enter key to continue or Ctrl-C to stop ``` @@ -136,17 +343,17 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT 执行这条命令大概需要几分钟,最后共插入 1 亿条记录。 -3,进入 TDengine 终端,查看 taosdemo 生成的数据。 +2,进入 TDengine 终端,查看 taosBenchmark 生成的数据。 - **进入命令行。** ```bash -$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos +$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved. -taos> +taos> ``` - **查看数据库。** diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md index c01c2efb514c22883bbc9a8bd07a974ba37d3019..64200f17ff5912d4741ea69f7e4dffaa99f7c5c3 100644 --- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md @@ -1,20 +1,20 @@ - 如何使用 taosdemo 进行性能测试 + 如何使用 taosBenchmark 进行性能测试 == -自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosdemo 用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosdemo 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosdemo 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。 +自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。 -运行 taosdemo 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。 +运行 taosBenchmark 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。 -接下来本文为大家讲解 taosdemo 的使用介绍及注意事项。 +接下来本文为大家讲解 taosBenchmark 的使用介绍及注意事项。 -使用 taosdemo 进行写入测试 +使用 taosBenchmark 进行写入测试 -- -不使用任何参数的情况下执行 taosdemo 命令,输出如下: +不使用任何参数的情况下执行 taosBenchmark 命令,输出如下: ``` -$ taosdemo +$ taosBenchmark -taosdemo is simulating data generated by power equipment monitoring... +taosBenchmark is simulating data generated by power equipment monitoring... 
host: 127.0.0.1:6030 user: root @@ -58,7 +58,7 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT Press enter key to continue or Ctrl-C to stop ``` -这里显示的是接下来 taosdemo 进行数据写入的各项参数。默认不输入任何命令行参数的情况下 taosdemo 将模拟生成一个电力行业典型应用的电表数据采集场景数据。即建立一个名为 test 的数据库,并创建一个名为 meters 的超级表,其中表结构为: +这里显示的是接下来 taosBenchmark 进行数据写入的各项参数。默认不输入任何命令行参数的情况下 taosBenchmark 将模拟生成一个电力行业典型应用的电表数据采集场景数据。即建立一个名为 test 的数据库,并创建一个名为 meters 的超级表,其中表结构为: ``` taos> describe test.meters; Field | Type | Length | Note | @@ -71,7 +71,7 @@ taos> describe test.meters; location | BINARY | 64 | TAG | Query OK, 6 row(s) in set (0.002972s) ``` -按任意键后 taosdemo 将建立数据库 test 和超级表 meters,并按照 TDengine 数据建模的最佳实践,以 meters 超级表为模板生成一万个子表,代表一万个独立上报数据的电表设备。 +按任意键后 taosBenchmark 将建立数据库 test 和超级表 meters,并按照 TDengine 数据建模的最佳实践,以 meters 超级表为模板生成一万个子表,代表一万个独立上报数据的电表设备。 ``` taos> use test; Database changed. @@ -82,7 +82,7 @@ taos> show stables; meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 | Query OK, 1 row(s) in set (0.001740s) ``` -然后 taosdemo 为每个电表设备模拟生成一万条记录: +然后 taosBenchmark 为每个电表设备模拟生成一万条记录: ``` ... ====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second==== @@ -99,9 +99,9 @@ Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1 insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms ``` -以上信息是在一台具备 8个CPU 64G 内存的普通 PC 服务器上进行实测的结果。显示 taosdemo 用了 18 秒的时间插入了 100000000 (一亿)条记录,平均每秒钟插入 552 万 9千零49 条记录。 +以上信息是在一台具备 8个CPU 64G 内存的普通 PC 服务器上进行实测的结果。显示 taosBenchmark 用了 18 秒的时间插入了 100000000 (一亿)条记录,平均每秒钟插入 552 万 9千零49 条记录。 -TDengine 还提供性能更好的参数绑定接口,而在同样的硬件上使用参数绑定接口 (taosdemo -I stmt )进行相同数据量的写入,结果如下: +TDengine 还提供性能更好的参数绑定接口,而在同样的硬件上使用参数绑定接口 (taosBenchmark -I stmt )进行相同数据量的写入,结果如下: ``` ... 
@@ -136,14 +136,14 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16 insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms ``` -显示 taosdemo 用了 6 秒的时间插入了一亿条记录,每秒钟插入性能高达 1659 万 5 千 590 条记录。 +显示 taosBenchmark 用了 6 秒的时间插入了一亿条记录,每秒钟插入性能高达 1659 万 5 千 590 条记录。 -由于 taosdemo 使用起来非常方便,我们又对 taosdemo 做了更多的功能扩充,使其支持更复杂的参数设置,便于进行快速原型开发的样例数据准备和验证工作。 +由于 taosBenchmark 使用起来非常方便,我们又对 taosBenchmark 做了更多的功能扩充,使其支持更复杂的参数设置,便于进行快速原型开发的样例数据准备和验证工作。 -完整的 taosdemo 命令行参数列表可以通过 taosdemo --help 显示如下: +完整的 taosBenchmark 命令行参数列表可以通过 taosBenchmark --help 显示如下: ``` -$ taosdemo --help +$ taosBenchmark --help -f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. -u, --user=USER The user name to use when connecting to the server. @@ -151,7 +151,7 @@ $ taosdemo --help -c, --config-dir=CONFIG_DIR Configuration directory. -h, --host=HOST TDengine server FQDN to connect. The default host is localhost. -P, --port=PORT The TCP/IP port number to use for the connection. --I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'. +-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. By default use 'taosc'. -d, --database=DATABASE Destination database. By default is 'test'. -a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3. -m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'. @@ -187,15 +187,15 @@ for any corresponding short options. Report bugs to . ``` -taosdemo 的参数是为了满足数据模拟的需求来设计的。下面介绍几个常用的参数: +taosBenchmark 的参数是为了满足数据模拟的需求来设计的。下面介绍几个常用的参数: ``` --I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'. +-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'. 
``` -前面介绍 taosdemo 不同接口的性能差异已经提到, -I 参数为选择不同的接口,目前支持 taosc、stmt 和 rest 几种。其中 taosc 为使用 SQL 语句方式进行数据写入;stmt 为使用参数绑定接口进行数据写入;rest 为使用 RESTful 协议进行数据写入。 +前面介绍 taosBenchmark 不同接口的性能差异已经提到, -I 参数为选择不同的接口,目前支持 taosc、stmt 和 rest 几种。其中 taosc 为使用 SQL 语句方式进行数据写入;stmt 为使用参数绑定接口进行数据写入;rest 为使用 RESTful 协议进行数据写入。 ``` -T, --threads=NUMBER The number of threads. Default is 8. ``` --T 参数设置 taosdemo 使用多少个线程进行数据同步写入,通过多线程可以尽最大可能压榨硬件的处理能力。 +-T 参数设置 taosBenchmark 使用多少个线程进行数据同步写入,通过多线程可以尽最大可能压榨硬件的处理能力。 ``` -b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT. @@ -203,7 +203,7 @@ taosdemo 的参数是为了满足数据模拟的需求来设计的。下面介 -l, --columns=COLUMNS The number of columns per record. Demo mode by default is 3 (float, int, float). Max values is 4095 ``` -前文提到,taosdemo 默认创建一个典型电表数据采集应用场景,每个设备包含电流电压相位3个采集量。对于需要定义不同的采集量,可以使用 -b 参数。TDengine 支持 BOOL、TINYINT、SMALLINT、INT、BIGINT、FLOAT、DOUBLE、BINARY、NCHAR、TIMESTAMP 等多种数据类型。通过 -b 加上以“ , ”(英文逗号)分割定制类型的列表可以使 taosdemo 建立对应的超级表和子表并插入相应模拟数据。通过 -w 参数可以指定 BINARY 和 NCHAR 数据类型的列的宽度(默认为 64 )。-l 参数可以在 -b 参数指定数据类型的几列之后补充以 INT 型的总的列数,特别多列的情况下可以减少手工输入的过程,最多支持到 4095 列。 +前文提到,taosBenchmark 默认创建一个典型电表数据采集应用场景,每个设备包含电流电压相位3个采集量。对于需要定义不同的采集量,可以使用 -b 参数。TDengine 支持 BOOL、TINYINT、SMALLINT、INT、BIGINT、FLOAT、DOUBLE、BINARY、NCHAR、TIMESTAMP 等多种数据类型。通过 -b 加上以“ , ”(英文逗号)分割定制类型的列表可以使 taosBenchmark 建立对应的超级表和子表并插入相应模拟数据。通过 -w 参数可以指定 BINARY 和 NCHAR 数据类型的列的宽度(默认为 64 )。-l 参数可以在 -b 参数指定数据类型的几列之后补充以 INT 型的总的列数,特别多列的情况下可以减少手工输入的过程,最多支持到 4095 列。 ``` -r, --rec-per-req=NUMBER The number of records per request. Default is 30000. ``` @@ -213,28 +213,28 @@ taosdemo 的参数是为了满足数据模拟的需求来设计的。下面介 -n, --records=NUMBER The number of records per table. Default is 10000. -M, --random The value of records generated are totally random. The default is to simulate power equipment senario. 
``` -前面提到 taosdemo 默认创建 10000 个表,每个表写入 10000 条记录。可以通过 -t 和 -n 设置表的数量和每个表的记录的数量。默认无参数生成的数据为模拟真实场景,模拟生成的数据为电流电压相位值增加一定的抖动,可以更真实表现 TDengine 高效的数据压缩能力。如果需要模拟生成完全随机数据,可以通过 -M 参数。 +前面提到 taosBenchmark 默认创建 10000 个表,每个表写入 10000 条记录。可以通过 -t 和 -n 设置表的数量和每个表的记录的数量。默认无参数生成的数据为模拟真实场景,模拟生成的数据为电流电压相位值增加一定的抖动,可以更真实表现 TDengine 高效的数据压缩能力。如果需要模拟生成完全随机数据,可以通过 -M 参数。 ``` -y, --answer-yes Default input yes for prompt. ``` -前面我们可以看到 taosdemo 默认在进行创建数据库或插入数据之前输出将要进行操作的参数列表,方便使用者在插入之前了解即将进行的数据写入的内容。为了方便进行自动测试,-y 参数可以使 taosdemo 输出参数后立刻进行数据写入操作。 +前面我们可以看到 taosBenchmark 默认在进行创建数据库或插入数据之前输出将要进行操作的参数列表,方便使用者在插入之前了解即将进行的数据写入的内容。为了方便进行自动测试,-y 参数可以使 taosBenchmark 输出参数后立刻进行数据写入操作。 ``` -O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order. -R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000. ``` -在某些场景,接收到的数据并不是完全按时间顺序到来,而是包含一定比例的乱序数据,TDengine 也能进行很好的处理。为了模拟乱序数据的写入,taosdemo 提供 -O 和 -R 参数进行设置。-O 参数为 0 和不使用 -O 参数相同为完全有序数据写入。1 到 50 为数据中包含乱序数据的比例。-R 参数为乱序数据时间戳偏移的范围,默认为 1000 毫秒。另外注意,时序数据以时间戳为唯一标识,所以乱序数据可能会生成和之前已经写入数据完全相同的时间戳,这样的数据会根据数据库创建的 update 值或者被丢弃(update 0)或者覆盖已有数据(update 1 或 2),而总的数据条数可能和期待的条数不一致的情况。 +在某些场景,接收到的数据并不是完全按时间顺序到来,而是包含一定比例的乱序数据,TDengine 也能进行很好的处理。为了模拟乱序数据的写入,taosBenchmark 提供 -O 和 -R 参数进行设置。-O 参数为 0 和不使用 -O 参数相同为完全有序数据写入。1 到 50 为数据中包含乱序数据的比例。-R 参数为乱序数据时间戳偏移的范围,默认为 1000 毫秒。另外注意,时序数据以时间戳为唯一标识,所以乱序数据可能会生成和之前已经写入数据完全相同的时间戳,这样的数据会根据数据库创建的 update 值或者被丢弃(update 0)或者覆盖已有数据(update 1 或 2),而总的数据条数可能和期待的条数不一致的情况。 ``` -g, --debug Print debug info. ``` -如果对 taosdemo 写入数据过程感兴趣或者数据写入结果不符合预期,可以使用 -g 参数使 taosdemo 打印执行过程中间调试信息到屏幕上,或通过 Linux 重定向命令导入到另外一个文件,方便找到发生问题的原因。另外 taosdemo 在执行失败后也会把相应执行的语句和调试原因输出到屏幕。可以搜索 reason 来找到 TDengine 服务端返回的错误原因信息。 +如果对 taosBenchmark 写入数据过程感兴趣或者数据写入结果不符合预期,可以使用 -g 参数使 taosBenchmark 打印执行过程中间调试信息到屏幕上,或通过 Linux 重定向命令导入到另外一个文件,方便找到发生问题的原因。另外 taosBenchmark 在执行失败后也会把相应执行的语句和调试原因输出到屏幕。可以搜索 reason 来找到 TDengine 服务端返回的错误原因信息。 ``` -x, --aggr-func Test aggregation funtions after insertion. 
``` -TDengine 不仅仅是插入性能非常强大,由于其先进的数据库引擎设计使查询性能也异常强大。taosdemo 提供一个 -x 函数,可以在插入数据结束后进行常用查询操作并输出查询消耗时间。以下为在前述服务器上进行插入一亿条记录后进行常用查询的结果。 +TDengine 不仅仅是插入性能非常强大,由于其先进的数据库引擎设计使查询性能也异常强大。taosBenchmark 提供一个 -x 函数,可以在插入数据结束后进行常用查询操作并输出查询消耗时间。以下为在前述服务器上进行插入一亿条记录后进行常用查询的结果。 可以看到 select * 取出一亿条记录(不输出到屏幕)操作仅消耗1.26秒。而对一亿条记录进行常用的聚合函数操作通常仅需要二十几毫秒,时间最长的 count 函数也不到四十毫秒。 ``` -taosdemo -I stmt -T 48 -y -x +taosBenchmark -I stmt -T 48 -y -x ... ... select * took 1.266835 second(s) @@ -254,7 +254,7 @@ select min(current) took 0.025812 second(s) select first(current) took 0.024105 second(s) ... ``` -除了命令行方式, taosdemo 还支持接受指定一个 JSON 文件做为传入参数的方式来提供更丰富的设置。一个典型的 JSON 文件内容如下: +除了命令行方式, taosBenchmark 还支持接受指定一个 JSON 文件做为传入参数的方式来提供更丰富的设置。一个典型的 JSON 文件内容如下: ``` { "filetype": "insert", @@ -317,11 +317,11 @@ select first(current) took 0.024105 second(s) }] } ``` -例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosdemo 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。 +例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。 -使用 taosdemo 进行查询和订阅测试 +使用 taosBenchmark 进行查询和订阅测试 -- -taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功能。但一个 taosdemo 实例只能支持其中的一种功能,不能同时支持三种功能,通过配置文件来指定进行哪种功能的测试。 +taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅功能。但一个 taosBenchmark 实例只能支持其中的一种功能,不能同时支持三种功能,通过配置文件来指定进行哪种功能的测试。 以下为一个典型查询 JSON 示例文件内容: ``` @@ -433,23 +433,23 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 -- TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。TDengine 由于数据库内核中创新的数据存储和查询引擎设计,展现出远超同类产品的高效性能。并且由于支持 SQL 语法和多种编程语言的连接器(目前支持 Java, Python, Go, C#, NodeJS, Rust 等),易用性极强,学习成本为零。为了便于运维需求,我们还提供数据迁移和监控功能等相关生态工具软件。 -为了刚接触 TDengine 的使用者方便进行技术评估和压力测试,我们为 taosdemo 开发了丰富的特性。本文即为对 taosdemo 的一个简单介绍,随着 TDengine 
新功能的不断增加,taosdemo 也会继续演化和改进。taosdemo 的代码做为 TDengine 的一部分在 GitHub 上完全开源。欢迎就 taosdemo 或 TDengine 的使用或实现在 GitHub 或者涛思数据的用户群提出建议或批评。 +为了刚接触 TDengine 的使用者方便进行技术评估和压力测试,我们为 taosBenchmark 开发了丰富的特性。本文即为对 taosBenchmark 的一个简单介绍,随着 TDengine 新功能的不断增加,taosBenchmark 也会继续演化和改进。taosBenchmark 的代码做为 TDengine 的一部分在 GitHub 上完全开源。欢迎就 taosBenchmark 或 TDengine 的使用或实现在 GitHub 或者涛思数据的用户群提出建议或批评。 -附录 - 完整 taosdemo 参数介绍 +附录 - 完整 taosBenchmark 参数介绍 -- -taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 +taosBenchmark支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 一、命令行参数 --f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 +-f:指定taosBenchmark所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 -u: 用户名。可选项,缺省是“root“。 -p: 密码。可选项,缺省是“taosdata"。指定密码需要使用 MySQL 风格,即密码和 -p 贴紧方式,中间无空格。 --c: 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +-c: 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 -h:taosd服务的FQDN。可选项,缺省是“localhost“。 @@ -491,7 +491,7 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 -M: 插入数据为完全随机。可选项,缺省为模拟能源设备真实场景(数据在固定范围小幅波动)。 --x:不仅仅插入数据。有该选项时,taosdemo还会进行聚合函数查询操作。 +-x:不仅仅插入数据。有该选项时,taosBenchmark还会进行聚合函数查询操作。 -y:提示询问输入时缺省输入yes。 @@ -501,14 +501,14 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 -g:打印debug信息 --V: 打印taosdemo的debug信息。 +-V: 打印taosBenchmark的debug信息。 --help: 打印命令参数列表。 二、JSON 格式的配置文件中所有参数说明 -taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 +taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一个taosBenchmark实例不能同时支持三种功能,一个 taosBenchmark 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 1、插入功能测试的 JSON 配置文件 ``` @@ -575,9 +575,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta } ``` -"filetype": 本taosdemo实例进行哪种功能测试。"insert"表示数据插入功能。必选项。 +"filetype": 本taosBenchmark实例进行哪种功能测试。"insert"表示数据插入功能。必选项。 -"cfgdir": 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +"cfgdir": 
配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 "host": taosd服务的FQDN。可选项,缺省是“localhost“。 @@ -655,7 +655,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "childtable_offset": 插入数据时,子表起始值。只在drop=no && child_table_exists= yes,该字段生效。 -"childtable_limit": 插入数据时,子表从offset开始,偏移的表数目。使用者可以运行多个 taosdemo 实例(甚至可以在不同的机器上)通过使用不同的 childtable_offset 和 childtable_limit 配置值来实现同时写入相同数据库相同超级表下多个子表。只在drop=no && child_table_exists= yes,该字段生效。 +"childtable_limit": 插入数据时,子表从offset开始,偏移的表数目。使用者可以运行多个 taosBenchmark 实例(甚至可以在不同的机器上)通过使用不同的 childtable_offset 和 childtable_limit 配置值来实现同时写入相同数据库相同超级表下多个子表。只在drop=no && child_table_exists= yes,该字段生效。 "interlace_rows": 跟上面的配置一致,不过该处的配置优先,每个stable可以有自己单独的配置。最大不超过 num_of_records_per_req。 @@ -740,9 +740,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta } ``` -"filetype": 本taosdemo实例进行哪种功能测试。"query"表示数据查询功能。必选项。 +"filetype": 本taosBenchmark实例进行哪种功能测试。"query"表示数据查询功能。必选项。 -"cfgdir": 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +"cfgdir": 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 "host": taosd服务的FQDN。可选项,缺省是“localhost“。 @@ -830,9 +830,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta } ``` -"filetype": 本taosdemo实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。** +"filetype": 本taosBenchmark实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。** -"cfgdir": 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +"cfgdir": 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 "host": taosd服务的FQDN。可选项,缺省是“localhost“。 diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index f38522b5c257fdb3f72e833e72f14f4c9acdefb0..ee1aa1348c3e02af635eb744e1358868d3c56628 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -2,7 +2,7 @@ ## 快捷安装 -TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 
等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。 +TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd,其中 2.4 之后版本默认使用单独运行的独立组件 taosAdapter 提供 http 服务,之前版本使用内置 http 服务。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。 ### 通过源码安装 @@ -134,10 +134,10 @@ taos> source ; ## TDengine 极速体验 -启动 TDengine 的服务,在 Linux 终端执行 taosdemo +启动 TDengine 的服务,在 Linux 终端执行 taosBenchmark (曾命名为 taosdemo): ```bash -$ taosdemo +$ taosBenchmark ``` 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 @@ -175,10 +175,10 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group ```mysql taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); ``` -## taosdemo 详细功能列表 +## taosBenchmark 详细功能列表 -taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。 -taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性能测试](https://www.taosdata.com/cn/documentation/getting-started/taosdemo )。 +taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosBenchmark --help` 详细列出。您可以设置不同参数进行体验。 +taosBenchmark 详细使用方法请参照 [如何使用taosBenchmark对TDengine进行性能测试](https://www.taosdata.com/cn/documentation/getting-started/taosBenchmark )。 ## 客户端和报警模块 diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 
ecd9770e6a52ce06440d4788daaa527b194b5fef..9423dd97dbc11248017e0efb1238d6b779a9bb21 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -822,7 +822,7 @@ k1 = conn.query("select info->'k1' as k1 from s1").fetch_all_into_dict() 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 -注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。) +注意:与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。从 2.4.0.0 版本开始,RESTful 默认有 taosAdapter 提供,要求必须在 url 中指定 db_name。) ### 安装 diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md index 69825e655940045669fedeafdc9ab709c7ed15d9..2ebbe5e43988bc7165ce9234085d66768dc34191 100644 --- a/documentation20/cn/09.connections/docs.md +++ b/documentation20/cn/09.connections/docs.md @@ -64,7 +64,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource ![img](../images/connections/add_datasource3.jpg) -* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041 。 +* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041。注意:从 2.4 版本开始 RESTful 服务默认使用独立组件 taosAdapter 提供,请参考相关文档配置部署。 * User:TDengine 用户名。 * Password:TDengine 用户密码。 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index b24b432fd4eee893b077a8a85306bfa9642851f5..37d773848d53094b43094ef9ebdd5b5b746621b6 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -119,7 +119,7 @@ taosd -C | 1 | firstEP | | 
**SC** | | taosd启动时,主动连接的集群中首个dnode的end point | | localhost:6030 | | | 2 | secondEP | YES | **SC** | | taosd启动时,如果firstEp连接不上,尝试连接集群中第二个dnode的end point | | 无 | | | 3 | fqdn | | **SC** | | 数据节点的FQDN。如果习惯IP地址访问,可设置为该节点的IP地址。 | | 缺省为操作系统配置的第一个hostname。 | 这个参数值的长度需要控制在 96 个字符以内。 | -| 4 | serverPort | | **SC** | | taosd启动后,对外服务的端口号 | | 6030 | RESTful服务使用的端口号是在此基础上+11,即默认值为6041。 | +| 4 | serverPort | | **SC** | | taosd启动后,对外服务的端口号 | | 6030 | RESTful服务使用的端口号是在此基础上+11,即默认值为6041(注意2.4及后续版本使用 taosAdapter 提供 RESTful 接口)。 | | 5 | logDir | | **SC** | | 日志文件目录,客户端和服务器的运行日志将写入该目录 | | /var/log/taos | | | 6 | scriptDir | YES | **S** | | | | | | | 7 | dataDir | | **S** | | 数据文件目录,所有的数据文件都将写入该目录 | | /var/lib/taos | | @@ -180,10 +180,10 @@ taosd -C | 62 | http | | **S** | | 服务器内部的http服务开关。 | 0:关闭http服务, 1:激活http服务。 | 1 | | | 63 | mqtt | YES | **S** | | 服务器内部的mqtt服务开关。 | 0:关闭mqtt服务, 1:激活mqtt服务。 | 0 | | | 64 | monitor | | **S** | | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括CPU、内存、硬盘、网络带宽、HTTP请求量的监控记录,记录信息存储在`LOG`库中。 | 0:关闭监控服务, 1:激活监控服务。 | 0 | | -| 65 | httpEnableRecordSql | | **S** | | 内部使用,记录通过RESTFul接口,产生的SQL调用 | | 0 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 | -| 66 | httpMaxThreads | | **S** | | RESTFul接口的线程数 | | 2 | | +| 65 | httpEnableRecordSql | | **S** | | 内部使用,记录通过RESTFul接口,产生的SQL调用。taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | 0 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 | +| 66 | httpMaxThreads | | **S** | | RESTFul接口的线程数。taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | 2 | | | 67 | telegrafUseFieldNum | YES | | | | | | | -| 68 | restfulRowLimit | | **S** | | RESTFul接口单次返回的记录条数 | | 10240 | 最大10,000,000 | +| 68 | restfulRowLimit | | **S** | | RESTFul接口单次返回的记录条数。taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | 10240 | 最大10,000,000 | | 69 | numOfLogLines | | **SC** | | 单个日志文件允许的最大行数。 | | 10,000,000 | | | 70 | asyncLog | | **SC** | | 日志写入模式 | 0:同步、1:异步 | 1 
| | | 71 | logKeepDays | | **SC** | 天 | 日志文件的最长保存时间 | | 0 | 大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳。 | @@ -641,9 +641,11 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 - *taosd*:TDengine服务端可执行文件 - *taos*:TDengine Shell可执行文件 - *taosdump*:数据导入导出工具 -- *taosdemo*:TDengine测试工具 +- *taosBenchmark*:TDengine测试工具 - remove.sh:卸载TDengine的脚本,请谨慎执行,链接到/usr/bin目录下的**rmtaos**命令。会删除TDengine的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos。 +注意:2.4.0.0 版本之后的 taosBenchmark 和 taosdump 需要安装独立安装包 taosTools。 + 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 ## TDengine 的启动、停止、卸载 @@ -692,6 +694,12 @@ rmtaos 1. 合法字符:英文字符、数字和下划线 2. 允许英文字符或下划线开头,不允许以数字开头 3. 不区分大小写 +4. 转义后表(列)名规则: + 为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。 + 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 + 例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。 + 需要注意的是转义字符中的内容必须是可打印字符。 + 支持转义符的功能从 2.3.0.1 版本开始。 **密码合法字符集** @@ -761,6 +769,28 @@ rmtaos | CONNS | ID | NOTNULL | STABLE | WAL | | COPY | IF | NOW | STABLES | WHERE | +## 转义字符说明 +- 转义字符表(转义符的功能从 2.4.0.4 版本开始) + + | 字符序列 | **代表的字符** | + | :--------: | ------- | + | `\'` | 单引号' | + | `\"` | 双引号" | + | \n | 换行符 | + | \r | 回车符 | + | \t | tab符 | + | `\\` | 斜杠\ | + | `\%` | % 规则见下 | + | `\_` | _ 规则见下 | + +- 转义字符使用规则 + 1. 标识符里有转义字符(数据库名、表名、列名) + 1. 普通标识符: 直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。 + 2. 反引号``标识符: 保持原样,不转义 + 2. 数据里有转义字符 + 1. 遇到上面定义的转义字符会转义(%和_见下面说明),如果没有匹配的转义字符会忽略掉转义符\。 + 2. 
对于%和_,因为在like里这两个字符是通配符,所以在模式匹配like里用`\%`%和`\_`表示字符里本身的%和_,如果在like模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是%和_。 + ## 诊断及其他 #### 网络连接诊断 diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 71f873c6a11678f5c7c68bc933d0db96d8c85f8a..b6bf522a9b872a04f463fad3a4885c89e8c453a3 100755 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1854,7 +1854,7 @@ TDengine 中的表(列)名命名规则如下: select jtag->'key' from (select jtag from stable) where jtag->'key'>0 ``` ## 转义字符说明 -- 转义字符表 +- 转义字符表 (转义符的功能从 2.4.0.4 版本开始) | 字符序列 | **代表的字符** | | :--------: | ------- | diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index 507ffc09ba954ed6acba39ece128ebbbe5a4142e..28ca0e979bdb73fc79992fdf542a356d8cb45008 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -186,7 +186,7 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端 | TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 | | TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 | | TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 | -| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 | +| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 | | TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 | | TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 | @@ -197,7 +197,7 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端 **20. 
go 语言编写组件编译失败怎样解决?** -新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosAdapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 +新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: diff --git a/documentation20/en/02.getting-started/01.docker/docs.md b/documentation20/en/02.getting-started/01.docker/docs.md index daa89ef1016179e7860e4178c52481aef2760243..05453cc59f43121a8aacd32e7837c5ac33f43f7b 100644 --- a/documentation20/en/02.getting-started/01.docker/docs.md +++ b/documentation20/en/02.getting-started/01.docker/docs.md @@ -49,7 +49,7 @@ c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes · ```bash $ docker exec -it tdengine /bin/bash -root@c452519b0f9b:~/TDengine-server-2.0.20.13# +root@c452519b0f9b:~/TDengine-server-2.4.0.4# ``` - **docker exec**: Enter the container via the docker exec command; if you exit, the container will not stop. @@ -61,9 +61,9 @@ root@c452519b0f9b:~/TDengine-server-2.0.20.13# 4, After entering the container, execute the taos shell client program. ```bash -$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos +$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> @@ -73,21 +73,94 @@ The TDengine terminal successfully connects to the server and prints out a welco In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, you can refer to [TAOS SQL guide](https://www.taosdata.com/en/documentation/taos-sql). 
-## Learn more about TDengine with taosdemo +## Learn more about TDengine with taosBenchmark -1, Following the above steps, exit the TDengine terminal program first. +1, Execute `taosBenchmark` from the command line interface. ```bash -$ taos> q -root@c452519b0f9b:~/TDengine-server-2.0.20.13# +root@c452519b0f9b:~/TDengine-server-2.4.0.4# taosBenchmark ``` +$ taos -2, Execute taosdemo from the command line interface. +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +You can also access the TDengine server inside the Docker container using `curl` command from the host side through the RESTful port. + +``` +$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} +``` + +This command accesses the TDengine server through the RESTful interface, which connects to port 6041 on the local machine, so the connection is successful. + +TDengine RESTful interface details can be found in the [official documentation](https://www.taosdata.com/en/documentation/connector#restful). 
+ + +### Running TDengine server and taosAdapter with a Docker container + +Docker containers of TDengine version 2.4.0.0 and later include a component named `taosAdapter`, which supports data writing and querying capabilities to the TDengine server through the RESTful interface and provides the data ingestion interfaces compatible with InfluxDB/OpenTSDB. Allows seamless migration of InfluxDB/OpenTSDB applications to access TDengine. + +Note: If taosAdapter is running inside the container, you need to add mapping to other additional ports as needed, please refer to [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README.md) for the default port number and modification methods for the specific purpose. + +Running TDengine version 2.4.0.4 image with docker. + +``` +$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 +``` + +Verify that the RESTful interface taosAdapter provides is working using the `curl` command. +``` +$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql + +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} +``` + +taosAdapter supports multiple data collection agents (e.g. Telegraf, StatsD, collectd, etc.), here we only demonstrate how StatsD is simulated to write data, and the command is executed from the host side as follows.
+``` +$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 +``` + +Then you can use the taos shell to query the taosAdapter automatically created database statsd and the contents of the super table foo. +``` +taos> show databases; + name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | +==================================================================================================================================================================================================================================================================================== + log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | + statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | +Query OK, 2 row(s) in set (0.002112s) + +taos> use statsd; +Database changed. + +taos> show stables; + name | created_time | columns | tags | tables | +============================================================================================ + foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 | +Query OK, 1 row(s) in set (0.001160s) + +taos> select * from foo; + ts | value | metric_type | +======================================================================================= + 2021-12-28 09:21:48.840820836 | 1 | counter | +Query OK, 1 row(s) in set (0.001639s) + +taos> +``` + +You can see that the simulation data has been written to TDengine. + + +### Application example: write data to TDengine server in Docker container using `taosBenchmark` on the host +1, execute `taosBenchmark` (was named taosdemo) in the host command line interface to write data to the TDengine server in the Docker container ```bash -root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo +$ taosBenchmark -taosdemo is simulating data generated by power equipments monitoring... 
+taosBenchmark is simulating data generated by power equipments monitoring... host: 127.0.0.1:6030 user: root @@ -136,14 +209,14 @@ After enter, this command will automatically create a super table meters under t It takes about a few minutes to execute this command and ends up inserting a total of 100 million records. -3, Go to the TDengine terminal and view the data generated by taosdemo. +3, Go to the TDengine terminal and view the data generated by taosBenchmark. - **Go to the terminal interface.** ```bash -$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos +$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. taos> diff --git a/documentation20/en/02.getting-started/02.taosdemo/docs.md b/documentation20/en/02.getting-started/02.taosdemo/docs.md index c872d2971ef3cce250592df0534af5369c4682dd..2fd09ef3d3774d1bc47091c9eaa4020d6f937bc0 100644 --- a/documentation20/en/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/en/02.getting-started/02.taosdemo/docs.md @@ -1,15 +1,15 @@ -Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called taosdemo for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. 
User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosdemo customized parameters. +Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark customized parameters. -Running taosdemo is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compiling the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory. +Running taosBenchmark is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compiling the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory. -To run an insertion test with taosdemo +To run an insertion test with taosBenchmark -- -Executing taosdemo without any parameters results in the following output. +Executing taosBenchmark without any parameters results in the following output. ``` -$ taosdemo +$ taosBenchmark -taosdemo is simulating data generated by power equipment monitoring... 
+taosBenchmark is simulating data generated by power equipment monitoring... host: 127.0.0.1:6030 user: root @@ -54,7 +54,7 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT Press enter key to continue or Ctrl-C to stop ``` -The parameters here shows for what taosdemo will use for data insertion. By default, taosdemo without entering any command line arguments will simulate a city power grid system's meter data collection scenario as a typical application in the power industry. That is, a database named test will be created, and a super table named meters will be created, where the super table schema is following: +The parameters here show what taosBenchmark will use for data insertion. By default, taosBenchmark without entering any command line arguments will simulate a city power grid system's meter data collection scenario as a typical application in the power industry. That is, a database named test will be created, and a super table named meters will be created, where the super table schema is as follows: ``` taos> describe test.meters; @@ -69,7 +69,7 @@ taos> describe test.meters; Query OK, 6 row(s) in set (0.002972s) ``` -After pressing any key taosdemo will create the database test and super table meters and generate 10,000 sub-tables representing 10,000 individule meter devices that report data. That means they independently using the super table meters as a template according to TDengine data modeling best practices. +After pressing any key taosBenchmark will create the database test and super table meters and generate 10,000 sub-tables representing 10,000 individual meter devices that report data. That means they independently use the super table meters as a template according to TDengine data modeling best practices. ``` taos> use test; Database changed. @@ -91,7 +91,7 @@ taos> show stables; meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 | Query OK, 1 row(s) in set (0.001740s) ``` -Then taosdemo generates 10,000 records for each meter device.
+Then taosBenchmark generates 10,000 records for each meter device. ``` ... ====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second==== @@ -108,9 +108,9 @@ Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1 insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms ``` -The above information is the result of a real test on a normal PC server with 8 CPUs and 64G RAM. It shows that taosdemo inserted 100,000,000 (no need to count, 100 million) records in 18 seconds, or an average of 552,909,049 records per second. +The above information is the result of a real test on a normal PC server with 8 CPUs and 64G RAM. It shows that taosBenchmark inserted 100,000,000 (no need to count, 100 million) records in 18 seconds, or an average of 552,909,049 records per second. -TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosdemo -I stmt) on the same hardware for the same amount of data writes, the results are as follows. +TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosBenchmark -I stmt) on the same hardware for the same amount of data writes, the results are as follows. ``` ... @@ -145,14 +145,14 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16 insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms ``` -It shows that taosdemo inserted 100 million records in 6 seconds, with a much more higher insertion performance, 1,659,590 records wer inserted per second. +It shows that taosBenchmark inserted 100 million records in 6 seconds, with a much more higher insertion performance, 1,659,590 records wer inserted per second. -Because taosdemo is so easy to use, so we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping. 
+Because taosBenchmark is so easy to use, so we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping. -The complete list of taosdemo command-line arguments can be displayed via taosdemo --help as follows. +The complete list of taosBenchmark command-line arguments can be displayed via taosBenchmark --help as follows. ``` -$ taosdemo --help +$ taosBenchmark --help -f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. -u, --user=USER The user name to use when connecting to the server. @@ -160,7 +160,7 @@ $ taosdemo --help -c, --config-dir=CONFIG_DIR Configuration directory. -h, --host=HOST TDengine server FQDN to connect. The default host is localhost. -P, --port=PORT The TCP/IP port number to use for the connection. --I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'. +-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. By default use 'taosc'. -d, --database=DATABASE Destination database. By default is 'test'. -a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3. -m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'. @@ -196,16 +196,16 @@ for any corresponding short options. Report bugs to . ``` -taosdemo's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below. +taosBenchmark's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below. ``` --I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'. +-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'. 
``` -The performance difference between different interfaces of taosdemo has been mentioned earlier, the -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. The -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses parameter binding interface to write data, and rest uses RESTful protocol to write data. +The performance difference between different interfaces of taosBenchmark has been mentioned earlier, the -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. The -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses parameter binding interface to write data, and rest uses RESTful protocol to write data. ``` -T, --threads=NUMBER The number of threads. Default is 8. ``` -The -T parameter sets how many threads taosdemo uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible. +The -T parameter sets how many threads taosBenchmark uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible. ``` -b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT. @@ -223,11 +223,11 @@ To reach TDengine performance limits, data insertion can be executed by using mu -n, --records=NUMBER The number of records per table. Default is 10000. -M, --random The value of records generated are totally random. The default is to simulate power equipment scenario. ``` -As mentioned earlier, taosdemo creates 10,000 tables by default, and each table writes 10,000 records. taosdemo can set the number of tables and the number of records in each table by -t and -n. 
The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter. +As mentioned earlier, taosBenchmark creates 10,000 tables by default, and each table writes 10,000 records. taosBenchmark can set the number of tables and the number of records in each table by -t and -n. The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter. ``` -y, --answer-yes Default input yes for prompt. ``` -As we can see above, taosdemo outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosdemo to write data immediately after outputting the parameters. +As we can see above, taosBenchmark outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosBenchmark to write data immediately after outputting the parameters. ``` -O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order. -R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000. @@ -236,7 +236,7 @@ In some scenarios, the received data does not arrive in exact order, but contain ``` -g, --debug Print debug info. 
``` -If you are interested in the taosdemo insertion process or if the data insertion result is not as expected, you can use the -g parameter to make taosdemo print the debugging information in the process of the execution to the screen or import it to another file with the Linux redirect command to easily find the cause of the problem. In addition, taosdemo will also output the corresponding executed statements and debugging reasons to the screen after the execution fails. You can search the word "reason" to find the error reason information returned by the TDengine server. +If you are interested in the taosBenchmark insertion process or if the data insertion result is not as expected, you can use the -g parameter to make taosBenchmark print the debugging information in the process of the execution to the screen or import it to another file with the Linux redirect command to easily find the cause of the problem. In addition, taosBenchmark will also output the corresponding executed statements and debugging reasons to the screen after the execution fails. You can search the word "reason" to find the error reason information returned by the TDengine server. ``` -x, --aggr-func Test aggregation funtions after insertion. ``` @@ -244,7 +244,7 @@ TDengine is not only very powerful in insertion performance, but also in query p You can see that the select * fetch 100 million rows (not output to the screen) operation consumes only 1.26 seconds. The most of normal aggregation function for 100 million records usually takes only about 20 milliseconds, and even the longest count function takes less than 40 milliseconds. ``` -taosdemo -I stmt -T 48 -y -x +taosBenchmark -I stmt -T 48 -y -x ... ... select * took 1.266835 second(s) @@ -264,7 +264,7 @@ select min(current) took 0.025812 second(s) select first(current) took 0.024105 second(s) ... 
``` -In addition to the command line approach, taosdemo also supports take a JSON file as an incoming parameter to provide a richer set of settings. A typical JSON file would look like this. +In addition to the command line approach, taosBenchmark also supports take a JSON file as an incoming parameter to provide a richer set of settings. A typical JSON file would look like this. ``` { "filetype": "insert", @@ -327,11 +327,11 @@ In addition to the command line approach, taosdemo also supports take a JSON fil }] } ``` -For example, we can specify different number of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to use multiple taosdemo processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file". +For example, we can specify different number of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to use multiple taosBenchmark processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file". -Use taosdemo for query and subscription testing +Use taosBenchmark for query and subscription testing -- -taosdemo can not only write data, but also perform query and subscription functions. However, a taosdemo instance can only support one of these functions, not all three, and the configuration file is used to specify which function to test. +taosBenchmark can not only write data, but also perform query and subscription functions. 
However, a taosBenchmark instance can only support one of these functions, not all three, and the configuration file is used to specify which function to test. The following is the content of a typical query JSON example file. ``` @@ -443,7 +443,7 @@ Conclusion -- TDengine is a big data platform designed and optimized for IoT, Telematics, Industrial Internet, DevOps, etc. TDengine shows a high performance that far exceeds similar products due to the innovative data storage and query engine design in the database kernel. And withSQL syntax support and connectors for multiple programming languages (currently Java, Python, Go, C#, NodeJS, Rust, etc. are supported), it is extremely easy to use and has zero learning cost. To facilitate the operation and maintenance needs, we also provide data migration and monitoring functions and other related ecological tools and software. -For users who are new to TDengine, we have developed rich features for taosdemo to facilitate technical evaluation and stress testing. This article is a brief introduction to taosdemo, which will continue to evolve and improve as new features are added to TDengine. +For users who are new to TDengine, we have developed rich features for taosBenchmark to facilitate technical evaluation and stress testing. This article is a brief introduction to taosBenchmark, which will continue to evolve and improve as new features are added to TDengine. - As part of TDengine, taosdemo's source code is fully open on the GitHub. Suggestions or advices about the use or implementation of taosdemo or TDengine are welcomed on GitHub or in the Taos Data user group. + As part of TDengine, taosBenchmark's source code is fully open on the GitHub. Suggestions or advices about the use or implementation of taosBenchmark or TDengine are welcomed on GitHub or in the Taos Data user group. 
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index 7d7744be56259c5c1a6a74a8b407df607768d99d..5843db965560a70393497d291cbc3b68822258e7 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -2,7 +2,7 @@ ## Quick Install -TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). +TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. From version 2.4 onwards, TDengine uses a stand-alone software, taosAdapter, to provide HTTP service. The early version uses the http server embedded in the taosd. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). 
### Install from Source @@ -138,10 +138,10 @@ taos> source ; ## Experience TDengine’s Lightning Speed -After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal. +After starting the TDengine server, you can execute the command `taosBenchmark` (was named `taosdemo`) in the Linux terminal. ```bash -$ taosdemo +$ taosBenchmark ``` Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`. The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai". @@ -180,10 +180,10 @@ taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10; taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); ``` -## Using taosdemo in detail +## Using taosBenchmark in detail -you can run command `taosdemo` with many options, like number of tables, rows of records and so on. To know more about these options, you can execute `taosdemo --help` and then take a try using different options. -Please refer to [How to use taosdemo to test the performance of TDengine](https://www.taosdata.com/en/documentation/getting-started/taosdemo) for detail. +you can run command `taosBenchmark` with many options, like number of tables, rows of records and so on. To know more about these options, you can execute `taosBenchmark --help` and then take a try using different options. +Please refer to [How to use taosBenchmark to test the performance of TDengine](https://www.taosdata.com/en/documentation/getting-started/taosBenchmark) for detail. 
## Client and Alarm Module diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index f8b444281587e03bb0b143d5ecd1c41abed9dd64..be8dabaa61ae81b299487d3ca1f3ca1a907df39e 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -661,6 +661,8 @@ In tests/examples/python, we provide a sample Python program read_example. py to To support the development of various types of platforms, TDengine provides an API that conforms to REST design standards, that is, RESTful API. In order to minimize the learning cost, different from other designs of database RESTful APIs, TDengine directly requests SQL statements contained in BODY through HTTP POST to operate the database, and only needs a URL. See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html) for the use of RESTful connectors. +Note: One difference from the native connector is that the RESTful interface is stateless, so the `USE db_name` command has no effect and all references to table names and super table names require the database name to be specified. (Starting from version 2.2.0.0, we support specifying db_name in the RESTful url, in which case if the database name prefix is not specified in the SQL statement. Since version 2.4.0.0, RESTful service is provided by taosAdapter by default, which requires that db_name must be specified in the url.) 
+ ### HTTP request format ``` diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md index 0e15e58a531cbd783168802e919aa8095fe034bf..2d0886379754e7a2abd106a2359495c1df379389 100644 --- a/documentation20/en/09.connections/docs.md +++ b/documentation20/en/09.connections/docs.md @@ -63,7 +63,7 @@ Enter the data source configuration page and modify the corresponding configurat ![img](../images/connections/add_datasource3.jpg) -- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), default [http://localhost:6041](http://localhost:6041/) +- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), use [http://localhost:6041](http://localhost:6041/) to access the interface by default. Note the 2.4 and later version of TDengine use a stand-alone software, taosAdapter to provide RESTful interface. Please refer to its document for configuration and deployment. - User: TDengine username. - Password: TDengine user password. @@ -173,4 +173,4 @@ Please replace the IP address in the command above to the correct one. If no err The functions below are not supported currently: - `dbExistsTable(conn, "test")`: if table test exists -- `dbListTables(conn)`: list all tables in the connection \ No newline at end of file +- `dbListTables(conn)`: list all tables in the connection diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index 11dd3e482d5e68bb642a94c533f23d390edf61f3..d845ba6466987f66d6a0f86d6525ffa1cd96d85d 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -91,7 +91,7 @@ Only some important configuration parameters are listed below. For more paramete - firstEp: end point of the first dnode which will be connected in the cluster when taosd starts, the default value is localhost: 6030. 
- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you want to access via IP address directly, you can set it to the IP address of the node. - serverPort: the port number of the external service after taosd started, the default value is 6030. -- httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041. +- httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041. Note 2.4 and later version use a stand-alone software, taosAdapter to provide RESTFul interface. - dataDir: the data file directory to which all data files will be written. [Default:/var/lib/taos](http://default/var/lib/taos). - logDir: the log file directory to which the running log files of the client and server will be written. [Default:/var/log/taos](http://default/var/log/taos). - arbitrator: the end point of the arbitrator in the system; the default value is null. @@ -538,4 +538,4 @@ At the moment, TDengine has nearly 200 internal reserved keywords, which cannot | CONCAT | GLOB | METRICS | SET | VIEW | | CONFIGS | GRANTS | MIN | SHOW | WAVG | | CONFLICT | GROUP | MINUS | SLASH | WHERE | -| CONNECTION | | | | | \ No newline at end of file +| CONNECTION | | | | | diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 17af500c7a8aade144c1ce42e540dbc7c73b74d9..56c8a6956475469e8d19f80f2e2a0c14d2d8a6b2 100755 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -1336,7 +1336,7 @@ Is not null supports all types of columns. 
Non-null expression is < > "" and onl select jtag->'key' from (select jtag from stable) where jtag->'key'>0 ``` ## Escape character description -- Special Character Escape Sequences +- Special Character Escape Sequences(since version 2.4.0.4) | Escape Sequence | **Character Represented by Sequence** | | :--------: | ------------------- | diff --git a/packaging/cfg/taosd.service b/packaging/cfg/taosd.service index 452488b4e951e36c043c823e17cca5ab7dbfd21b..fff4b74e62a6da8f2bda9a6306a79132d7585e42 100644 --- a/packaging/cfg/taosd.service +++ b/packaging/cfg/taosd.service @@ -1,7 +1,7 @@ [Unit] Description=TDengine server service -After=network-online.target taosadapter.service -Wants=network-online.target taosadapter.service +After=network-online.target +Wants=network-online.target [Service] Type=simple diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index a54e9ca499330855b22daf523286ea5bbc509bb8..053d806022124732e3009c2a16f2bfd5d5def9c7 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -18,5 +18,5 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ LC_ALL=en_US.UTF-8 EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 -CMD ["run_taosd.sh"] +CMD ["taosd"] VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] diff --git a/packaging/sed_jh.sh b/packaging/sed_jh.sh index 0c288bee76c0745f5d3cf3b23d4aa103c1897c22..68335285c5e6b2cc43da277d9ac7f8078a50f3b2 100755 --- a/packaging/sed_jh.sh +++ b/packaging/sed_jh.sh @@ -68,8 +68,8 @@ function replace_community_jh() { # packaging/tools/startPre.sh sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/startPre.sh sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/jh_taos\"/g" ${top_dir}/packaging/tools/startPre.sh - # packaging/tools/run_taosd.sh - sed -i "s/taosd/jh_taosd/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/run_taosd_and_taosadapter.sh + sed -i "s/taosd/jh_taosd/g" 
${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh # packaging/tools/install.sh sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/install.sh sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/install.sh diff --git a/packaging/sed_kh.sh b/packaging/sed_kh.sh index 3041dc9ffa82a0e9fa0e1a2a5dd859c80a6c311c..d0d21a20c140de1335a9c79a9da68c4b619e7381 100755 --- a/packaging/sed_kh.sh +++ b/packaging/sed_kh.sh @@ -68,8 +68,8 @@ function replace_community_kh() { # packaging/tools/startPre.sh sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/startPre.sh sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/kinghistorian\"/g" ${top_dir}/packaging/tools/startPre.sh - # packaging/tools/run_taosd.sh - sed -i "s/taosd/khserver/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/run_taosd_and_taosadapter.sh + sed -i "s/taosd/khserver/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh # packaging/tools/install.sh sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/install.sh sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/install.sh diff --git a/packaging/sed_power.sh b/packaging/sed_power.sh index 8955476591410b6efac3aa410aab2cf257c1ac41..22c056adb2035a6e441bdcb8b3ed33c99e10e19d 100755 --- a/packaging/sed_power.sh +++ b/packaging/sed_power.sh @@ -113,8 +113,8 @@ function replace_community_power() { # packaging/tools/startPre.sh sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/startPre.sh sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/Power\"/g" ${top_dir}/packaging/tools/startPre.sh - # packaging/tools/run_taosd.sh - sed -i "s/taosd/powerd/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/run_taosd_and_taosadapter.sh + sed -i "s/taosd/powerd/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh # 
packaging/tools/install.sh sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/install.sh sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/install.sh diff --git a/packaging/sed_pro.sh b/packaging/sed_pro.sh index e7fdaeda4c68f4dfc76d4d879f20f83c123238c1..fb50a37c57189e35a52e170d1dd74759ee6c5455 100755 --- a/packaging/sed_pro.sh +++ b/packaging/sed_pro.sh @@ -68,8 +68,8 @@ function replace_community_pro() { # packaging/tools/startPre.sh sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/startPre.sh sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/ProDB\"/g" ${top_dir}/packaging/tools/startPre.sh - # packaging/tools/run_taosd.sh - sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/run_taosd_and_taosadapter.sh + sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh # packaging/tools/install.sh sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/install.sh sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/install.sh diff --git a/packaging/sed_tq.sh b/packaging/sed_tq.sh index 412abb1fa702839a8d9a789c7860155a120419c6..ce0ecc1aafcfe231bf1d901ba872e18c3c8fd793 100755 --- a/packaging/sed_tq.sh +++ b/packaging/sed_tq.sh @@ -64,8 +64,8 @@ function replace_community_tq() { # packaging/tools/startPre.sh sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/startPre.sh sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/tq\"/g" ${top_dir}/packaging/tools/startPre.sh - # packaging/tools/run_taosd.sh - sed -i "s/taosd/tqd/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/run_taosd_and_taosadapter.sh + sed -i "s/taosd/tqd/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh # packaging/tools/install.sh sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/install.sh sed -i 
"s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/install.sh diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 7c9f267e9234a9276c4fa7bc297b738599dad944..009a5f967c4e1ce5441c1b9d9cbd3bf89421f070 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -194,7 +194,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : + ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* @@ -206,7 +206,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : + [ -x ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 3ec415a801dffee4b9253c3a1ae732c26ea079de..567c8117fdaaa5d33ae8e7b5272cd3c13ec032c6 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -175,7 +175,8 @@ function install_bin() { if [ "$osType" != "Darwin" ]; then ${csudo}rm -f 
${bin_link_dir}/perfMonitor || : ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : + ${csudo}rm -f ${bin_link_dir}/rmtaos || : + ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}cp -r ${binary_dir}/build/bin/taos ${install_main_dir}/bin || : @@ -189,7 +190,7 @@ function install_bin() { ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin - ${csudo}cp -r ${script_dir}/run_taosd.sh ${install_main_dir}/bin + ${csudo}cp -r ${script_dir}/run_taosd_and_taosadapter.sh ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin ${csudo}chmod 0555 ${install_main_dir}/bin/* @@ -201,7 +202,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : + [ -x ${install_main_dir}/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : else diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index e4241ca285430b404a175d02185e19ccecbbc238..6c95db27daee6932b537b3f38d110aa878008d0c 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -64,7 +64,7 @@ else 
${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh \ ${script_dir}/set_core.sh \ - ${script_dir}/run_taosd.sh \ + ${script_dir}/run_taosd_and_taosadapter.sh \ ${script_dir}/startPre.sh \ ${script_dir}/taosd-dump-cfg.gdb" diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 1e1302c373ca7d0ffa3cf6b5965909613b37b5e5..76ce0f0049abb89b30bfb8612712216c5573f132 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -88,7 +88,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : + ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : } function clean_lib() { diff --git a/packaging/tools/run_taosd.sh b/packaging/tools/run_taosd_and_taosadapter.sh similarity index 100% rename from packaging/tools/run_taosd.sh rename to packaging/tools/run_taosd_and_taosadapter.sh diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 1038af5abb1d00b14b1c54d2f96522647b71178b..4c999b710a62d1e620064af4d5647ee46d9a570e 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -209,6 +209,15 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp (JNIEnv *, jobject, jlong, jstring, jlong); + +/** + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: setTableNameTagsImp + * Signature: (JLjava/lang/String;I[B[B[B[BJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp + (JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong); + /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: bindColDataImp @@ -217,6 +226,14 @@ JNIEXPORT jint JNICALL 
Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp (JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong); +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: stmt_add_batch + * Signature: (JJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con); + + /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: executeBatchImp @@ -231,13 +248,12 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J */ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con); -/** +/* * Class: com_taosdata_jdbc_TSDBJNIConnector - * Method: setTableNameTagsImp - * Signature: (JLjava/lang/String;I[B[B[B[BJ)I + * Method: stmt_errstr + * Signature: (JJ)I */ -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp - (JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong); +JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_stmtErrorMsgImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con); /* * Class: com_taosdata_jdbc_TSDBJNIConnector diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 32a07b3aad20d8399620b13bf8c4fdb440a8e106..a7ce71bb8727701305729773ae2eb23631ca11c7 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -805,6 +805,78 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI return JNI_SUCCESS; } +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp( + JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList, + jbyteArray lengthList, jbyteArray nullList, jlong conn) { + TAOS *tsconn = (TAOS *)conn; + if 
(tsconn == NULL) { + jniError("jobj:%p, connection already closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_STMT *pStmt = (TAOS_STMT *)stmt; + if (pStmt == NULL) { + jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn); + return JNI_SQL_NULL; + } + + jsize len = (*env)->GetArrayLength(env, tags); + char *tagsData = (char *)calloc(1, len); + (*env)->GetByteArrayRegion(env, tags, 0, len, (jbyte *)tagsData); + if ((*env)->ExceptionCheck(env)) { + // todo handle error + } + + len = (*env)->GetArrayLength(env, lengthList); + int64_t *lengthArray = (int64_t *)calloc(1, len); + (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray); + if ((*env)->ExceptionCheck(env)) { + } + + len = (*env)->GetArrayLength(env, typeList); + char *typeArray = (char *)calloc(1, len); + (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray); + if ((*env)->ExceptionCheck(env)) { + } + + len = (*env)->GetArrayLength(env, nullList); + int32_t *nullArray = (int32_t *)calloc(1, len); + (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray); + if ((*env)->ExceptionCheck(env)) { + } + + const char *name = (*env)->GetStringUTFChars(env, tableName, NULL); + char *curTags = tagsData; + + TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND)); + for (int32_t i = 0; i < numOfTags; ++i) { + tagsBind[i].buffer_type = typeArray[i]; + tagsBind[i].buffer = curTags; + tagsBind[i].is_null = &nullArray[i]; + tagsBind[i].length = (uintptr_t *)&lengthArray[i]; + + curTags += lengthArray[i]; + } + + int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind); + + int32_t nTags = (int32_t)numOfTags; + jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags); + + tfree(tagsData); + tfree(lengthArray); + tfree(typeArray); + tfree(nullArray); + tfree(tagsBind); + (*env)->ReleaseStringUTFChars(env, tableName, name); + + if (code != TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, 
tstrerror(code)); + return JNI_TDENGINE_ERROR; + } + return JNI_SUCCESS; +} + JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp( JNIEnv *env, jobject jobj, jlong stmt, jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) { @@ -872,8 +944,8 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp( return JNI_SUCCESS; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, - jlong con) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEnv *env, jobject jobj, jlong stmt, + jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); @@ -886,19 +958,18 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J return JNI_SQL_NULL; } - taos_stmt_add_batch(pStmt); - int32_t code = taos_stmt_execute(pStmt); + int32_t code = taos_stmt_add_batch(pStmt); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); return JNI_TDENGINE_ERROR; } - jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon); + jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon); return JNI_SUCCESS; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, - jlong con) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, + jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); @@ -911,86 +982,58 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv return JNI_SQL_NULL; } - int32_t code = taos_stmt_close(pStmt); + int32_t code = taos_stmt_execute(pStmt); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, 
tstrerror(code)); return JNI_TDENGINE_ERROR; } - jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon); + jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon); return JNI_SUCCESS; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp( - JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList, - jbyteArray lengthList, jbyteArray nullList, jlong conn) { - TAOS *tsconn = (TAOS *)conn; - if (tsconn == NULL) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, + jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } TAOS_STMT *pStmt = (TAOS_STMT *)stmt; if (pStmt == NULL) { - jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn); + jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); return JNI_SQL_NULL; } - jsize len = (*env)->GetArrayLength(env, tags); - char *tagsData = (char *)calloc(1, len); - (*env)->GetByteArrayRegion(env, tags, 0, len, (jbyte *)tagsData); - if ((*env)->ExceptionCheck(env)) { - // todo handle error - } - - len = (*env)->GetArrayLength(env, lengthList); - int64_t *lengthArray = (int64_t *)calloc(1, len); - (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray); - if ((*env)->ExceptionCheck(env)) { + int32_t code = taos_stmt_close(pStmt); + if (code != TSDB_CODE_SUCCESS) { + jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); + return JNI_TDENGINE_ERROR; } - len = (*env)->GetArrayLength(env, typeList); - char *typeArray = (char *)calloc(1, len); - (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray); - if ((*env)->ExceptionCheck(env)) { - } + jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon); + return JNI_SUCCESS; +} - len = (*env)->GetArrayLength(env, nullList); - int32_t *nullArray = (int32_t *)calloc(1, len); - 
(*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray); - if ((*env)->ExceptionCheck(env)) { +JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_stmtErrorMsgImp(JNIEnv *env, jobject jobj, jlong stmt, + jlong con) { + char errMsg[128]; + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection already closed", jobj); + sprintf(errMsg, "jobj:%p, connection already closed", jobj); + return (*env)->NewStringUTF(env, errMsg); } - const char *name = (*env)->GetStringUTFChars(env, tableName, NULL); - char *curTags = tagsData; - - TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND)); - for (int32_t i = 0; i < numOfTags; ++i) { - tagsBind[i].buffer_type = typeArray[i]; - tagsBind[i].buffer = curTags; - tagsBind[i].is_null = &nullArray[i]; - tagsBind[i].length = (uintptr_t *)&lengthArray[i]; - - curTags += lengthArray[i]; + TAOS_STMT *pStmt = (TAOS_STMT *)stmt; + if (pStmt == NULL) { + jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); + sprintf(errMsg, "jobj:%p, conn:%p, invalid stmt", jobj, tscon); + return (*env)->NewStringUTF(env, errMsg); } - int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind); - - int32_t nTags = (int32_t)numOfTags; - jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags); - - tfree(tagsData); - tfree(lengthArray); - tfree(typeArray); - tfree(nullArray); - tfree(tagsBind); - (*env)->ReleaseStringUTFChars(env, tableName, name); - - if (code != TSDB_CODE_SUCCESS) { - jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code)); - return JNI_TDENGINE_ERROR; - } - return JNI_SUCCESS; + return (*env)->NewStringUTF(env, taos_stmt_errstr((TAOS_STMT *)stmt)); } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj, diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 85c2215a2e71746889403e60ed09279e64574750..bb3792850620b0a07ca599cef42906a6854a368a 100644 --- 
a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -485,7 +485,26 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { if (row == NULL) { return TSDB_CODE_TSC_DB_NOT_SELECTED; } - const char *showColumns[] = {"REPLICA", "QUORUM", "DAYS", "KEEP", "BLOCKS", NULL}; + const char *showColumns[][2] = { + {"REPLICA", "REPLICA"}, + {"QUORUM", "QUORUM"}, + {"DAYS", "DAYS"}, +#ifdef _STORAGE + {"KEEP0,KEEP1,KEEP2", "KEEP"}, +#else + {"KEEP", "KEEP"}, +#endif + {"CACHE(MB)", "CACHE"}, + {"BLOCKS", "BLOCKS"}, + {"MINROWS", "MINROWS"}, + {"MAXROWS", "MAXROWS"}, + {"WALLEVEL", "WAL"}, + {"FSYNC", "FSYNC"}, + {"COMP", "COMP"}, + {"CACHELAST", "CACHELAST"}, + {"PRECISION", "PRECISION"}, + {"UPDATE", "UPDATE"}, + {NULL, NULL}}; SSqlObj *pSql = builder->pInterSql; TAOS_FIELD *fields = taos_fetch_fields(pSql); @@ -499,12 +518,16 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); for (int i = 1; i < num_fields; i++) { - for (int j = 0; showColumns[j] != NULL; j++) { - if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j], strlen(showColumns[j]))) { + for (int j = 0; showColumns[j][0] != NULL; j++) { + if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j][0], strlen(showColumns[j][0]))) { memset(buf, 0, sizeof(buf)); ret = tscGetNthFieldResult(row, fields, lengths, i, buf); if (ret == 0) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf); + if (STR_NOCASE_EQUAL(showColumns[j][0], strlen(showColumns[j][0]), "PRECISION", strlen("PRECISION"))) { + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s '%s'", showColumns[j][1], buf); + } else { + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", 
showColumns[j][1], buf); + } } } } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index e4ed16518f87fee2463d2e474a79243dfd703ca7..f6e210287e6ef73b66a85b7c4c5ffdbd7074fbcd 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1658,7 +1658,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pRes->qId = 0; pRes->numOfRows = 0; - strcpy(pSql->sqlstr, sql); + strntolower(pSql->sqlstr, sql, (int32_t)sqlLen); tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); if (tscIsInsertData(pSql->sqlstr)) { @@ -1849,6 +1849,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags tscResetSqlCmd(pCmd, false, pSql->self); pCmd->insertParam.pTableBlockHashList = hashList; } + code = tsParseSql(pStmt->pSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 294707eb1747913567dc3b216031bda9eb182527..3b46c7f634d1ac28de060ba332c258da88f385bb 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2431,7 +2431,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t if (pTableMeta->tableType != TSDB_TEMP_TABLE) { tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid); } - } else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT) { // simple column projection query + } else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT || tokenId == TK_BOOL) { // simple column projection query SColumnIndex index = COLUMN_INDEX_INITIALIZER; // user-specified constant value as a new result column @@ -4664,6 +4664,9 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, int32_t rightType = SQLEXPR_TYPE_UNASSIGNED; const char* msg1 = "arithmetic expression composed with columns from different tables"; const char* msg2 = "arithmetic expression composed with 
functions/columns of different types"; + const char* msg3 = "comparison/logical expression involving string operands is not supported"; + const char* msg4 = "comparison/logical expression involving function result is not supported"; + int32_t leftHeight = 0; int32_t ret = validateSQLExprItem(pCmd, pExpr->pLeft, pQueryInfo, pList, &leftType, &uidLeft, &leftHeight); if (ret != TSDB_CODE_SUCCESS) { @@ -4696,6 +4699,21 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, } else if (leftType == SQLEXPR_TYPE_SCALAR || rightType == SQLEXPR_TYPE_SCALAR){ *type = SQLEXPR_TYPE_SCALAR; } + + // comparison/logical operations + if (pExpr->tokenId == TK_EQ || pExpr->tokenId == TK_NE || + pExpr->tokenId == TK_GT || pExpr->tokenId == TK_GE || + pExpr->tokenId == TK_LT || pExpr->tokenId == TK_LE || + pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) { + if ((leftType == SQLEXPR_TYPE_VALUE && pExpr->pLeft->tokenId == TK_STRING) || + (rightType == SQLEXPR_TYPE_VALUE && pExpr->pRight->tokenId == TK_STRING)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + if (leftType == SQLEXPR_TYPE_AGG || leftType == SQLEXPR_TYPE_SCALAR || + rightType == SQLEXPR_TYPE_AGG || rightType == SQLEXPR_TYPE_SCALAR) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + } } return TSDB_CODE_SUCCESS; } @@ -5666,7 +5684,16 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { if (!p->_node.pLeft || !p->_node.pRight) { break; } - + + int32_t retVal = TSDB_CODE_SUCCESS; + if (p->_node.pLeft && (retVal = validateTagCondExpr(pCmd, p->_node.pLeft)) != TSDB_CODE_SUCCESS) { + return retVal; + } + + if (p->_node.pRight && (retVal = validateTagCondExpr(pCmd, p->_node.pRight)) != TSDB_CODE_SUCCESS) { + return retVal; + } + if (IS_ARITHMETIC_OPTR(p->_node.optr)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -5702,8 +5729,6 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { schemaType = 
TSDB_DATA_TYPE_DOUBLE; } - int32_t retVal = TSDB_CODE_SUCCESS; - int32_t bufLen = 0; if (IS_NUMERIC_TYPE(vVariant->nType)) { bufLen = 60; // The maximum length of string that a number is converted to. @@ -6668,7 +6693,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } if (tscIsProjectionQuery(pQueryInfo)) { - bool found = false; + bool found = false; for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[index.columnIndex].colId) { @@ -6680,10 +6705,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (!found) { int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); - + SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); pSupInfo->visible = false; - + pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } } @@ -6704,17 +6729,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg4 = "set tag value only available for table"; const char* msg5 = "only support add one tag"; const char* msg6 = "column can only be modified by super table"; - + const char* msg7 = "no tags can be dropped"; const char* msg8 = "only support one tag"; const char* msg9 = "tag name too long"; - + const char* msg10 = "invalid tag name"; const char* msg11 = "primary tag cannot be dropped"; const char* msg12 = "update normal column not supported"; const char* msg13 = "invalid tag value"; const char* msg14 = "tag value too long"; - + const char* msg15 = "no columns can be dropped"; const char* msg16 = "only support one column"; const char* msg17 = "invalid column name"; @@ -6722,7 +6747,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg19 = "invalid new tag name"; 
const char* msg20 = "table is not super table"; const char* msg21 = "only binary/nchar column length could be modified"; - const char* msg23 = "only column length coulbe be modified"; + const char* msg23 = "only column length can be be modified"; const char* msg24 = "invalid binary/nchar column length"; const char* msg25 = "json type error, should be string"; @@ -6785,7 +6810,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (ret != TSDB_CODE_SUCCESS) { return ret; } - + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { if (tscGetNumOfTags(pTableMeta) == 1) { @@ -6818,7 +6843,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char name1[128] = {0}; strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen); - + TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { @@ -6868,7 +6893,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { // Note: update can only be applied to table not super table. 
// the following is used to handle tags value for table created according to super table pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL; - + SArray* pVarList = pAlterSQL->varList; tVariantListItem* item = taosArrayGet(pVarList, 0); int16_t numOfTags = tscGetNumOfTags(pTableMeta); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e89deebe22e78d22ce0c977afadd59995ff427a1..3849e90ce4526ea974792969217473eb8aef5925 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1882,6 +1882,13 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) { tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute query processing", pSql->self, pSql->self); pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE, pSql->self); + if (pQueryInfo->pQInfo == NULL) { + taosHashCleanup(tableGroupInfo.map); + taosArrayDestroy(&group); + tscAsyncResultOnError(pSql); + pRes->code = TSDB_CODE_QRY_OUT_OF_MEMORY; + return pRes->code; + } } uint64_t localQueryId = pSql->self; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 34fd1212079eb1470bc91df31f12c6de24d28b60..792db6dcde7dbaa849cc4c7af8d2b416dd29ba49 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3356,7 +3356,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } } - tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self, + tscWarn("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self, pParentObj->res.numOfRows, numOfFailed, numOfSub); tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.insertParam.numOfTables); @@ -3905,8 +3905,11 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr STsBufInfo bufInfo = {0}; SQueryParam param = {.pOperator 
= pa}; - /*int32_t code = */initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, ¶m, NULL, 0, merger); + int32_t code = initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, ¶m, NULL, 0, merger); taosArrayDestroy(&pa); + if (code != TSDB_CODE_SUCCESS) { + goto _cleanup; + } return pQInfo; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 90f0468150ef14f904ae2dd584bc0c01b4a75306..d5369e38f0eb0a64a375d4a30fc05173c6a6aafd 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -387,6 +387,10 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION; } else { // set the user specified locale failed, use default LC_CTYPE as current locale locale = setlocale(LC_CTYPE, tsLocale); + if (locale == NULL) { + tscError("failed to set locale:%s failed, neither default LC_CTYPE: %s", pStr, tsLocale); + return -1; + } tscInfo("failed to set locale:%s, current locale:%s", pStr, tsLocale); } diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c index 31c7e32773965c866f069d04910cbbc59c187762..d0183ba5b0998b8aa509e360c4dfc02e0721604c 100644 --- a/src/common/src/tarithoperator.c +++ b/src/common/src/tarithoperator.c @@ -21,7 +21,7 @@ #include "tcompare.h" #include "texpr.h" -//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); +//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { int32_t *pLeft = (int32_t *)left; @@ -183,215 +183,219 @@ _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) { } void vectorAdd(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 
0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) + getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,0)); - } - } + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, 
output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) + getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,0)); + } + } } + void vectorSub(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 
1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,0)); - } - } + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), 
_left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,0)); + } + } } + void vectorMultiply(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - 
} - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) * getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,0)); - } - } + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,0)); + } + } } -void vectorDivide(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + +void vectorDivide(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 
0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) /getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); - if 
(getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,0)); - } - } -} + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) /getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,0)); + } + } +} + void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t 
_right_type, void *out, int32_t _ord) { - int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if (len1 == (len2)) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); - } - } else if (len1 == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - ((int64_t)(getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); - } - 
} else if ((len2) == 1) { - for (; i >= 0 && i < len1; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0))) * getVectorDoubleValueFnRight(right,0)); - } - } + + if (len1 == (len2)) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); + } + } else if (len1 == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - ((int64_t)(getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < len1; i += 
step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0))) * getVectorDoubleValueFnRight(right,0)); + } + } } _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 5fd8f181388824bccd4a2ab2b488667af117b172..9a3c126bebe5cf4bbc47ef813bd797f30c14ad5d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -59,7 +59,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } } parameters = new Object[parameterCnt]; - if (parameterCnt > 1) { // the table name is also a parameter, so ignore it. 
this.colData = new ArrayList<>(); @@ -530,8 +529,14 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } public void setTableName(String name) throws SQLException { + + if (this.nativeStmtHandle == 0) { + TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); + this.nativeStmtHandle = connector.prepareStmt(rawSql); + } + if (this.tableName != null) { - this.columnDataExecuteBatch(); + this.columnDataAddBatch(); this.columnDataClearBatchInternal(); } this.tableName = name; @@ -963,10 +968,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } private void columnDataClearBatchInternal() { - int size = this.colData.size(); - this.colData.clear(); - this.colData.addAll(Collections.nCopies(size, null)); - this.tableName = null; // clear the table name + this.tableName = null; + if (this.tableTags != null) + this.tableTags.clear(); + this.tagValueLength = 0; + if (this.colData != null) + this.colData.clear(); } diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index e897454b93fd70cfd46e1836ee7490ae9faedfd7..c5cb93b50e088d549fdebdd0dde7b78e61fae338 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -430,15 +430,27 @@ static void dumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_ case TSDB_DATA_TYPE_TINYINT: fprintf(fp, "%d", *((int8_t *)val)); break; + case TSDB_DATA_TYPE_UTINYINT: + fprintf(fp, "%u", *((uint8_t *)val)); + break; case TSDB_DATA_TYPE_SMALLINT: fprintf(fp, "%d", *((int16_t *)val)); break; + case TSDB_DATA_TYPE_USMALLINT: + fprintf(fp, "%u", *((uint16_t *)val)); + break; case TSDB_DATA_TYPE_INT: fprintf(fp, "%d", *((int32_t *)val)); break; + case TSDB_DATA_TYPE_UINT: + fprintf(fp, "%u", *((uint32_t *)val)); + break; case TSDB_DATA_TYPE_BIGINT: fprintf(fp, "%" PRId64, *((int64_t *)val)); break; + case TSDB_DATA_TYPE_UBIGINT: + fprintf(fp, "%" PRIu64, *((uint64_t *)val)); + break; 
case TSDB_DATA_TYPE_FLOAT: fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); break; diff --git a/src/kit/taos-tools b/src/kit/taos-tools index 9f1b028ca325f67762826be4caf58d356ad7e389..8c11dcf6856bca0860e7e9999f57274864218f4f 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit 9f1b028ca325f67762826be4caf58d356ad7e389 +Subproject commit 8c11dcf6856bca0860e7e9999f57274864218f4f diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index ef955a5663d39f0afcf399a6c15557b8c044d6c7..6ac3878df901923ed5a5fcc77fe6d63969be3a59 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -12,7 +12,7 @@ ELSEIF(TD_BUILD_TAOSA_INTERNAL) MESSAGE("${Yellow} use taosa internal as httpd ${ColourReset}") ELSE () MESSAGE("") - MESSAGE("${Green} use taosadapter as httpd ${ColourReset}") + MESSAGE("${Green} use taosadapter as httpd, platform is ${PLATFORM_ARCH_STR} ${ColourReset}") EXECUTE_PROCESS( COMMAND git rev-parse --abbrev-ref HEAD @@ -26,7 +26,7 @@ ELSE () STRING(SUBSTRING "${taos_version}" 12 -1 taos_version) STRING(STRIP "${taos_version}" taos_version) ELSE () - STRING(CONCAT taos_version "branch_" "${taos_version}") + STRING(CONCAT taos_version "_branch_" "${taos_version}") STRING(STRIP "${taos_version}" taos_version) ENDIF () EXECUTE_PROCESS( @@ -61,7 +61,7 @@ ELSE () COMMAND git clean -f -d BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : + COMMAND curl -sL 
https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 68bd98dd5e0ed343e9a9966a8e75ffe4493a4cfb..33b95642b9d2757942d14d7608e7ae5169784f42 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -1157,7 +1157,9 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { monError("failed to save vgroup_%d info, reason: invalid row %s len, sql:%s", vgId, (char *)row[i], tsMonitor.sql); goto DONE; } - pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + char tmpBuf[10] = {0}; + memcpy(tmpBuf, row[i], charLen); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, tmpBuf); } else if (strcmp(fields[i].name, "onlines") == 0) { pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); } else if (v_dnode_str && strcmp(v_dnode_str, "_dnode") == 0) { diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter index 8f9501a30b1893c6616d644a924c995aa21ad957..6d401bb95e1125ce4aad012dc23191ed85af8b3b 160000 --- a/src/plugins/taosadapter +++ b/src/plugins/taosadapter @@ -1 +1 @@ -Subproject commit 8f9501a30b1893c6616d644a924c995aa21ad957 +Subproject commit 6d401bb95e1125ce4aad012dc23191ed85af8b3b diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index b6bf474a65f6a650087651c6571a7e9fbfa89d26..48e52e078aa0d7a14b5f38c11fbd76609b1f6cd4 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -5117,7 +5117,7 @@ SAggFunctionInfo aAggs[40] = {{ "twa", 
TSDB_FUNC_TWA, TSDB_FUNC_TWA, - TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS, twa_function_setup, twa_function, twa_function_finalizer, @@ -5393,7 +5393,7 @@ SAggFunctionInfo aAggs[40] = {{ "elapsed", TSDB_FUNC_ELAPSED, TSDB_FUNC_ELAPSED, - TSDB_BASE_FUNC_SO, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE, elapsedSetup, elapsedFunction, elapsedFinalizer, diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6d3751d9035a55f275aeca68b8d8a0af9d0594ad..bc3ac1fcdab336b93c79e2aaa801693578289640 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -340,9 +340,17 @@ SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numO const static int32_t minSize = 8; SSDataBlock *res = calloc(1, sizeof(SSDataBlock)); - res->info.numOfCols = numOfOutput; + if (res == NULL) { + qError("failed to allocate for output buffer"); + goto _clean; + } res->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); + if (res->pDataBlock == NULL) { + qError("failed to init arrary for data block of output buffer"); + goto _clean; + } + for (int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData idata = {{0}}; idata.info.type = pExpr[i].base.resType; @@ -351,10 +359,20 @@ SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numO int32_t size = MAX(idata.info.bytes * numOfRows, minSize); idata.pData = calloc(1, size); // at least to hold a pointer on x64 platform + if (idata.pData == NULL) { + qError("failed to allocate column buffer for output buffer"); + goto _clean; + } + taosArrayPush(res->pDataBlock, &idata); + res->info.numOfCols++; } return res; + +_clean: + destroyOutputBuf(res); + return NULL; } void* destroyOutputBuf(SSDataBlock* pBlock) { @@ -2088,17 +2106,26 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf switch (*op) { case OP_TagScan: { pRuntimeEnv->proot = 
createTagScanOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } case OP_MultiTableTimeInterval: { pRuntimeEnv->proot = createMultiTableTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); break; } case OP_TimeWindow: { pRuntimeEnv->proot = createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; if (opType != OP_DummyInput && opType != OP_Join) { setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); @@ -2108,6 +2135,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_TimeEvery: { pRuntimeEnv->proot = createTimeEveryOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; if (opType != OP_DummyInput && opType != OP_Join) { setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); @@ -2117,7 +2147,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Groupby: { pRuntimeEnv->proot = createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; if (opType != OP_DummyInput) { setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); @@ -2127,6 +2159,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t 
numOf case OP_SessionWindow: { pRuntimeEnv->proot = createSWindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; if (opType != OP_DummyInput) { setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); @@ -2136,13 +2171,18 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_MultiTableAggregate: { pRuntimeEnv->proot = createMultiTableAggOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); break; } case OP_Aggregate: { pRuntimeEnv->proot = createAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; if (opType != OP_DummyInput && opType != OP_Join) { setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); @@ -2162,11 +2202,18 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf assert(pQueryAttr->pExpr2 != NULL); pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); } + + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } case OP_StateWindow: { - pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; if (opType != OP_DummyInput) { 
setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); @@ -2176,6 +2223,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Limit: { pRuntimeEnv->proot = createLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } @@ -2187,12 +2237,18 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } } else { SColumnInfo* pColInfo = extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } } break; @@ -2201,11 +2257,17 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Fill: { SOperatorInfo* pInfo = pRuntimeEnv->proot; pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput, pQueryAttr->multigroupResult); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } case OP_MultiwayMergeSort: { pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, 200, merger); // TD-10899 + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } @@ -2217,6 +2279,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->proot = createGlobalAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, merger, pQueryAttr->pUdfInfo, multigroupResult); + if (pRuntimeEnv->proot 
== NULL) { + goto _clean; + } break; } @@ -2224,11 +2289,17 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf int32_t num = pRuntimeEnv->proot->numOfOutput; SExprInfo* pExpr = pRuntimeEnv->proot->pExpr; pRuntimeEnv->proot = createSLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pExpr, num, merger, pQueryAttr->multigroupResult); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } case OP_Distinct: { pRuntimeEnv->proot = createDistinctOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } @@ -2240,6 +2311,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order); } + if (pRuntimeEnv->proot == NULL) { + goto _clean; + } break; } @@ -4840,18 +4914,30 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr switch(tbScanner) { case OP_TableBlockInfoScan: { pRuntimeEnv->proot = createTableBlockInfoScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv); + if (pRuntimeEnv->proot == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } break; } case OP_TableSeqScan: { pRuntimeEnv->proot = createTableSeqScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv); + if (pRuntimeEnv->proot == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } break; } case OP_DataBlocksOptScan: { pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), pQueryAttr->needReverseScan? 
1:0); + if (pRuntimeEnv->proot == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } break; } case OP_TableScan: { pRuntimeEnv->proot = createTableScanOperator(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr)); + if (pRuntimeEnv->proot == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } break; } default: { // do nothing @@ -5168,6 +5254,10 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* assert(repeatTime > 0); STableScanInfo* pInfo = calloc(1, sizeof(STableScanInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->pQueryHandle = pTsdbQueryHandle; pInfo->times = repeatTime; pInfo->reverseTimes = 0; @@ -5175,6 +5265,11 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pInfo->current = 0; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + tfree(pInfo); + return NULL; + } + pOperator->name = "TableScanOperator"; pOperator->operatorType = OP_TableScan; pOperator->blockingOptr = false; @@ -5189,6 +5284,9 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv) { STableScanInfo* pInfo = calloc(1, sizeof(STableScanInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->pQueryHandle = pTsdbQueryHandle; pInfo->times = 1; @@ -5199,6 +5297,11 @@ SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeE pRuntimeEnv->enableGroupData = true; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + tfree(pInfo); + return NULL; + } + pOperator->name = "TableSeqScanOperator"; pOperator->operatorType = OP_TableSeqScan; pOperator->blockingOptr = false; @@ -5213,9 +5316,15 @@ SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeE SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv) { STableScanInfo* 
pInfo = calloc(1, sizeof(STableScanInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->pQueryHandle = pTsdbQueryHandle; pInfo->block.pDataBlock = taosArrayInit(1, sizeof(SColumnInfoData)); + if (pInfo->block.pDataBlock == NULL) { + goto _clean; + } SColumnInfoData infoData = {{0}}; infoData.info.type = TSDB_DATA_TYPE_BINARY; @@ -5224,6 +5333,11 @@ SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRu taosArrayPush(pInfo->block.pDataBlock, &infoData); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + taosArrayDestroy(&pInfo->block.pDataBlock); + goto _clean; + } + pOperator->name = "TableBlockInfoScanOperator"; pOperator->operatorType = OP_TableBlockInfoScan; pOperator->blockingOptr = false; @@ -5234,6 +5348,11 @@ SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRu pOperator->exec = doBlockInfoScan; return pOperator; + +_clean: + tfree(pInfo); + + return NULL; } void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInfo* pDownstream) { @@ -5301,6 +5420,10 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime assert(repeatTime > 0); STableScanInfo* pInfo = calloc(1, sizeof(STableScanInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->pQueryHandle = pTsdbQueryHandle; pInfo->times = repeatTime; pInfo->reverseTimes = reverseTime; @@ -5312,6 +5435,11 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime } SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo)); + if (pOptr == NULL) { + tfree(pInfo); + return NULL; + } + pOptr->name = "DataBlocksOptimizedScanOperator"; pOptr->operatorType = OP_DataBlocksOptScan; pOptr->pRuntimeEnv = pRuntimeEnv; @@ -5333,6 +5461,10 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) { pOrderColumns = taosArrayInit(4, sizeof(SColIndex)); } + if (pOrderColumns == NULL) { + return NULL; + } + if (pQuery->interval.interval > 0) { if (pOrderColumns == 
NULL) { pOrderColumns = taosArrayInit(1, sizeof(SColIndex)); @@ -5372,7 +5504,11 @@ SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) { pOrderColumns = taosArrayInit(4, sizeof(SColIndex)); } - for(int32_t i = 0; i < numOfCols; ++i) { + if (pOrderColumns == NULL) { + return NULL; + } + + for (int32_t i = 0; i < numOfCols; ++i) { SColIndex* index = taosArrayGet(pOrderColumns, i); bool found = false; @@ -5400,21 +5536,45 @@ static void destroyGlobalAggOperatorInfo(void* param, int32_t numOfOutput) { SMultiwayMergeInfo *pInfo = (SMultiwayMergeInfo*) param; destroyBasicOperatorInfo(&pInfo->binfo, numOfOutput); - taosArrayDestroy(&pInfo->orderColumnList); - taosArrayDestroy(&pInfo->groupColumnList); - tfree(pInfo->prevRow); - tfree(pInfo->currentGroupColData); + if (pInfo->orderColumnList) { + taosArrayDestroy(&pInfo->orderColumnList); + } + + if (pInfo->groupColumnList) { + taosArrayDestroy(&pInfo->groupColumnList); + } + + if (pInfo->prevRow) { + tfree(pInfo->prevRow); + } + + if (pInfo->currentGroupColData) { + tfree(pInfo->currentGroupColData); + } } + static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) { SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param; - taosArrayDestroy(&pInfo->orderColumnList); - pInfo->pRes = destroyOutputBuf(pInfo->pRes); - tfree(pInfo->prevRow); + + if (pInfo->orderColumnList) { + taosArrayDestroy(&pInfo->orderColumnList); + } + + if (pInfo->pRes) { + pInfo->pRes = destroyOutputBuf(pInfo->pRes); + } + + if (pInfo->prevRow) { + tfree(pInfo->prevRow); + } } SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo, bool groupResultMixedUp) { SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->resultRowFactor = (int32_t)(getRowNumForMultioutput(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); @@ -5430,6 +5590,10 
@@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, pInfo->orderColumnList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr); pInfo->groupColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr); + if (pInfo->binfo.pRes == NULL || pInfo->binfo.pCtx == NULL || pInfo->orderColumnList == NULL || pInfo->groupColumnList == NULL) { + goto _clean; + } + // TODO refactor int32_t len = 0; for(int32_t i = 0; i < numOfOutput; ++i) { @@ -5449,6 +5613,10 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, numOfCols = (pInfo->groupColumnList != NULL)? (int32_t)taosArrayGetSize(pInfo->groupColumnList):0; pInfo->currentGroupColData = calloc(1, (POINTER_BYTES * numOfCols + len)); + if (pInfo->currentGroupColData == NULL) { + goto _clean; + } + offset = POINTER_BYTES * numOfCols; for(int32_t i = 0; i < numOfCols; ++i) { @@ -5459,11 +5627,18 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, } initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } pInfo->seed = rand(); setDefaultOutputBuf(pRuntimeEnv, &pInfo->binfo, pInfo->seed, MERGE_STAGE); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + return NULL; + } + pOperator->name = "GlobalAggregate"; pOperator->operatorType = OP_GlobalAggregate; pOperator->blockingOptr = true; @@ -5478,17 +5653,30 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyGlobalAggOperatorInfo((void *) pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SExprInfo *pExpr, int32_t numOfOutput, int32_t numOfRows, void *merger) { SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->pMerge = 
merger; pInfo->bufCapacity = numOfRows; pInfo->orderColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows); + if (pInfo->orderColumnList == NULL || pInfo->binfo.pRes == NULL) { + goto _clean; + } + { // todo extract method to create prev compare buffer int32_t len = 0; for(int32_t i = 0; i < numOfOutput; ++i) { @@ -5508,6 +5696,10 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx } SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "MultiwaySortOperator"; pOperator->operatorType = OP_MultiwayMergeSort; pOperator->blockingOptr = false; @@ -5519,6 +5711,12 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx pOperator->exec = doMultiwayMergeSort; pOperator->cleanup = destroyGlobalAggOperatorInfo; return pOperator; + +_clean: + destroyGlobalAggOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { @@ -5595,11 +5793,22 @@ static SSDataBlock* doSort(void* param, bool* newgroup) { SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) { SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } { SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock)); + if (pDataBlock == NULL) { + goto _clean; + } + pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); - for(int32_t i = 0; i < numOfOutput; ++i) { + if (pDataBlock->pDataBlock == NULL) { + goto _clean; + } + + for (int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData col = {{0}}; col.info.colId = pExpr[i].base.colInfo.colId; col.info.bytes = pExpr[i].base.resBytes; @@ -5617,6 +5826,10 @@ SOperatorInfo 
*createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI } SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "InMemoryOrder"; pOperator->operatorType = OP_Order; pOperator->blockingOptr = true; @@ -5628,6 +5841,12 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyOrderOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) { @@ -5913,8 +6132,7 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) { if (pRuntimeEnv->currentOffset == 0) { break; - } - else if(srows > 0) { + } else if(srows > 0) { if(pRuntimeEnv->currentOffset - srows >= pBlock->info.rows) { pRuntimeEnv->currentOffset -= pBlock->info.rows; } else { @@ -7004,6 +7222,9 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) { SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; int32_t numOfRows = (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); @@ -7013,10 +7234,18 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->binfo.pRes == NULL || pInfo->binfo.pCtx == NULL || pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } + pInfo->seed = rand(); setDefaultOutputBuf(pRuntimeEnv, &pInfo->binfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "TableAggregate"; pOperator->operatorType = OP_Aggregate; 
pOperator->blockingOptr = true; @@ -7031,31 +7260,53 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyAggOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } static void doDestroyBasicInfo(SOptrBasicInfo* pInfo, int32_t numOfOutput) { assert(pInfo != NULL); - destroySQLFunctionCtx(pInfo->pCtx, numOfOutput); - tfree(pInfo->rowCellInfoOffset); + if (pInfo->pCtx) { + destroySQLFunctionCtx(pInfo->pCtx, numOfOutput); + } + + if (pInfo->rowCellInfoOffset) { + tfree(pInfo->rowCellInfoOffset); + } + + if (pInfo->resultRowInfo.pResult) { + cleanupResultRowInfo(&pInfo->resultRowInfo); + } - cleanupResultRowInfo(&pInfo->resultRowInfo); - pInfo->pRes = destroyOutputBuf(pInfo->pRes); + if (pInfo->pRes) { + pInfo->pRes = destroyOutputBuf(pInfo->pRes); + } } static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) { SOptrBasicInfo* pInfo = (SOptrBasicInfo*) param; doDestroyBasicInfo(pInfo, numOfOutput); } + static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) { SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); - tfree(pInfo->prevData); + + if (pInfo->prevData) { + tfree(pInfo->prevData); + } } + static void destroyAggOperatorInfo(void* param, int32_t numOfOutput) { SAggOperatorInfo* pInfo = (SAggOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); } + static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) { SSWindowOperatorInfo* pInfo = (SSWindowOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); @@ -7063,15 +7314,27 @@ static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) { static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) { SFillOperatorInfo* pInfo = (SFillOperatorInfo*) param; - pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo); - pInfo->pRes = 
destroyOutputBuf(pInfo->pRes); - tfree(pInfo->p); + + if (pInfo->pFillInfo) { + pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo); + } + + if (pInfo->pRes) { + pInfo->pRes = destroyOutputBuf(pInfo->pRes); + } + + if (pInfo->p) { + tfree(pInfo->p); + } } static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) { SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); - tfree(pInfo->prevData); + + if (pInfo->prevData) { + tfree(pInfo->prevData); + } } static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { @@ -7082,18 +7345,27 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { static void destroyTimeEveryOperatorInfo(void* param, int32_t numOfOutput) { STimeEveryOperatorInfo* pInfo = (STimeEveryOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); - taosHashCleanup(pInfo->rangeStart); + + if (pInfo->rangeStart) { + taosHashCleanup(pInfo->rangeStart); + } } static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { STagScanInfo* pInfo = (STagScanInfo*) param; - pInfo->pRes = destroyOutputBuf(pInfo->pRes); + + if (pInfo->pRes) { + pInfo->pRes = destroyOutputBuf(pInfo->pRes); + } } static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param; - pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock); + + if (pInfo->pDataBlock) { + pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock); + } } static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { @@ -7103,14 +7375,29 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { static void destroyDistinctOperatorInfo(void* param, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param; - taosHashCleanup(pInfo->pSet); - tfree(pInfo->buf); - taosArrayDestroy(&pInfo->pDistinctDataInfo); - pInfo->pRes = destroyOutputBuf(pInfo->pRes); + + 
if (pInfo->pSet) { + taosHashCleanup(pInfo->pSet); + } + + if (pInfo->buf) { + tfree(pInfo->buf); + } + + if (pInfo->pDistinctDataInfo) { + taosArrayDestroy(&pInfo->pDistinctDataInfo); + } + + if (pInfo->pRes) { + pInfo->pRes = destroyOutputBuf(pInfo->pRes); + } } SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } size_t tableGroup = GET_NUM_OF_TABLEGROUP(pRuntimeEnv); @@ -7118,7 +7405,15 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)tableGroup, TSDB_DATA_TYPE_INT); + if (pInfo->binfo.pRes == NULL || pInfo->binfo.pCtx == NULL || pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "MultiTableAggregate"; pOperator->operatorType = OP_MultiTableAggregate; pOperator->blockingOptr = true; @@ -7133,10 +7428,19 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyAggOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SProjectOperatorInfo* pInfo = calloc(1, sizeof(SProjectOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->seed = rand(); pInfo->bufCapacity = pRuntimeEnv->resultInfo.capacity; @@ -7146,9 +7450,18 @@ SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato pBInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, 
&pBInfo->rowCellInfoOffset); initResultRowInfo(&pBInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT); + + if (pInfo->binfo.pRes == NULL || pInfo->binfo.pCtx == NULL || pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } + setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "ProjectOperator"; pOperator->operatorType = OP_Project; pOperator->blockingOptr = false; @@ -7163,6 +7476,12 @@ SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyProjectOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SColumnInfo* extractColumnFilterInfo(SExprInfo* pExpr, int32_t numOfOutput, int32_t* numOfFilterCols) { @@ -7197,12 +7516,18 @@ SColumnInfo* extractColumnFilterInfo(SExprInfo* pExpr, int32_t numOfOutput, int3 SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter) { SFilterOperatorInfo* pInfo = calloc(1, sizeof(SFilterOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } assert(numOfFilter > 0 && pCols != NULL); doCreateFilterInfo(pCols, numOfOutput, numOfFilter, &pInfo->pFilterInfo, 0); pInfo->numOfFilterCols = numOfFilter; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } pOperator->name = "FilterOperator"; pOperator->operatorType = OP_Filter; @@ -7217,13 +7542,27 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyConditionOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) { SLimitOperatorInfo* pInfo 
= calloc(1, sizeof(SLimitOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->limit = pRuntimeEnv->pQueryAttr->limit.limit; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + tfree(pInfo); + return NULL; + } pOperator->name = "LimitOperator"; pOperator->operatorType = OP_Limit; @@ -7239,12 +7578,22 @@ SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->rowCellInfoOffset); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->pRes == NULL || pInfo->pCtx == NULL || pInfo->resultRowInfo.pResult == NULL) { + goto _clean; + } + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } pOperator->name = "TimeIntervalAggOperator"; pOperator->operatorType = OP_TimeWindow; @@ -7259,12 +7608,22 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyBasicOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createTimeEveryOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { STimeEveryOperatorInfo* pInfo = calloc(1, sizeof(STimeEveryOperatorInfo)); - SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; + if (pInfo == NULL) { + return NULL; + } + + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; pInfo->seed = rand(); pInfo->bufCapacity = pRuntimeEnv->resultInfo.capacity; @@ -7280,9 +7639,20 @@ SOperatorInfo* 
createTimeEveryOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera } initResultRowInfo(&pBInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT); + + if (pBInfo->pRes == NULL || pBInfo->pCtx == NULL || pBInfo->resultRowInfo.pResult == NULL || + (pQueryAttr->needReverseScan && pInfo->rangeStart == NULL)) + { + goto _clean; + } + setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "TimeEveryOperator"; pOperator->operatorType = OP_TimeEvery; pOperator->blockingOptr = false; @@ -7297,18 +7667,36 @@ SOperatorInfo* createTimeEveryOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyTimeEveryOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->colIndex = -1; pInfo->reptScan = false; pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->binfo.pCtx == NULL || pInfo->binfo.pRes == NULL || pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "StateWindowOperator"; pOperator->operatorType = OP_StateWindow; pOperator->blockingOptr = true; @@ -7322,17 +7710,34 @@ SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpe appendUpstream(pOperator, upstream); return pOperator; + +_clean: + 
destroyStateWindowOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } + SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->binfo.pCtx == NULL || pInfo->binfo.pRes == NULL || pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } + pInfo->prevTs = INT64_MIN; pInfo->reptScan = false; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } pOperator->name = "SessionWindowAggOperator"; pOperator->operatorType = OP_SessionWindow; @@ -7347,16 +7752,33 @@ SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyStateWindowOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } pInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->rowCellInfoOffset); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->pCtx == NULL || pInfo->pRes == NULL || pInfo->resultRowInfo.pResult == NULL) { + goto _clean; + } + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto 
_clean; + } + pOperator->name = "MultiTableTimeIntervalOperator"; pOperator->operatorType = OP_MultiTableTimeInterval; pOperator->blockingOptr = true; @@ -7371,14 +7793,22 @@ SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRunti appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyBasicOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SGroupbyOperatorInfo* pInfo = calloc(1, sizeof(SGroupbyOperatorInfo)); - pInfo->colIndex = -1; // group by column index - + if (pInfo == NULL) { + return NULL; + } + pInfo->colIndex = -1; // group by column index pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; @@ -7389,7 +7819,15 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); + if (pInfo->binfo.pCtx == NULL || pInfo->binfo.pRes == NULL || pInfo->binfo.resultRowInfo.pResult == NULL) { + goto _clean; + } + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "GroupbyAggOperator"; pOperator->blockingOptr = true; pOperator->status = OP_IN_EXECUTING; @@ -7403,16 +7841,34 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyGroupbyOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult) { SFillOperatorInfo* pInfo = calloc(1, 
sizeof(SFillOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); + if (pInfo->pRes == NULL) { + goto _clean; + } + pInfo->multigroupResult = multigroupResult; { SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfOutput, pQueryAttr->fillVal); + if (pColInfo == NULL) { + goto _clean; + } + STimeWindow w = TSWINDOW_INITIALIZER; TSKEY sk = MIN(pQueryAttr->window.skey, pQueryAttr->window.ekey); @@ -7423,11 +7879,20 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn taosCreateFillInfo(pQueryAttr->order.order, w.skey, 0, (int32_t)pRuntimeEnv->resultInfo.capacity, numOfOutput, pQueryAttr->interval.sliding, pQueryAttr->interval.slidingUnit, (int8_t)pQueryAttr->precision, pQueryAttr->fillType, pColInfo, pRuntimeEnv->qinfo); + if (pInfo->pFillInfo == NULL) { + goto _clean; + } pInfo->p = calloc(pInfo->pFillInfo->numOfCols, POINTER_BYTES); + if (pInfo->p == NULL) { + goto _clean; + } } SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } pOperator->name = "FillOperator"; pOperator->blockingOptr = false; @@ -7442,14 +7907,27 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroySFillOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* pMerger, bool multigroupResult) { SSLimitOperatorInfo* pInfo = calloc(1, sizeof(SSLimitOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; pInfo->orderColumnList = getResultGroupCheckColumns(pQueryAttr); + if (pInfo->orderColumnList == NULL) { + goto _clean; + } + pInfo->slimit = pQueryAttr->slimit; 
pInfo->limit = pQueryAttr->limit; pInfo->capacity = pRuntimeEnv->resultInfo.capacity; @@ -7466,6 +7944,9 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfCols = (pInfo->orderColumnList != NULL)? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0; pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len)); + if (pInfo->prevRow == NULL) { + goto _clean; + } int32_t offset = POINTER_BYTES * numOfCols; for(int32_t i = 0; i < numOfCols; ++i) { @@ -7479,6 +7960,10 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pInfo->pRes == NULL || pOperator == NULL) { + goto _clean; + } + pOperator->name = "SLimitOperator"; pOperator->operatorType = OP_SLimit; pOperator->blockingOptr = false; @@ -7490,6 +7975,12 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroySlimitOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } static SSDataBlock* doTagScan(void* param, bool* newgroup) { @@ -7640,7 +8131,14 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) { SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput) { STagScanInfo* pInfo = calloc(1, sizeof(STagScanInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); + if (pInfo->pRes == NULL) { + goto _clean; + } size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pRuntimeEnv); assert(numOfGroup == 0 || numOfGroup == 1); @@ -7649,6 +8147,10 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf pInfo->curPos = 0; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "SeqTableTagScan"; pOperator->operatorType = OP_TagScan; 
pOperator->blockingOptr = false; @@ -7661,7 +8163,14 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf pOperator->cleanup = destroyTagScanOperatorInfo; return pOperator; + +_clean: + destroyTagScanOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } + static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) { if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) { // distinct info already inited @@ -7778,6 +8287,10 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); + if (pInfo == NULL) { + return NULL; + } + pInfo->totalBytes = 0; pInfo->buf = NULL; pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold @@ -7786,8 +8299,15 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); + if (pInfo->pDistinctDataInfo == NULL || pInfo->pSet == NULL || pInfo->pRes == NULL) { + goto _clean; + } SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + if (pOperator == NULL) { + goto _clean; + } + pOperator->name = "DistinctOperator"; pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; @@ -7802,6 +8322,12 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat appendUpstream(pOperator, upstream); return pOperator; + +_clean: + destroyDistinctOperatorInfo((void *)pInfo, numOfOutput); + tfree(pInfo); + + return NULL; } static int32_t getColumnIndexInSource(SQueriedTableInfo *pTableInfo, SSqlExpr *pExpr, SColumnInfo* pTagCols) { diff --git 
a/src/query/src/qFill.c b/src/query/src/qFill.c index 9694dac7db152d1b5851629fe349655002c474e8..dbe385e249e19f77786538f344ef6f6485166fda 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -354,6 +354,10 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3 } SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo)); + if (pFillInfo == NULL) { + return NULL; + } + taosResetFillInfo(pFillInfo, skey); pFillInfo->order = order; @@ -371,6 +375,10 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3 pFillInfo->interval.slidingUnit = slidingUnit; pFillInfo->pData = malloc(POINTER_BYTES * numOfCols); + if (pFillInfo->pData == NULL) { + tfree(pFillInfo); + return NULL; + } // if (numOfTags > 0) { pFillInfo->pTags = calloc(numOfCols, sizeof(SFillTagColInfo)); diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index 9afd9609ee7b8817a390b0e12d705e5d678593aa..5af5dea75326c6c6d041fd9e7ab1a9df44fdae37 100644 --- a/src/query/src/qFilter.c +++ b/src/query/src/qFilter.c @@ -927,7 +927,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u); assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)); } else { - assert(optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL || optr == FILTER_DUMMY_EMPTY_OPTR); + if(optr != TSDB_RELATION_ISNULL && optr != TSDB_RELATION_NOTNULL && optr != FILTER_DUMMY_EMPTY_OPTR) { + return -1; + } } SFilterField *col = FILTER_UNIT_LEFT_FIELD(info, u); @@ -1257,7 +1259,8 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g } else { filterAddFieldFromNode(info, tree->_node.pRight, &right); - filterAddUnit(info, tree->_node.optr, &left, &right, &uidx); + ret = filterAddUnit(info, tree->_node.optr, &left, &right, &uidx); + CHK_LRET(ret != TSDB_CODE_SUCCESS, TSDB_CODE_QRY_APP_ERROR, "invalid where condition"); SFilterGroup fgroup = {0}; filterAddUnitToGroup(&fgroup, 
uidx); @@ -1282,7 +1285,7 @@ int32_t filterAddUnitFromUnit(SFilterInfo *dst, SFilterInfo *src, SFilterUnit* u void *data = FILTER_UNIT_VAL_DATA(src, u); if (IS_VAR_DATA_TYPE(type)) { if (FILTER_UNIT_OPTR(u) == TSDB_RELATION_IN) { - filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, 0, false); + filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, sizeof(SHashObj), false); t = FILTER_GET_FIELD(dst, right); @@ -1574,7 +1577,9 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit); SSchema *sch = left->desc; - len = sprintf(str, "UNIT[%d] => [%d][%s] %s [", i, sch->colId, sch->name, gOptrStr[unit->compare.optr].str); + if (unit->compare.optr >= TSDB_RELATION_INVALID && unit->compare.optr <= TSDB_RELATION_CONTAINS){ + len = sprintf(str, "UNIT[%d] => [%d][%s] %s [", i, sch->colId, sch->name, gOptrStr[unit->compare.optr].str); + } if (unit->right.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) { SFilterField *right = FILTER_UNIT_RIGHT_FIELD(info, unit); @@ -1591,7 +1596,9 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) if (unit->compare.optr2) { strcat(str, " && "); - sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str); + if (unit->compare.optr2 >= TSDB_RELATION_INVALID && unit->compare.optr2 <= TSDB_RELATION_CONTAINS){ + sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str); + } if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) { SFilterField *right = FILTER_UNIT_RIGHT2_FIELD(info, unit); @@ -3588,7 +3595,7 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar if (FILTER_EMPTY_RES(info) || FILTER_ALL_RES(info)) { return TSDB_CODE_SUCCESS; } - + for (uint32_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) { SFilterField* fi = 
&info->fields[FLD_TYPE_COLUMN].fields[i]; int32_t type = FILTER_GET_COL_FIELD_TYPE(fi); @@ -3602,6 +3609,15 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar char *src = FILTER_GET_COL_FIELD_DATA(fi, j); char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j); int32_t len = 0; + char *varSrc = varDataVal(src); + size_t k = 0, varSrcLen = varDataLen(src); + while (k < varSrcLen && varSrc[k++] == -1) {} + if (k == varSrcLen) { + /* NULL */ + varDataLen(dst) = (VarDataLenT) varSrcLen; + varDataCopy(dst, src); + continue; + } bool ret = taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len); if(!ret) { qError("filterConverNcharColumns taosMbsToUcs4 error"); diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index f927287015bf56f09c99d992b18fd2d226cb15f5..707be41c5842efe1810eb8a478e9e25ad402a8ba 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -18,6 +18,7 @@ #include "taosdef.h" #include "taosmsg.h" #include "tcmdtype.h" +#include "tcompare.h" #include "tstrbuild.h" #include "ttoken.h" #include "ttokendef.h" @@ -318,12 +319,17 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { } if ((pLeft != NULL && pRight != NULL) && - (optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM)) { + (optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM || + optrType == TK_EQ || optrType == TK_NE || optrType == TK_LT || optrType == TK_GT || optrType == TK_LE || optrType == TK_GE || + optrType == TK_AND || optrType == TK_OR)) { /* * if a exprToken is noted as the TK_TIMESTAMP, the time precision is microsecond * Otherwise, the time precision is adaptive, determined by the time precision from databases. 
*/ if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) || + (pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_BOOL) || + (pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_BOOL) || + (pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_INTEGER) || (pLeft->tokenId == TK_TIMESTAMP && pRight->tokenId == TK_TIMESTAMP)) { pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; pExpr->tokenId = pLeft->tokenId; @@ -360,12 +366,46 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pExpr->value.i64 = pLeft->value.i64 % pRight->value.i64; break; } + case TK_EQ: { + pExpr->value.i64 = (pLeft->value.i64 == pRight->value.i64) ? 1 : 0; + break; + } + case TK_NE: { + pExpr->value.i64 = (pLeft->value.i64 != pRight->value.i64) ? 1 : 0; + break; + } + case TK_LT: { + pExpr->value.i64 = (pLeft->value.i64 < pRight->value.i64) ? 1 : 0; + break; + } + case TK_GT: { + pExpr->value.i64 = (pLeft->value.i64 > pRight->value.i64) ? 1 : 0; + break; + } + case TK_LE: { + pExpr->value.i64 = (pLeft->value.i64 <= pRight->value.i64) ? 1 : 0; + break; + } + case TK_GE: { + pExpr->value.i64 = (pLeft->value.i64 >= pRight->value.i64) ? 1 : 0; + break; + } + case TK_AND: { + pExpr->value.i64 = (pLeft->value.i64 && pRight->value.i64) ? 1 : 0; + break; + } + case TK_OR: { + pExpr->value.i64 = (pLeft->value.i64 || pRight->value.i64) ? 
1 : 0; + break; + } } tSqlExprDestroy(pLeft); tSqlExprDestroy(pRight); } else if ((pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_INTEGER) || (pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_FLOAT) || + (pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_FLOAT) || + (pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_BOOL) || (pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_FLOAT)) { pExpr->value.nType = TSDB_DATA_TYPE_DOUBLE; pExpr->tokenId = TK_FLOAT; @@ -395,6 +435,80 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pExpr->value.dKey = left - ((int64_t)(left / right)) * right; break; } + case TK_EQ: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) == 0) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_NE: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) != 0) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_LT: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) == -1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_GT: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) == 1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_LE: { + int32_t res = compareDoubleVal(&left, &right); + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (res == 0 || res == -1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_GE: { + int32_t res = compareDoubleVal(&left, &right); + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (res == 0 || res == 1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; 
+ } + case TK_AND: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + pExpr->value.i64 = (left && right) ? 1 : 0; + break; + } + case TK_OR: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + pExpr->value.i64 = (left || right) ? 1 : 0; + break; + } } tSqlExprDestroy(pLeft); @@ -505,7 +619,7 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr)); memcpy(pExpr, pSrc, sizeof(*pSrc)); - + if (pSrc->pLeft) { pExpr->pLeft = tSqlExprClone(pSrc->pLeft); } @@ -518,7 +632,7 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { tVariantAssign(&pExpr->value, &pSrc->value); //we don't clone paramList now because clone is only used for between/and - assert(pSrc->Expr.paramList == NULL); + pExpr->Expr.paramList = NULL; return pExpr; } diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index c2bebaeee6cc21acab197e92b77358ddba42b0ff..ddc5b2de3730ef2e0e76bca9660643a5d9a977aa 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -963,9 +963,14 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont terrno = TSDB_CODE_RPC_INVALID_SESSION_ID; return NULL; } - if (rpcIsReq(pHead->msgType) && htonl(pHead->msgVer) != tsVersion >> 8) { - tDebug("%s sid:%d, invalid client version:%x/%x %s", pRpc->label, sid, htonl(pHead->msgVer), tsVersion, taosMsg[pHead->msgType]); - terrno = TSDB_CODE_RPC_INVALID_VERSION; return NULL; + // compatibility between old version client and new version server, since 2.4.0.0 + if (rpcIsReq(pHead->msgType)){ + if((htonl(pHead->msgVer) >> 16 != tsVersion >> 24) || + ((htonl(pHead->msgVer) >> 16 == tsVersion >> 24) && htonl(pHead->msgVer) < ((2 << 16) | (4 << 8)))){ + tError("%s sid:%d, invalid client version:%x/%x %s", pRpc->label, sid, htonl(pHead->msgVer), tsVersion, taosMsg[pHead->msgType]); + terrno = TSDB_CODE_RPC_INVALID_VERSION; + return NULL; + } } pConn = rpcGetConnObj(pRpc, sid, pRecv); @@ -983,7 +988,8 @@ static SRpcConn 
*rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont sid = pConn->sid; if (pConn->chandle == NULL) pConn->chandle = pRecv->chandle; - pConn->peerIp = pRecv->ip; + pConn->peerIp = pRecv->ip; + pConn->peerPort = pRecv->port; if (pHead->port) pConn->peerPort = htons(pHead->port); diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 9b14084d29e2e5c3ca2ed4c54c069e5ca017005e..fa33d436d4a7cea62e235c683f76ce1b9dc4d5b8 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -599,6 +599,7 @@ void taosHashClear(SHashObj *pHashObj) { __wr_unlock(&pHashObj->lock, pHashObj->type); } +// the input paras should be SHashObj **, so the origin input will be set by tfree(*pHashObj) void taosHashCleanup(SHashObj *pHashObj) { if (pHashObj == NULL) { return; diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 266d55fc4f85a2cda27bc73cefa6560629a93962..24263f61b5ba442d016356ef9c7eca99bd4211a8 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -266,6 +266,7 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat int32_t j = 0; int32_t o = 0; int32_t m = 0; + char escape = '\\'; // "\" while ((c = patterStr[i++]) != 0) { if (c == pInfo->matchAll) { /* Match "*" */ @@ -308,13 +309,30 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat ++o; if (j <= size) { - if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; } - if (c == '\\' && patterStr[i] == '%' && c1 == '%') { i++; continue; } + if (c == escape && patterStr[i] == pInfo->matchOne){ + if(c1 == pInfo->matchOne){ + i++; + continue; + } + else{ + return TSDB_PATTERN_NOMATCH; + } + } + if (c == escape && patterStr[i] == pInfo->matchAll){ + if(c1 == pInfo->matchAll){ + i++; + continue; + } + else{ + return TSDB_PATTERN_NOMATCH; + } + } if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } } + return TSDB_PATTERN_NOMATCH; } @@ -428,8 +446,24 @@ int WCSPatternMatch(const 
uint32_t *patterStr, const uint32_t *str, size_t size, c1 = str[j++]; if (j <= size) { - if (c == escape && patterStr[i] == matchOne && c1 == matchOne) { i++; continue; } - if (c == escape && patterStr[i] == matchAll && c1 == matchAll) { i++; continue; } + if (c == escape && patterStr[i] == matchOne){ + if(c1 == matchOne){ + i++; + continue; + } + else{ + return TSDB_PATTERN_NOMATCH; + } + } + if (c == escape && patterStr[i] == matchAll){ + if(c1 == matchAll){ + i++; + continue; + } + else{ + return TSDB_PATTERN_NOMATCH; + } + } if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) { continue; } diff --git a/tests/develop-test/1-insert/uppercase_in_stmt.py b/tests/develop-test/1-insert/uppercase_in_stmt.py new file mode 100644 index 0000000000000000000000000000000000000000..b9372b72cb1f5c30e32dda847bdd2dbe6611fd6b --- /dev/null +++ b/tests/develop-test/1-insert/uppercase_in_stmt.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from taos import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12977] fix invalid upper case table name of stmt api + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self._dbname = "TD12977" + + def run(self): + tdSql.prepare() + + self._conn.execute("drop database if exists %s" % self._dbname) + self._conn.execute("create database if not exists %s" % self._dbname) + self._conn.select_db(self._dbname) + + self._conn.execute("create stable STB(ts timestamp, n int) tags(b int)") + + stmt = self._conn.statement("insert into ? using STB tags(?) 
values(?, ?)") + params = new_bind_params(1) + params[0].int(4); + stmt.set_tbname_tags("ct", params); + + multi_params = new_multi_binds(2); + multi_params[0].timestamp([1626861392589, 1626861392590]) + multi_params[1].int([123,456]) + stmt.bind_param_batch(multi_params) + + stmt.execute() + + tdSql.query("select * from stb") + tdSql.checkRows(2) + stmt.close() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/constant_compare.py b/tests/develop-test/2-query/constant_compare.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ca27e63ce0db8e3bd05009e1d9aab4e7995981 --- /dev/null +++ b/tests/develop-test/2-query/constant_compare.py @@ -0,0 +1,1139 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12945] : taos shell crash when constant comparison cause crash + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute('use db') + + #Prepare data + tdSql.execute("create table tb (ts timestamp, value int);") + tdSql.execute("insert into tb values (now, 123);") + + ##operator: = + tdSql.query('select 1 = 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number 
comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 = 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 = 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 = 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 = 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true = 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true = 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" = "def" from tb;') + tdSql.error('select "abc" = 1 from tb;') + tdSql.error('select 1 = "abc" from tb;') + tdSql.error('select "abc" = 1.0 from tb;') + tdSql.error('select 1.0 = "abc" from tb;') + tdSql.error('select "abc" = true from tb;') + tdSql.error('select false = "abc" from tb;') + tdSql.error('select \'abc\' = \'def\' from tb;') + tdSql.error('select \'abc\' = 1 from tb;') + tdSql.error('select 1 = \'abc\' from tb;') + tdSql.error('select \'abc\' = 1.0 from tb;') 
+ tdSql.error('select 1.0 = \'abc\' from tb;') + tdSql.error('select \'abc\' = true from tb;') + tdSql.error('select false = \'abc\' from tb;') + + + ##operator: != + tdSql.query('select 1 != 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 != 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 != 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 != 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 != 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true 
!= 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true != 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" != "def" from tb;') + tdSql.error('select "abc" != 1 from tb;') + tdSql.error('select 1 != "abc" from tb;') + tdSql.error('select "abc" != 1.0 from tb;') + tdSql.error('select 1.0 != "abc" from tb;') + tdSql.error('select "abc" != true from tb;') + tdSql.error('select false != "abc" from tb;') + tdSql.error('select \'abc\' != \'def\' from tb;') + tdSql.error('select \'abc\' != 1 from tb;') + tdSql.error('select 1 != \'abc\' from tb;') + tdSql.error('select \'abc\' != 1.0 from tb;') + tdSql.error('select 1.0 != \'abc\' from tb;') + tdSql.error('select \'abc\' != true from tb;') + tdSql.error('select false != \'abc\' from tb;') + + ##operator: <> + tdSql.query('select 1 <> 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <> 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <> 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <> 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <> 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <> 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <> 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <> 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> true from 
tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <> 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <> 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" <> "def" from tb;') + tdSql.error('select "abc" <> 1 from tb;') + tdSql.error('select 1 <> "abc" from tb;') + tdSql.error('select "abc" <> 1.0 from tb;') + tdSql.error('select 1.0 <> "abc" from tb;') + tdSql.error('select "abc" <> true from tb;') + tdSql.error('select false <> "abc" from tb;') + tdSql.error('select \'abc\' <> \'def\' from tb;') + tdSql.error('select \'abc\' <> 1 from tb;') + tdSql.error('select 1 <> \'abc\' from tb;') + tdSql.error('select \'abc\' <> 1.0 from tb;') + tdSql.error('select 1.0 <> \'abc\' from tb;') + tdSql.error('select \'abc\' <> true from tb;') + tdSql.error('select false <> \'abc\' from tb;') + + ##operator: < + tdSql.query('select 1 < 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 < 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 < 1 from tb;') + tdSql.checkRows(1) + 
tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 < 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 < 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true < false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 < false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true < 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true < 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" < "def" from tb;') + tdSql.error('select "abc" < 1 from tb;') + tdSql.error('select 1 < "abc" from tb;') + tdSql.error('select "abc" < 1.0 from tb;') + tdSql.error('select 1.0 < "abc" from tb;') + tdSql.error('select "abc" < true from tb;') + 
tdSql.error('select false < "abc" from tb;') + tdSql.error('select \'abc\' < \'def\' from tb;') + tdSql.error('select \'abc\' < 1 from tb;') + tdSql.error('select 1 < \'abc\' from tb;') + tdSql.error('select \'abc\' < 1.0 from tb;') + tdSql.error('select 1.0 < \'abc\' from tb;') + tdSql.error('select \'abc\' < true from tb;') + tdSql.error('select false < \'abc\' from tb;') + + ##operator: > + tdSql.query('select 1 > 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 > 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 > 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 > 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0001 > 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 > 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 > 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 > 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 > 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 > 1.0 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) 
+ + tdSql.query('select 1.0 > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true > 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 > true from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" > "def" from tb;') + tdSql.error('select "abc" > 1 from tb;') + tdSql.error('select 1 > "abc" from tb;') + tdSql.error('select "abc" > 1.0 from tb;') + tdSql.error('select 1.0 > "abc" from tb;') + tdSql.error('select "abc" > true from tb;') + tdSql.error('select false > "abc" from tb;') + tdSql.error('select \'abc\' > \'def\' from tb;') + tdSql.error('select \'abc\' > 1 from tb;') + tdSql.error('select 1 > \'abc\' from tb;') + tdSql.error('select \'abc\' > 1.0 from tb;') + tdSql.error('select 1.0 > \'abc\' from tb;') + tdSql.error('select \'abc\' > true from tb;') + tdSql.error('select false > \'abc\' from tb;') + + ##operator: <= + tdSql.query('select 1 <= 2 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <= 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + 
tdSql.query('select 1.0 <= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <= 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false <= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <= 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" <= "def" from tb;') + tdSql.error('select "abc" <= 1 from tb;') + tdSql.error('select 1 <= "abc" from tb;') + tdSql.error('select "abc" <= 1.0 from tb;') + tdSql.error('select 1.0 <= "abc" from tb;') + tdSql.error('select "abc" <= true from tb;') + tdSql.error('select false <= "abc" from tb;') + tdSql.error('select \'abc\' 
<= \'def\' from tb;') + tdSql.error('select \'abc\' <= 1 from tb;') + tdSql.error('select 1 <= \'abc\' from tb;') + tdSql.error('select \'abc\' <= 1.0 from tb;') + tdSql.error('select 1.0 <= \'abc\' from tb;') + tdSql.error('select \'abc\' <= true from tb;') + tdSql.error('select false <= \'abc\' from tb;') + + ##operator: >= + tdSql.query('select 1 >= 2 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 >= 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 >= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0001 >= 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 >= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 >= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 >= 1.0 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true >= false from tb;') + 
tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true >= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 >= true from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" >= "def" from tb;') + tdSql.error('select "abc" >= 1 from tb;') + tdSql.error('select 1 >= "abc" from tb;') + tdSql.error('select "abc" >= 1.0 from tb;') + tdSql.error('select 1.0 >= "abc" from tb;') + tdSql.error('select "abc" >= true from tb;') + tdSql.error('select false >= "abc" from tb;') + tdSql.error('select \'abc\' >= \'def\' from tb;') + tdSql.error('select \'abc\' >= 1 from tb;') + tdSql.error('select 1 >= \'abc\' from tb;') + tdSql.error('select \'abc\' >= 1.0 from tb;') + tdSql.error('select 1.0 >= \'abc\' from tb;') + tdSql.error('select \'abc\' >= true from tb;') + tdSql.error('select false >= \'abc\' from tb;') + + ##operator: between and + tdSql.query('select 1 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 3 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 
between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 3 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 3.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2.0001 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2.0000000001 between 2 and 4 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2 between 2.0001 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 2 between 2.000000001 and 4 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + + tdSql.query('select 4 between 2 and 4.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4 between 2 and 4.000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4.0001 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 4.000000001 between 2 and 4 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false between 0 and 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + 
tdSql.query('select false between 1 and 2 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true between 0 and 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true between -1 and 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false between 0.0 and 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false between 1.0 and 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true between 0.0 and 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true between -1.0 and 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 between false and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between true and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 between false and 10.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between true and 10.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" between "def" and "ghi" from tb;') + tdSql.error('select "abc" between 1 and 2 from tb;') + tdSql.error('select "abc" between 1.0 and 2.0 from tb;') + tdSql.error('select "abc" between true and false from tb;') + tdSql.error('select 1 between 1.0 and "cde" from tb;') + tdSql.error('select 1.0 between true and "cde" from tb;') + tdSql.error('select true between 
1 and "cde" from tb;') + tdSql.error('select 1 between "abc" and 1.0 from tb;') + tdSql.error('select 1.0 between "abc" and true from tb;') + tdSql.error('select true between "abc" and 1 from tb;') + + tdSql.error('select \'abc\' between \'def\' and \'ghi\' from tb;') + tdSql.error('select \'abc\' between 1 and 2 from tb;') + tdSql.error('select \'abc\' between 1.0 and 2.0 from tb;') + tdSql.error('select \'abc\' between true and false from tb;') + tdSql.error('select 1 between 1.0 and \'cde\' from tb;') + tdSql.error('select 1.0 between true and \'cde\' from tb;') + tdSql.error('select true between 1 and \'cde\' from tb;') + tdSql.error('select 1 between \'abc\' and 1.0 from tb;') + tdSql.error('select 1.0 between \'abc\' and true from tb;') + tdSql.error('select true between \'abc\' and 1 from tb;') + + ##operator: and + tdSql.query('select 10 and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 and 10.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10 and 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 10.0 and 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 10.0 and 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 10.0 and 0.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 and 0.000000000000000001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true and 10 and false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true and 10.0 and false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 and 2 and 3 and 10.1 and -20.02 and 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 and 2 and 3 and 0 and 
20.02 and 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 and 2 and 3 and 0.0 and 20.02 and 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" and "def" from tb;') + tdSql.error('select "abc" and 1 from tb;') + tdSql.error('select 1 and "abc" from tb;') + tdSql.error('select "abc" and 1.0 from tb;') + tdSql.error('select 1.0 and abc from tb;') + tdSql.error('select "abc" and true from tb;') + tdSql.error('select false and "abc" from tb;') + tdSql.error('select 1 and "abc" and 1.0 and true and false and 0 from tb;') + tdSql.error('select 1 and "abc" and 1.0 and "cde" and false and 0 from tb;') + tdSql.error('select 1 and "abc" and 1.0 and "cde" and false and "fhi" from tb;') + + tdSql.error('select \'abc\' and \'def\' from tb;') + tdSql.error('select \'abc\' and 1 from tb;') + tdSql.error('select 1 and \'abc\' from tb;') + tdSql.error('select \'abc\' and 1.0 from tb;') + tdSql.error('select 1.0 and abc from tb;') + tdSql.error('select \'abc\' and true from tb;') + tdSql.error('select false and \'abc\' from tb;') + tdSql.error('select 1 and \'abc\' and 1.0 and true and false and 0 from tb;') + tdSql.error('select 1 and \'abc\' and 1.0 and \'cde\' and false and "fhi" from tb;') + + + ##operator: or + tdSql.query('select 10 or 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 or 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10 or 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 or 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 or 0.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 or 0.000000000000000001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 or 2 or 3 or 0.0 or -20.02 or 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + 
+ tdSql.query('select 0 or 0.0 or 0.00 or 0.000 or 0.0000 or 0.00000 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true or 10 or false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true or 10.0 or false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" or "def" from tb;') + tdSql.error('select "abc" or 1 from tb;') + tdSql.error('select 1 or "abc" from tb;') + tdSql.error('select "abc" or 1.0 from tb;') + tdSql.error('select 1.0 or abc from tb;') + tdSql.error('select "abc" or true from tb;') + tdSql.error('select false or "abc" from tb;') + tdSql.error('select 1 or "abc" or 1.0 or true or false or 0 from tb;') + tdSql.error('select 1 or "abc" or 1.0 or "cde" or false or 0 from tb;') + tdSql.error('select 1 or "abc" or 1.0 or "cde" or false or "fhi" from tb;') + + tdSql.error('select \'abc\' or \'def\' from tb;') + tdSql.error('select \'abc\' or 1 from tb;') + tdSql.error('select 1 or \'abc\' from tb;') + tdSql.error('select \'abc\' or 1.0 from tb;') + tdSql.error('select 1.0 or abc from tb;') + tdSql.error('select \'abc\' or true from tb;') + tdSql.error('select false or \'abc\' from tb;') + tdSql.error('select 1 or \'abc\' or 1.0 or true or false or 0 from tb;') + tdSql.error('select 1 or \'abc\' or 1.0 or \'cde\' or false or "fhi" from tb;') + + ##operator: multiple operations + tdSql.query('select 1 and 1 != 2 and 1 < 2 and 2 between 1 and 3 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 or 1 = 2 or 1 >= 2 or 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 and 1 != 2 and 1 < 2 and 2 between 1 and 3 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 or 1 = 2 or 1 >= 2 or 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 and 1 >= 2 and 2 between 1 and 3 from 
tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 2 or 1 >= 2 or 1<>3 or 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 or 1 != 2 and 1 <= 2 and 2 between 3 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 or 1 = 2 and 1 <= 2 and 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 or true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 and false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 and true and 10.1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 and false or 10.1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select 1 != 2 and "abc" or 1 >= 2 or "cde" between 4 and 5 and \'ghi\' or 10.1 from tb;') + tdSql.error('select 1 != 2 and 1 < 2 or \'abc123\' or 2 between \'abc123\' and 5 and false or "abc123" from tb;') + tdSql.error('select \'1234\' or 1 < 2 or \'aace\' and "cde" between 4 and "def" and "ckas" or 10.1 from tb;') + + tdSql.execute('drop database db') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/escape.py b/tests/develop-test/2-query/escape.py index ab023a839eaee8217e29c2a488ec7803fb23636f..2ff3c0b7cfcd45d1cf5cf6abe5f4c09cc0956994 100644 --- a/tests/develop-test/2-query/escape.py +++ b/tests/develop-test/2-query/escape.py @@ -131,11 +131,17 @@ class 
TDTestCase: tdSql.checkData(0, 1, r'\%') # [TD-12815] like wildcard(%, _) are not supported nchar + tdSql.execute(r"insert into tt values(1591050708000, 'h\%d')") tdSql.execute(r"insert into tt values(1591070708000, 'h%d')") + tdSql.execute(r"insert into tt values(1591080808000, 'h\_j')") tdSql.execute(r"insert into tt values(1591080708000, 'h_j')") tdSql.execute(r"insert into tt values(1591090708000, 'h\\j')") + tdSql.query(r"select * from tt where `i\t` like 'h\\\%d'") + tdSql.checkRows(1) tdSql.query(r"select * from tt where `i\t` like 'h\%d'") tdSql.checkRows(1) + tdSql.query(r"select * from tt where `i\t` like 'h\\\_j'") + tdSql.checkRows(1) tdSql.query(r"select * from tt where `i\t` like 'h\_j'") tdSql.checkRows(1) tdSql.query(r"select * from tt where `i\t` like 'h\\j'") diff --git a/tests/develop-test/2-query/func_compare.py b/tests/develop-test/2-query/func_compare.py new file mode 100644 index 0000000000000000000000000000000000000000..80894b9e0872a2d08e14e7e04dbafbf0f0f88f1c --- /dev/null +++ b/tests/develop-test/2-query/func_compare.py @@ -0,0 +1,308 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12861] : taoshell crash coredump for such as "select first(c1)==max(c1) from st" + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute('use db') + + #Prepare data + tdSql.execute("create table tb (ts timestamp, value int);") + tdSql.execute("insert into tb values (now, 1);") + tdSql.execute("insert into tb values (now, 2);") + tdSql.execute("insert into tb values (now, 3);") + tdSql.execute("insert into tb values (now, 4);") + + ##operator: = + tdSql.error('select count(*) = 4 from tb;') + tdSql.error('select avg(value) = 2.5 from tb') + tdSql.error('select twa(value) = 3.03 from tb') + tdSql.error('select sum(value) = 10 from tb') + tdSql.error('select stddev(value) = 2.0 from tb') + tdSql.error('select min(value) = 1 from tb') + tdSql.error('select max(value) = 4 from tb') + tdSql.error('select first(*) = 3 from 
tb') + tdSql.error('select last(*) = 3 from tb') + tdSql.error('select top(value, 3) = 3 from tb') + tdSql.error('select bottom(value, 3) = 3 from tb') + tdSql.error('select percentile(value, 50) = 3 from tb') + tdSql.error('select apercentile(value, 50) = 3 from tb') + tdSql.error('select last_row(*) = 3 from tb') + tdSql.error('select diff(value) = 3 from tb') + tdSql.error('select ceil(value) = 12 from tb') + tdSql.error('select floor(3.5) = 3 from tb') + tdSql.error('select round(3.5) = 3 from tb') + + tdSql.error('select count(*) = max(value) from tb') + tdSql.error('select avg(value) = min(value) from tb') + tdSql.error('select first(value) = last(value) from tb') + tdSql.error('select round(value) = round(value) from tb') + + ##operator: != + tdSql.error('select count(*) != 4 from tb;') + tdSql.error('select avg(value) != 2.5 from tb') + tdSql.error('select twa(value) != 3.03 from tb') + tdSql.error('select sum(value) != 10 from tb') + tdSql.error('select stddev(value) != 2.0 from tb') + tdSql.error('select min(value) != 1 from tb') + tdSql.error('select max(value) != 4 from tb') + tdSql.error('select first(*) != 3 from tb') + tdSql.error('select last(*) != 3 from tb') + tdSql.error('select top(value, 3) != 3 from tb') + tdSql.error('select bottom(value, 3) != 3 from tb') + tdSql.error('select percentile(value, 50) != 3 from tb') + tdSql.error('select apercentile(value, 50) != 3 from tb') + tdSql.error('select last_row(*) != 3 from tb') + tdSql.error('select diff(value) != 3 from tb') + tdSql.error('select ceil(value) != 12 from tb') + tdSql.error('select floor(3.5) != 3 from tb') + tdSql.error('select round(3.5) != 3 from tb') + + tdSql.error('select count(*) != max(value) from tb') + tdSql.error('select avg(value) != min(value) from tb') + tdSql.error('select first(value) != last(value) from tb') + tdSql.error('select round(value) != round(value) from tb') + + ##operator: <> + tdSql.error('select count(*) <> 4 from tb;') + tdSql.error('select avg(value) <> 
2.5 from tb') + tdSql.error('select twa(value) <> 3.03 from tb') + tdSql.error('select sum(value) <> 10 from tb') + tdSql.error('select stddev(value) <> 2.0 from tb') + tdSql.error('select min(value) <> 1 from tb') + tdSql.error('select max(value) <> 4 from tb') + tdSql.error('select first(*) <> 3 from tb') + tdSql.error('select last(*) <> 3 from tb') + tdSql.error('select top(value, 3) <> 3 from tb') + tdSql.error('select bottom(value, 3) <> 3 from tb') + tdSql.error('select percentile(value, 50) <> 3 from tb') + tdSql.error('select apercentile(value, 50) <> 3 from tb') + tdSql.error('select last_row(*) <> 3 from tb') + tdSql.error('select diff(value) <> 3 from tb') + tdSql.error('select ceil(value) <> 12 from tb') + tdSql.error('select floor(3.5) <> 3 from tb') + tdSql.error('select round(3.5) <> 3 from tb') + + tdSql.error('select count(*) <> max(value) from tb') + tdSql.error('select avg(value) <> min(value) from tb') + tdSql.error('select first(value) <> last(value) from tb') + tdSql.error('select round(value) <> round(value) from tb') + + ##operator: < + tdSql.error('select count(*) < 4 from tb;') + tdSql.error('select avg(value) < 2.5 from tb') + tdSql.error('select twa(value) < 3.03 from tb') + tdSql.error('select sum(value) < 10 from tb') + tdSql.error('select stddev(value) < 2.0 from tb') + tdSql.error('select min(value) < 1 from tb') + tdSql.error('select max(value) < 4 from tb') + tdSql.error('select first(*) < 3 from tb') + tdSql.error('select last(*) < 3 from tb') + tdSql.error('select top(value, 3) < 3 from tb') + tdSql.error('select bottom(value, 3) < 3 from tb') + tdSql.error('select percentile(value, 50) < 3 from tb') + tdSql.error('select apercentile(value, 50) < 3 from tb') + tdSql.error('select last_row(*) < 3 from tb') + tdSql.error('select diff(value) < 3 from tb') + tdSql.error('select ceil(value) < 12 from tb') + tdSql.error('select floor(3.5) < 3 from tb') + tdSql.error('select round(3.5) < 3 from tb') + + tdSql.error('select count(*) < 
max(value) from tb') + tdSql.error('select avg(value) < min(value) from tb') + tdSql.error('select first(value) < last(value) from tb') + tdSql.error('select round(value) < round(value) from tb') + + ##operator: > + tdSql.error('select count(*) > 4 from tb;') + tdSql.error('select avg(value) > 2.5 from tb') + tdSql.error('select twa(value) > 3.03 from tb') + tdSql.error('select sum(value) > 10 from tb') + tdSql.error('select stddev(value) > 2.0 from tb') + tdSql.error('select min(value) > 1 from tb') + tdSql.error('select max(value) > 4 from tb') + tdSql.error('select first(*) > 3 from tb') + tdSql.error('select last(*) > 3 from tb') + tdSql.error('select top(value, 3) > 3 from tb') + tdSql.error('select bottom(value, 3) > 3 from tb') + tdSql.error('select percentile(value, 50) > 3 from tb') + tdSql.error('select apercentile(value, 50) > 3 from tb') + tdSql.error('select last_row(*) > 3 from tb') + tdSql.error('select diff(value) > 3 from tb') + tdSql.error('select ceil(value) > 12 from tb') + tdSql.error('select floor(3.5) > 3 from tb') + tdSql.error('select round(3.5) > 3 from tb') + + tdSql.error('select count(*) > max(value) from tb') + tdSql.error('select avg(value) > min(value) from tb') + tdSql.error('select first(value) > last(value) from tb') + tdSql.error('select round(value) > round(value) from tb') + + ##operator: <= + tdSql.error('select count(*) <= 4 from tb;') + tdSql.error('select avg(value) <= 2.5 from tb') + tdSql.error('select twa(value) <= 3.03 from tb') + tdSql.error('select sum(value) <= 10 from tb') + tdSql.error('select stddev(value) <= 2.0 from tb') + tdSql.error('select min(value) <= 1 from tb') + tdSql.error('select max(value) <= 4 from tb') + tdSql.error('select first(*) <= 3 from tb') + tdSql.error('select last(*) <= 3 from tb') + tdSql.error('select top(value, 3) <= 3 from tb') + tdSql.error('select bottom(value, 3) <= 3 from tb') + tdSql.error('select percentile(value, 50) <= 3 from tb') + tdSql.error('select apercentile(value, 50) <= 
3 from tb') + tdSql.error('select last_row(*) <= 3 from tb') + tdSql.error('select diff(value) <= 3 from tb') + tdSql.error('select ceil(value) <= 12 from tb') + tdSql.error('select floor(3.5) <= 3 from tb') + tdSql.error('select round(3.5) <= 3 from tb') + + tdSql.error('select count(*) <= max(value) from tb') + tdSql.error('select avg(value) <= min(value) from tb') + tdSql.error('select first(value) <= last(value) from tb') + tdSql.error('select round(value) <= round(value) from tb') + + ##operator: >= + tdSql.error('select count(*) >= 4 from tb;') + tdSql.error('select avg(value) >= 2.5 from tb') + tdSql.error('select twa(value) >= 3.03 from tb') + tdSql.error('select sum(value) >= 10 from tb') + tdSql.error('select stddev(value) >= 2.0 from tb') + tdSql.error('select min(value) >= 1 from tb') + tdSql.error('select max(value) >= 4 from tb') + tdSql.error('select first(*) >= 3 from tb') + tdSql.error('select last(*) >= 3 from tb') + tdSql.error('select top(value, 3) >= 3 from tb') + tdSql.error('select bottom(value, 3) >= 3 from tb') + tdSql.error('select percentile(value, 50) >= 3 from tb') + tdSql.error('select apercentile(value, 50) >= 3 from tb') + tdSql.error('select last_row(*) >= 3 from tb') + tdSql.error('select diff(value) >= 3 from tb') + tdSql.error('select ceil(value) >= 12 from tb') + tdSql.error('select floor(3.5) >= 3 from tb') + tdSql.error('select round(3.5) >= 3 from tb') + + tdSql.error('select count(*) >= max(value) from tb') + tdSql.error('select avg(value) >= min(value) from tb') + tdSql.error('select first(value) >= last(value) from tb') + tdSql.error('select round(value) >= round(value) from tb') + + ##operator: between and + tdSql.error('select count(*) between 3 and 4 from tb;') + tdSql.error('select avg(value) between 1.5 and 2.5 from tb') + tdSql.error('select twa(value) between 3.0 and 3.03 from tb') + tdSql.error('select sum(value) between 1 and 10 from tb') + tdSql.error('select stddev(value) between 1 and 2.0 from tb') + 
tdSql.error('select min(value) between 2 and 5 from tb') + tdSql.error('select max(value) between 1 and 10 from tb') + tdSql.error('select first(*) between 1 and 3 from tb') + tdSql.error('select last(*) between 0 and 3 from tb') + tdSql.error('select top(value, 3) between 0.0 and 3 from tb') + tdSql.error('select bottom(value, 3) between 0.0 and 3 from tb') + tdSql.error('select percentile(value, 50) between 1 and 3 from tb') + tdSql.error('select apercentile(value, 50) between 2 and 3 from tb') + tdSql.error('select last_row(*) between 2 and 3 from tb') + tdSql.error('select diff(value) between 1 and 3 from tb') + tdSql.error('select ceil(value) between 5 and 12 from tb') + tdSql.error('select floor(3.5) between 12 and 3 from tb') + tdSql.error('select round(3.5) between true and 3 from tb') + + tdSql.error('select count(*) between min(value) and max(value) from tb') + tdSql.error('select avg(*) between min(value) and 3 from tb') + tdSql.error('select avg(value) between 1 and max(value) from tb') + tdSql.error('select first(value) between first(value) and last(value) from tb') + tdSql.error('select round(value) between ceil(value) and floor(value) from tb') + + ##operator: and + tdSql.error('select count(*) and 1 from tb;') + tdSql.error('select avg(value) and 0.0 from tb') + tdSql.error('select twa(value) and true from tb') + tdSql.error('select sum(value) and false from tb') + tdSql.error('select 1 and stddev(value) from tb') + tdSql.error('select 0.0 and min(value) from tb') + tdSql.error('select true and max(value) from tb') + tdSql.error('select false and first(*) from tb') + tdSql.error('select last(*) and first(value) from tb') + tdSql.error('select top(value, 3) and bottom(value, 3) from tb') + tdSql.error('select percentile(value, 50) and apercentile(value, 50) from tb') + tdSql.error('select diff(value) and ceil(value) from tb') + tdSql.error('select floor(3.5) and round(3.5) and ceil(3.5) from tb') + tdSql.error('select true and round(3.5) and 3 from 
tb') + + ##operator: or + tdSql.error('select count(*) or 1 from tb;') + tdSql.error('select avg(value) or 0.0 from tb') + tdSql.error('select twa(value) or true from tb') + tdSql.error('select sum(value) or false from tb') + tdSql.error('select 1 or stddev(value) from tb') + tdSql.error('select 0.0 or min(value) from tb') + tdSql.error('select true or max(value) from tb') + tdSql.error('select false or first(*) from tb') + tdSql.error('select last(*) or first(value) from tb') + tdSql.error('select top(value, 3) or bottom(value, 3) from tb') + tdSql.error('select percentile(value, 50) or apercentile(value, 50) from tb') + tdSql.error('select diff(value) or ceil(value) from tb') + tdSql.error('select floor(3.5) or round(3.5) or ceil(3.5) from tb') + tdSql.error('select true or round(3.5) or 3 from tb') + + ##operator: multiple operations + tdSql.error('select count(*) <> avg(value) or twa(value) and sum(value) or 1 from tb;') + tdSql.error('select 1 and stddev(value) <= min(value) or max(value) and first(*) or 0.0 from tb') + tdSql.error('select last(*) and first(value) or top(value, 3) and 3 between 4.0 and bottom(value, 3)from tb') + tdSql.error('select percentile(value, 50) or diff(value) = ceil(value) and apercentile(value, 50) from tb') + tdSql.error('select floor(3.5) or round(3.5) and ceil(3.5) > true and round(3.5) or 3 from tb') + + tdSql.execute('drop database db') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py new file mode 100644 index 0000000000000000000000000000000000000000..404f922dc7a6fa07acf3fb74c93e66f9d052c6fe --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py @@ -0,0 +1,102 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb1") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.`stb1-2`") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb1-2`") + tdSql.checkData(0, 0, 160) + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb2") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + tdSql.query("show databases") + tdSql.checkData(0, 16, "us") + + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.`stb2-2`") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb2-2`") + tdSql.checkData(0, 0, 160) + + cmd = 
"taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb3") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("show databases") + tdSql.checkData(0, 16, "ns") + + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.`stb3-2`") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb3-2`") + tdSql.checkData(0, 0, 160) + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb4") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb4") + tdSql.checkData(0, 0, 160) + + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.`stb4-2`") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb4-2`") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py new file mode 100644 index 0000000000000000000000000000000000000000..51edecdbbfba7f23c55db9b4afc32bd5720ec36c --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py @@ -0,0 +1,294 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import subprocess +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -F 7 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%^*" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use newtest") + tdSql.query("select count(*) from newtest.meters") + tdSql.checkData(0, 0, 20) + tdSql.query("select distinct(c0) from newtest.meters") + tdSql.checkRows(7) + tdSql.query("describe meters") + tdSql.checkRows(8) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "TINYINT") + tdSql.checkData(2, 1, "BINARY") + tdSql.checkData(2, 2, 23) + tdSql.checkData(3, 1, "BOOL") + tdSql.checkData(4, 1, "NCHAR") + tdSql.checkData(4, 2, 29) + tdSql.checkData(5, 1, "INT") + tdSql.checkData(6, 1, "BINARY") + tdSql.checkData(6, 2, 29) + tdSql.checkData(6, 3, "TAG") + tdSql.checkData(7, 1, "NCHAR") + tdSql.checkData(7, 2, 31) + tdSql.checkData(7, 3, "TAG") + tdSql.query("select tbname from meters where tbname like '$%^*%'") + tdSql.checkRows(2) + tdSql.execute("drop database if exists newtest") + + cmd = "taosBenchmark -F 7 -n 10 -t 2 -y -M -I stmt" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(tbname) from test.meters") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 20) + tdSql.query("select 
distinct(c0) from test.meters") + tdSql.checkRows(7) + + cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l" + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 2): + tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) + + cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l" + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 3): + tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) + + cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l" + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 2): + tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) + + cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 | grep sleep | wc -l" + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 3): + tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) + + cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l" + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 2): + tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) + + cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l" + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 3): + tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) + + cmd = "taosBenchmark -S 17 -n 3 -t 1 -y -x" + tdLog.info("%s" % cmd) + os.system("%s" % 
cmd) + tdSql.query("select last(ts) from test.meters") + tdSql.checkData(0, 0 , "2017-07-14 10:40:00.034") + + cmd = "taosBenchmark -N -I taosc -t 11 -n 11 -y -x -E" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("show stables") + tdSql.checkRows(0) + tdSql.query("show tables") + tdSql.checkRows(11) + tdSql.query("select count(*) from `d10`") + tdSql.checkData(0, 0, 11) + + cmd = "taosBenchmark -N -I rest -t 11 -n 11 -y -x" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("show stables") + tdSql.checkRows(0) + tdSql.query("show tables") + tdSql.checkRows(11) + tdSql.query("select count(*) from d10") + tdSql.checkData(0, 0, 11) + + cmd = "taosBenchmark -N -I stmt -t 11 -n 11 -y -x" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("show stables") + tdSql.checkRows(0) + tdSql.query("show tables") + tdSql.checkRows(11) + tdSql.query("select count(*) from d10") + tdSql.checkData(0, 0, 11) + + cmd = "taosBenchmark -N -I sml -y" + tdLog.info("%s" % cmd) + assert(os.system("%s" % cmd) !=0 ) + + cmd = "taosBenchmark -n 1 -t 1 -y -b bool" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BOOL") + + cmd = "taosBenchmark -n 1 -t 1 -y -b tinyint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "TINYINT") + + cmd = "taosBenchmark -n 1 -t 1 -y -b utinyint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "TINYINT UNSIGNED") + + cmd = "taosBenchmark -n 1 -t 1 -y -b smallint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "SMALLINT") + + cmd = "taosBenchmark -n 1 
-t 1 -y -b usmallint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "SMALLINT UNSIGNED") + + cmd = "taosBenchmark -n 1 -t 1 -y -b int" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "INT") + + cmd = "taosBenchmark -n 1 -t 1 -y -b uint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "INT UNSIGNED") + + cmd = "taosBenchmark -n 1 -t 1 -y -b bigint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BIGINT") + + cmd = "taosBenchmark -n 1 -t 1 -y -b ubigint" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BIGINT UNSIGNED") + + cmd = "taosBenchmark -n 1 -t 1 -y -b timestamp" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "TIMESTAMP") + + cmd = "taosBenchmark -n 1 -t 1 -y -b float" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "FLOAT") + + cmd = "taosBenchmark -n 1 -t 1 -y -b double" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "DOUBLE") + + cmd = "taosBenchmark -n 1 -t 1 -y -b nchar" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "NCHAR") + + cmd = "taosBenchmark -n 1 -t 1 -y -b nchar\(7\)" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + 
tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "NCHAR") + + cmd = "taosBenchmark -n 1 -t 1 -y -b binary" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BINARY") + + cmd = "taosBenchmark -n 1 -t 1 -y -b binary\(7\)" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BINARY") + + cmd = "taosBenchmark -n 1 -t 1 -y -A json\(7\)" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(4, 1, "JSON") + + cmd = "taosBenchmark -n 1 -t 1 -y -b int,x" + tdLog.info("%s" % cmd) + assert(os.system("%s" % cmd) != 0) + + cmd = "taosBenchmark -n 1 -t 1 -y -A int,json" + tdLog.info("%s" % cmd) + assert(os.system("%s" % cmd) != 0) + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_tags.csv b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_tags.csv new file mode 100644 index 0000000000000000000000000000000000000000..8e2afd342773582f9484b796cdc0b84736e8194e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_tags.csv @@ -0,0 +1 @@ +17 \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_use_ts.csv b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_use_ts.csv new file mode 100644 index 0000000000000000000000000000000000000000..f92eedd50d35e1666d8d74a999fd968271944a57 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_use_ts.csv @@ -0,0 +1,3 @@ +1641976781445,1 +1641976781446,2 +1641976781447,3 \ No newline at end of file diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py new file mode 100644 index 0000000000000000000000000000000000000000..fd8bde5c1066833f9c2413b434dbc7e467a27b7b --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/default.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 100) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py new file mode 100644 index 0000000000000000000000000000000000000000..0b8dd11accef03243e5b285bbd86c80ab06f4267 --- /dev/null 
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py @@ -0,0 +1,203 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 160) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkRows(29) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "TIMESTAMP") + tdSql.checkData(2, 1, "INT") + tdSql.checkData(3, 1, "BIGINT") + tdSql.checkData(4, 1, "FLOAT") + tdSql.checkData(5, 1, "DOUBLE") + tdSql.checkData(6, 1, "SMALLINT") + tdSql.checkData(7, 1, "TINYINT") + tdSql.checkData(8, 1, "BOOL") + tdSql.checkData(9, 1, "NCHAR") + tdSql.checkData(9, 2, 29) + tdSql.checkData(10, 1, "INT UNSIGNED") + tdSql.checkData(11, 1, "BIGINT UNSIGNED") + tdSql.checkData(12, 1, "TINYINT UNSIGNED") + tdSql.checkData(13, 1, "SMALLINT UNSIGNED") + tdSql.checkData(14, 1, "BINARY") + tdSql.checkData(14, 2, 23) + tdSql.checkData(15, 1, "TIMESTAMP") + tdSql.checkData(16, 1, "INT") + tdSql.checkData(17, 1, "BIGINT") + tdSql.checkData(18, 1, 
"FLOAT") + tdSql.checkData(19, 1, "DOUBLE") + tdSql.checkData(20, 1, "SMALLINT") + tdSql.checkData(21, 1, "TINYINT") + tdSql.checkData(22, 1, "BOOL") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(23, 2, 17) + tdSql.checkData(24, 1, "INT UNSIGNED") + tdSql.checkData(25, 1, "BIGINT UNSIGNED") + tdSql.checkData(26, 1, "TINYINT UNSIGNED") + tdSql.checkData(27, 1, "SMALLINT UNSIGNED") + tdSql.checkData(28, 1, "BINARY") + tdSql.checkData(28, 2, 19) + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 160) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkRows(27) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "INT") + tdSql.checkData(2, 1, "BIGINT") + tdSql.checkData(3, 1, "FLOAT") + tdSql.checkData(4, 1, "DOUBLE") + tdSql.checkData(5, 1, "SMALLINT") + tdSql.checkData(6, 1, "TINYINT") + tdSql.checkData(7, 1, "BOOL") + tdSql.checkData(8, 1, "NCHAR") + tdSql.checkData(8, 2, 29) + tdSql.checkData(9, 1, "INT UNSIGNED") + tdSql.checkData(10, 1, "BIGINT UNSIGNED") + tdSql.checkData(11, 1, "TINYINT UNSIGNED") + tdSql.checkData(12, 1, "SMALLINT UNSIGNED") + tdSql.checkData(13, 1, "BINARY") + tdSql.checkData(13, 2, 23) + tdSql.checkData(14, 1, "NCHAR") + tdSql.checkData(15, 1, "NCHAR") + tdSql.checkData(16, 1, "NCHAR") + tdSql.checkData(17, 1, "NCHAR") + tdSql.checkData(18, 1, "NCHAR") + tdSql.checkData(19, 1, "NCHAR") + tdSql.checkData(20, 1, "NCHAR") + tdSql.checkData(21, 1, "NCHAR") + tdSql.checkData(22, 1, "NCHAR") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(24, 1, "NCHAR") + tdSql.checkData(25, 1, "NCHAR") + tdSql.checkData(26, 1, "NCHAR") + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 160) + 
tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkRows(29) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "TIMESTAMP") + tdSql.checkData(2, 1, "INT") + tdSql.checkData(3, 1, "BIGINT") + tdSql.checkData(4, 1, "FLOAT") + tdSql.checkData(5, 1, "DOUBLE") + tdSql.checkData(6, 1, "SMALLINT") + tdSql.checkData(7, 1, "TINYINT") + tdSql.checkData(8, 1, "BOOL") + tdSql.checkData(9, 1, "NCHAR") + tdSql.checkData(9, 2, 29) + tdSql.checkData(10, 1, "INT UNSIGNED") + tdSql.checkData(11, 1, "BIGINT UNSIGNED") + tdSql.checkData(12, 1, "TINYINT UNSIGNED") + tdSql.checkData(13, 1, "SMALLINT UNSIGNED") + tdSql.checkData(14, 1, "BINARY") + tdSql.checkData(14, 2, 23) + tdSql.checkData(15, 1, "TIMESTAMP") + tdSql.checkData(16, 1, "INT") + tdSql.checkData(17, 1, "BIGINT") + tdSql.checkData(18, 1, "FLOAT") + tdSql.checkData(19, 1, "DOUBLE") + tdSql.checkData(20, 1, "SMALLINT") + tdSql.checkData(21, 1, "TINYINT") + tdSql.checkData(22, 1, "BOOL") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(23, 2, 17) + tdSql.checkData(24, 1, "INT UNSIGNED") + tdSql.checkData(25, 1, "BIGINT UNSIGNED") + tdSql.checkData(26, 1, "TINYINT UNSIGNED") + tdSql.checkData(27, 1, "SMALLINT UNSIGNED") + tdSql.checkData(28, 1, "BINARY") + tdSql.checkData(28, 2, 19) + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 160) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkRows(29) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "TIMESTAMP") + tdSql.checkData(2, 1, "INT") + tdSql.checkData(3, 1, "BIGINT") + tdSql.checkData(4, 1, "FLOAT") + tdSql.checkData(5, 1, "DOUBLE") + tdSql.checkData(6, 1, "SMALLINT") + tdSql.checkData(7, 1, "TINYINT") + tdSql.checkData(8, 1, "BOOL") + tdSql.checkData(9, 1, "NCHAR") + tdSql.checkData(9, 2, 29) + tdSql.checkData(10, 1, "INT UNSIGNED") + 
tdSql.checkData(11, 1, "BIGINT UNSIGNED") + tdSql.checkData(12, 1, "TINYINT UNSIGNED") + tdSql.checkData(13, 1, "SMALLINT UNSIGNED") + tdSql.checkData(14, 1, "BINARY") + tdSql.checkData(14, 2, 23) + tdSql.checkData(15, 1, "TIMESTAMP") + tdSql.checkData(16, 1, "INT") + tdSql.checkData(17, 1, "BIGINT") + tdSql.checkData(18, 1, "FLOAT") + tdSql.checkData(19, 1, "DOUBLE") + tdSql.checkData(20, 1, "SMALLINT") + tdSql.checkData(21, 1, "TINYINT") + tdSql.checkData(22, 1, "BOOL") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(23, 2, 17) + tdSql.checkData(24, 1, "INT UNSIGNED") + tdSql.checkData(25, 1, "BIGINT UNSIGNED") + tdSql.checkData(26, 1, "TINYINT UNSIGNED") + tdSql.checkData(27, 1, "SMALLINT UNSIGNED") + tdSql.checkData(28, 1, "BINARY") + tdSql.checkData(28, 2, 19) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py new file mode 100644 index 0000000000000000000000000000000000000000..99e3d1dc766b51f59927bfe75929605e774ddfa7 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -F abc -P abc -I abc -T abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 4) + + cmd = "taosBenchmark non_exist_opt" + tdLog.info("%s" % cmd) + assert (os.system("%s" % cmd) != 0) + + cmd = "taosBenchmark -f non_exist_file" + tdLog.info("%s" % cmd) + assert (os.system("%s" % cmd) != 0) + + cmd = "taosBenchmark -h non_exist_host" + tdLog.info("%s" % cmd) + assert (os.system("%s" % cmd) != 0) + + cmd = "taosBenchmark -p non_exist_pass" + tdLog.info("%s" % cmd) + assert (os.system("%s" % cmd) != 0) + + cmd = "taosBenchmark -u non_exist_user" + tdLog.info("%s" % cmd) + assert (os.system("%s" % cmd) != 0) + + cmd = "taosBenchmark -c non_exist_dir -n 1 -t 1 -o non_exist_path -y" + tdLog.info("%s" % cmd) + assert (os.system("%s" % cmd) == 0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/default.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/default.json new file mode 100644 index 
0000000000000000000000000000000000000000..f0ad9d516e2f3855722ea41ea88cdee5c7f06de7 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/default.json @@ -0,0 +1,27 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db" + }, + "super_tables": [{ + "name": "stb", + "childtable_prefix": "stb_", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}] + }] + }] +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/json_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/json_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..f0c0f9649385006b6859c0247e86d9f0ed3cfb31 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/json_alltypes.json @@ -0,0 +1,262 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + 
"data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BOOL"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TINYINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb3_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": 
"", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb4_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb5_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 8, + 
"childtable_prefix": "stb6_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb7_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb8_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 
1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb9_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "NCHAR", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..e52fadc8576c76e28079eb935f1c95d0302f6b41 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + 
"databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ns", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb3-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb3-2_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 
1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..e45ae7890af33a9ddc4b7d552adeb781aaa8a6ba --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ns", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + 
"escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json new file mode 100644 index 0000000000000000000000000000000000000000..9ef1b933d8ea019004bc373529c26f4ba5c58018 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json @@ -0,0 +1,27 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_mode": "rest", + "thread_pool_size": 20, + "response_buffer": 10000, + "specified_table_query": + { + "query_times": 1, + "sqls": + [{ + "sql": "select count(*) from db.stb", + "result": "rest_query_specified" + }] + }, + "super_table_query": { + "stblname": "stb", + "sqls": [ + { + "sql": "select count(*) from xxxx", + "result": "rest_query_super" + 
} + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..9bb5c4292cf9c1fb6628517dfc044fe2065e2c2e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, 
"count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb4-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb4-2_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..45cf05d3e620f0dfed070d01150ad4961087efaf --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json @@ -0,0 +1,62 @@ +{ + 
"filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json 
b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json new file mode 100644 index 0000000000000000000000000000000000000000..5b55ceb4a1fe8f57ae26f74ed78a86e6bdc9a333 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 30, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + 
"auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 60, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/specified_subscribe.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/specified_subscribe.json new file mode 100644 index 0000000000000000000000000000000000000000..61a7961e73506d9aeda07a46f00d7b8c3317d8f0 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/specified_subscribe.json @@ -0,0 +1,24 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": { + "concurrent": 1, + "mode": "async", + "interval": 1000, + "restart": "no", + "keepProgress": "yes", + "resubAfterConsume": 10, + "endAfterConsume": 1, + "sqls": [ + { + "sql": "select * from stb;" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..ebe5e3f043eac127acd4069a3088e5b49a782824 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", 
+ "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "us", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2-2_", + "escape_character": "yes", + 
"auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..806142bf2a24f0e868ab768db9313c3762e62a34 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 
16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..fea72a34fb74c52f06e7549008333d33ce537d08 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json @@ -0,0 +1,87 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + 
"password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb1-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1-2_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 
10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..698fb599f595fbbc4a1fd130696e41059362ca50 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + 
"maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": -10, + "childtable_offset": 10, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json new file mode 100644 index 0000000000000000000000000000000000000000..71fed3c48cf13123890f4212baa4c074b8b6df74 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "JSON", "len": 8, "count": 5}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json new file mode 100644 index 0000000000000000000000000000000000000000..c78317aade33cd3fea4a400511dee5b1431bc473 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + 
"blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 2, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..46672bcc4c54082fbb2aedb73ac649976c73013f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + 
"password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":1, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 0, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR"}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY"}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR"}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY"}] + }] + }] +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json new file mode 100644 index 
0000000000000000000000000000000000000000..e30a24be42aacd5f710a9bfe0aa6ce83ba9cd03a --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json @@ -0,0 +1,32 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 1, + "specified_table_query": + { + "query_interval": 1, + "concurrent":1, + "sqls": + [{ + "sql": "select count(*) from db.stb", + "result": "taosc_query_specified" + }] + }, + "super_table_query": { + "stblname": "stb", + "query_interval": 1, + "threads": 1, + "sqls": [ + { + "sql": "select count(*) from xxxx", + "result": "taosc_query_super" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json new file mode 100644 index 0000000000000000000000000000000000000000..8ac8aab93e2e948cdf9b92bd548ad8299470e57f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "yes", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "no", + "auto_create_table": 
"no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "./5-taos-tools/taosbenchmark/csv/sample_use_ts.csv", + "use_sample_ts": "yes", + "tags_file": "./5-taos-tools/taosbenchmark/csv/sample_tags.csv", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/telnet_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/telnet_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..40f58d4f7ef75f0cb5c30abd45c8ec86409763da --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/telnet_alltypes.json @@ -0,0 +1,362 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + 
"interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BOOL"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TINYINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb3_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": 
"", + "columns": [{"type": "UTINYINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb4_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb5_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "USMALLINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, 
{"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb6_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb7_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "UINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + 
},{ + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb8_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb9_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "UBIGINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb10", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb10_", + "escape_character": "no", + "auto_create_table": "no", + 
"batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb11", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb11_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb12", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb12_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + 
"insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb13", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb13_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "NCHAR", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..5b71f3a065de1708a6dbdf570f77d18db80f3e26 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py @@ -0,0 +1,49 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_json_tag.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkData(2, 0, "jtag") + tdSql.checkData(2, 1, "JSON") + tdSql.checkData(2, 3, "TAG") + tdSql.query("select count(jtag) from db.stb") + tdSql.checkData(0, 0, 8) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py new file mode 100644 index 0000000000000000000000000000000000000000..20e64fa7458fecb87771bd98eec59a886e3663b3 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_only_create_table.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb") + tdSql.checkRows(0) + tdSql.query("describe db.stb") + tdSql.checkData(9, 1, "NCHAR") + tdSql.checkData(14, 1, "BINARY") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(28, 1, "BINARY") + tdSql.checkData(9, 2, 64) + tdSql.checkData(14, 2, 64) + tdSql.checkData(23, 2, 64) + tdSql.checkData(28, 2, 64) + + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_limit_offset.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 40) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py new file mode 100644 index 
0000000000000000000000000000000000000000..274729fada8f759535ad72979c9d5710390cc67f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py @@ -0,0 +1,100 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import ast +import os +import re +import subprocess + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + os.system("rm -f rest_query_specified-0 rest_query_super-0 taosc_query_specified-0 taosc_query_super-0") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute("use db") + tdSql.execute("create table stb (ts timestamp, c0 int) tags (t0 int)") + tdSql.execute("insert into stb_0 using stb tags (0) values (now, 0)") + tdSql.execute("insert into stb_1 using stb tags (1) values (now, 1)") + tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_query.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + with open("%s" % "taosc_query_specified-0", 'r+') as f1: + for line in f1.readlines(): + queryTaosc = line.strip().split()[0] + assert queryTaosc == '3' , "result is %s != expect: 3" % queryTaosc + + with open("%s" % "taosc_query_super-0", 'r+') as f1: + for 
line in f1.readlines(): + queryTaosc = line.strip().split()[0] + assert queryTaosc == '1', "result is %s != expect: 1" % queryTaosc + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_query.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + + times = 0 + with open("rest_query_super-0", 'r+') as f1: + + for line in f1.readlines(): + contents = line.strip() + if contents.find("data") != -1: + pattern = re.compile("{.*}") + contents = pattern.search(contents).group() + contentsDict = ast.literal_eval(contents) + queryResultRest = contentsDict['data'][0][0] + assert queryResultRest == 1, "result is %s != expect: 1" % queryResultRest + times += 1 + + assert times == 3, "result is %s != expect: 3" % times + + + times = 0 + with open("rest_query_specified-0", 'r+') as f1: + for line in f1.readlines(): + contents = line.strip() + if contents.find("data") != -1: + pattern = re.compile("{.*}") + contents = pattern.search(contents).group() + contentsDict = ast.literal_eval(contents) + queryResultRest = contentsDict['data'][0][0] + assert queryResultRest == 3, "result is %s != expect: 3" % queryResultRest + times += 1 + + assert times == 1, "result is %s != expect: 1" % times + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py new file mode 100644 index 0000000000000000000000000000000000000000..5be777497930f14fa5d34bda3f54a8722f0e7dbc --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py @@ -0,0 +1,55 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 24) + tdSql.query("select * from db.stb_0") + tdSql.checkRows(3) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 1, 3) + tdSql.query("select distinct(t0) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 17) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py new file mode 100644 index 0000000000000000000000000000000000000000..f704d684fbb7a3d1f9778bccfac0a95ddbc34e4b --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_interlace.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb1") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(tbname) from db.stb2") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py new file mode 100644 index 0000000000000000000000000000000000000000..dc18bda7ecbfbc2207d5919bc663d1bd82c7ae3e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -0,0 +1,81 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/json_alltypes.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb1") + tdSql.checkData(1, 1, "BOOL") + tdSql.query("describe db.stb2") + tdSql.checkData(1, 1, "TINYINT") + tdSql.query("describe db.stb3") + tdSql.checkData(1, 1, "SMALLINT") + tdSql.query("describe db.stb4") + tdSql.checkData(1, 1, "INT") + tdSql.query("describe db.stb5") + tdSql.checkData(1, 1, "BIGINT") + tdSql.query("describe db.stb6") + tdSql.checkData(1, 1, "FLOAT") + tdSql.query("describe db.stb7") + tdSql.checkData(1, 1, "DOUBLE") + tdSql.query("describe db.stb8") + tdSql.checkData(1, 1, "BINARY") + tdSql.checkData(1, 2, 8) + tdSql.query("describe db.stb9") + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb4") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb5") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb6") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb7") + tdSql.checkData(0, 0, 
160) + tdSql.query("select count(*) from db.stb8") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb9") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py new file mode 100644 index 0000000000000000000000000000000000000000..9285de99848acdd1674f6242d0865189d2e17920 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py @@ -0,0 +1,97 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/telnet_alltypes.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb1") + tdSql.checkData(1, 1, "BOOL") + tdSql.query("describe db.stb2") + tdSql.checkData(1, 1, "TINYINT") + tdSql.query("describe db.stb3") + tdSql.checkData(1, 1, "TINYINT UNSIGNED") + 
tdSql.query("describe db.stb4") + tdSql.checkData(1, 1, "SMALLINT") + tdSql.query("describe db.stb5") + tdSql.checkData(1, 1, "SMALLINT UNSIGNED") + tdSql.query("describe db.stb6") + tdSql.checkData(1, 1, "INT") + tdSql.query("describe db.stb7") + tdSql.checkData(1, 1, "INT UNSIGNED") + tdSql.query("describe db.stb8") + tdSql.checkData(1, 1, "BIGINT") + tdSql.query("describe db.stb9") + tdSql.checkData(1, 1, "BIGINT UNSIGNED") + tdSql.query("describe db.stb10") + tdSql.checkData(1, 1, "FLOAT") + tdSql.query("describe db.stb11") + tdSql.checkData(1, 1, "DOUBLE") + tdSql.query("describe db.stb12") + tdSql.checkData(1, 1, "BINARY") + tdSql.checkData(1, 2, 8) + tdSql.query("describe db.stb13") + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb4") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb5") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb6") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb7") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb8") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb9") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb11") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb12") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb13") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py new file mode 100644 index 0000000000000000000000000000000000000000..726b4188e0824530cb78330f07a822e93e8ecc51 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py @@ -0,0 +1,50 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute("use db") + tdSql.execute("create table stb (ts timestamp, c0 int) tags (t0 int)") + tdSql.execute("insert into stb_0 using stb tags (0) values (now, 0)") + tdSql.execute("insert into stb_1 using stb tags (1) values (now, 1)") + tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/specified_subscribe.json -g" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of 
file diff --git a/tests/develop-test/fulltest-insert.sh b/tests/develop-test/fulltest-insert.sh index 532f7e6fc0446f6a68ca0a5e80be070684a71c23..e538abf8e2f9f22e9acbce017a3f42b9a6804818 100755 --- a/tests/develop-test/fulltest-insert.sh +++ b/tests/develop-test/fulltest-insert.sh @@ -1 +1,2 @@ -python3 ./test.py -f 1-insert/batchInsert.py \ No newline at end of file +python3 ./test.py -f 1-insert/batchInsert.py +python3 ./test.py -f 1-insert/uppercase_in_stmt.py diff --git a/tests/develop-test/fulltest-query.sh b/tests/develop-test/fulltest-query.sh index a6f3834aac96edee80c6cb70dbeec35e610803b5..c91c592adfbee5b48b9cc0fac3fade70ff4a611c 100755 --- a/tests/develop-test/fulltest-query.sh +++ b/tests/develop-test/fulltest-query.sh @@ -2,6 +2,8 @@ python3 ./test.py -f 2-query/ts_hidden_column.py python3 ./test.py -f 2-query/union-order.py python3 ./test.py -f 2-query/session_two_stage.py python3 ./test.py -f 2-query/timeline_agg_func_groupby.py +python3 ./test.py -f 2-query/constant_compare.py python3 ./test.py -f 2-query/ts_2016.py python3 ./test.py -f 2-query/escape.py python3 ./test.py -f 2-query/function_mavg.py +python3 ./test.py -f 2-query/func_compare.py diff --git a/tests/develop-test/fulltest-tools.sh b/tests/develop-test/fulltest-tools.sh index 32df73c920fd27ec5f614cdda04718718bd5180f..ca02f1605c9ceb2443105561a897d8279109fede 100755 --- a/tests/develop-test/fulltest-tools.sh +++ b/tests/develop-test/fulltest-tools.sh @@ -10,3 +10,16 @@ python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/limit_offset_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py +python3 
./test.py -f 5-taos-tools/taosbenchmark/commandline.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/insert_alltypes_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_interlace.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_telnet_alltypes.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/subscripe_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/default_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/invalid_commandline.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py \ No newline at end of file diff --git a/tests/pytest/fulltest-tools.sh b/tests/pytest/fulltest-tools.sh index d1f83e9fb289f36d52340b0ed942c912f361c2de..959e699cb6b0624ed7b1eb0f6ab4f83f80acc6b0 100755 --- a/tests/pytest/fulltest-tools.sh +++ b/tests/pytest/fulltest-tools.sh @@ -5,44 +5,37 @@ ulimit -c unlimited # tools python3 test.py -f tools/taosdumpTest.py python3 test.py -f tools/taosdumpTest2.py - -python3 test.py -f tools/taosdemoTest.py -python3 test.py -f tools/taosdemoTestWithoutMetric.py -python3 test.py -f tools/taosdemoTestWithJson.py - +#python3 test.py -f tools/taosdemoTest.py +#python3 test.py -f tools/taosdemoTestWithoutMetric.py +#python3 test.py -f tools/taosdemoTestWithJson.py #======================p1-end=============== #======================p2-start=============== - -python3 test.py -f tools/taosdemoTestLimitOffset.py -python3 test.py -f tools/taosdemoTestTblAlt.py -python3 test.py -f tools/taosdemoTestSampleData.py -python3 test.py -f tools/taosdemoTestInterlace.py -# python3 test.py -f tools/taosdemoTestQuery.py -python3 ./test.py -f tools/taosdemoTestdatatype.py +#python3 test.py -f tools/taosdemoTestLimitOffset.py +#python3 test.py -f tools/taosdemoTestTblAlt.py +#python3 test.py -f tools/taosdemoTestSampleData.py +#python3 test.py -f tools/taosdemoTestInterlace.py +#python3 test.py -f 
tools/taosdemoTestQuery.py +#python3 ./test.py -f tools/taosdemoTestdatatype.py #======================p2-end=============== #======================p3-start=============== # nano support -python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py -python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py -python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py -python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py +#python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +#python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py +#python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py +#python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py python3 test.py -f tools/taosdumpTestNanoSupport.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py #======================p3-end=============== #======================p4-start=============== - -python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py -python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py -python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py -python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py - -#python3 test.py -f tools/taosdemoAllTest/TD-10539/create_taosdemo.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertShell.py - +#python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +#python3 test.py -f 
tools/taosdemoAllTest/taosdemoTestInsertAllType.py +#python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py +#python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py +#python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py +#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py +#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertShell.py #======================p4-end=============== #======================p5-start=============== diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c9cf6a2af4241d17d9a9536374a25d1c3add604b..cf1b4b21cf4913b70ac5364e95b64ca3b124c558 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -289,6 +289,7 @@ python3 ./test.py -f query/bug6586.py # python3 ./test.py -f query/bug5903.py python3 ./test.py -f query/queryLimit.py python3 ./test.py -f query/queryPriKey.py +python3 ./test.py -f query/queryNcharNull.py #stream python3 ./test.py -f stream/metric_1.py diff --git a/tests/pytest/functions/function_elapsed_case.py b/tests/pytest/functions/function_elapsed_case.py index 025354f2c3e31d1483f339e0e4f23bbda4c1e997..02411a2002953521ce7b1abbeaadcc147059dd55 100644 --- a/tests/pytest/functions/function_elapsed_case.py +++ b/tests/pytest/functions/function_elapsed_case.py @@ -322,8 +322,8 @@ class ElapsedCase: if (self.restart): tdSql.execute("drop table elapsed_t") tdSql.execute("drop table elapsed_st") - tdSql.execute("create table elapsed_t as select elapsed(ts) from t1 interval(1m) sliding(30s)") - tdSql.execute("create table elapsed_st as select elapsed(ts) from st1 interval(1m) sliding(30s) group by tbname") + tdSql.error("create table elapsed_t as select elapsed(ts) from t1 interval(1m) sliding(30s)") + tdSql.error("create table elapsed_st as select elapsed(ts) from st1 interval(1m) sliding(30s) group by tbname") def 
selectIllegalTest(self): tdSql.execute("use wxy_db") diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py index 11c42c463ee7d863393f2921db24244718a49df8..f48e451069c0c2be31132f05246bfb457fd0d58d 100644 --- a/tests/pytest/query/nestedQuery/queryInterval.py +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -92,6 +92,16 @@ class TDTestCase: tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));") tdSql.checkData(0, 1, 120) + # TS-802 + tdSql.query("select first(*) from st interval(5m) limit 10") + tdSql.checkRows(10) + + tdSql.query("select * from (select first(*) from st interval(5m) limit 10) order by ts") + tdSql.checkRows(10) + + tdSql.query("select * from (select first(*) from st interval(5m) limit 10) order by ts desc") + tdSql.checkRows(10) + # clear env testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") diff --git a/tests/pytest/query/queryNcharNull.py b/tests/pytest/query/queryNcharNull.py new file mode 100644 index 0000000000000000000000000000000000000000..75565afd58f6663c510a2b1735f34097cc795e83 --- /dev/null +++ b/tests/pytest/query/queryNcharNull.py @@ -0,0 +1,73 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create table stb1 (ts TIMESTAMP, id INT, col1 NCHAR(20), col2 BINARY(30), col3 FLOAT) TAGS (tid INT, name BINARY(20))" + ) + + tdSql.execute( + "insert into tb1 using stb1 tags(1, 'ABC') values (now - 1m, 1, '北京', '朝阳', 3.141)" + ) + + tdSql.execute( + "insert into tb1 using stb1 tags(1, 'ABC') values (now, 2, NULL, NULL, 3.141)" + ) + + tdSql.query( + "select * from (select * from stb1) where col1 = '北京'" + ) + + tdSql.checkData(0, 2, '北京') + + tdSql.execute( + "create table normal1 (ts TIMESTAMP, id INT, col1 NCHAR(20), col2 BINARY(30), col3 FLOAT)" + ) + + tdSql.execute( + "insert into normal1 values (now - 1m, 1, '北京', '朝阳', 3.141)" + ) + + tdSql.execute( + "insert into normal1 values (now, 1, NULL, NULL, 3.141)" + ) + + tdSql.query( + "select * from (select * from normal1) where col1 = '北京'" + ) + + tdSql.checkData(0, 2, '北京') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py index a1789c8909f542ba3dcae83042ab50cde9e58e32..73f131faaed8cecde74abf81dfac7ab448ab8edb 100644 --- a/tests/pytest/query/queryNormal.py +++ 
b/tests/pytest/query/queryNormal.py @@ -149,6 +149,27 @@ class TDTestCase: tdSql.checkData(1, 0, "2020-03-01 00:01:01") tdSql.checkData(1, 1, 421) tdSql.checkData(1, 2, "tm1") + + # TD-12980 + if platform.system() == "Linux": + types = ["tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + ts = 1640000000000 + + for type in types: + tdSql.execute("drop table if exists csvtest") + tdSql.execute("create table csvtest(ts timestamp, c1 %s)" % type) + for i in range(10): + tdSql.execute("insert into csvtest values(%d, %d)" % (ts + i, i)) + + os.system("taos -s 'select c1 from db.csvtest >> a.csv'") + + tdSql.query("select c1 from csvtest") + for i in range(10): + r = os.popen("sed -n %dp a.csv" % (i + 2)) + data = r.read() + tdSql.checkData(i, 0, int(data)) + + os.system("rm -rf a.csv") def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json index a786e93696e8b13b39d45a9c4c8ef1aae829fef8..c9fa0f6fb0ddc777159b5d13f324c65b23cabd0d 100644 --- a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json @@ -107,7 +107,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "TINYINT", "count":6}], + "columns": [{"type": "TINYINT"}], "tags": [{"type": "TINYINT", "count":6}] }, { @@ -263,7 +263,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "NCHAR","len": 16, "count":6}], + "columns": [{"type": "NCHAR","len": 16}], "tags": [{"type": "NCHAR", "count":6}] }, { @@ -289,7 +289,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "BINARY", "len": 16, "count":6}], + "columns": [{"type": "BINARY", "len": 16}], "tags": [{"type": "BINARY", "count":6}] }, { diff --git a/tests/system-test/2-query/TD-12909.py 
b/tests/system-test/2-query/TD-12909.py new file mode 100644 index 0000000000000000000000000000000000000000..7dd1a870c57b57a2d9e3ab517277fbbfc8cf1d52 --- /dev/null +++ b/tests/system-test/2-query/TD-12909.py @@ -0,0 +1,91 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# -*- coding: utf-8 -*- +from posixpath import split +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record + self.num = 10 + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + def caseDescription(self): + ''' + case1 : [TD-12909] : + this test case is for illegal SQL in query ,it will crash taosd. 
+ ''' + return + + def run(self): + tdSql.prepare() + tdSql.execute("create database if not exists testdb keep 36500;") + tdSql.execute("use testdb;") + tdSql.execute("create stable st (ts timestamp , id int , value double) tags(hostname binary(10) ,ind int);") + for i in range(self.num): + tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+10000*i,i*2,i+10.00)) + tdSql.query("select distinct(hostname) from testdb.st") + tdSql.checkRows(10) + tdSql.query("select count(*) from st where hostname >1") + tdSql.query("select count(*) from st where hostname >'1'") + tdSql.query("select count(*) from st where hostname <=1") + tdSql.query("select count(*) from st where hostname <='1'") + tdSql.query("select count(*) from st where hostname !=1") + tdSql.query("select count(*) from st where hostname !='1'") + tdSql.query("select count(*) from st where hostname <>1") + tdSql.query("select count(*) from st where hostname <>'1'") + tdSql.query("select count(*) from st where hostname in ('1','2')") + tdSql.query("select count(*) from st where hostname match '%'") + tdSql.query("select count(*) from st where hostname match '%1'") + tdSql.query("select count(*) from st where hostname between 1 and 2") + tdSql.query("select count(*) from st where hostname between 'abc' and 'def'") + + tdSql.error("select count(*) from st where hostname between 1 and 2 or sum(1)") + tdSql.error("select count(*) from st where hostname < max(123)") + + tdSql.error("select count(*) from st where hostname < max('abc')") + tdSql.error("select count(*) from st where hostname < max(min(123))") + + tdSql.error("select count(*) from st where hostname < sum('abc')") + tdSql.error("select count(*) from st where hostname < sum(min(123))") + + tdSql.error("select count(*) from st where hostname < diff('abc')") + tdSql.error("select count(*) from st where hostname < diff(min(123))") + + tdSql.error("select count(*) from st where hostname < tbname") 
+ tdSql.error("select count(*) from st where ts > 0 and tbname in ('d1', 'd2') and tbname-2") + + tdSql.query("select count(*) from st where id > 10000000000000") + + def stop(self): + tdSql.close() + +tdLog.success("%s successfully executed" % __file__) +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/fulltest-query.sh b/tests/system-test/fulltest-query.sh index efdbbe4047791dfa865d2897c63681fb6b41b9c6..eeb3871d92324be5dbb8902baa881eef79e55b9c 100755 --- a/tests/system-test/fulltest-query.sh +++ b/tests/system-test/fulltest-query.sh @@ -3,3 +3,4 @@ python3 ./test.py -f 2-query/TD-11256.py python3 ./test.py -f 2-query/TD-11945_crash.py python3 ./test.py -f 2-query/TD-12340-12342.py python3 ./test.py -f 2-query/TD-12344.py +python3 ./test.py -f 2-query/TD-12909.py