Commit 23f0e2c1 authored by Alex Duan

Merge branch '2.4' into fix/TS-575-V24

......@@ -46,6 +46,7 @@ def pre_test(){
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script {
......@@ -77,6 +78,8 @@ def pre_test(){
git checkout -qf FETCH_HEAD
git clean -dfx
git submodule update --init --recursive
cd src/kit/taos-tools/deps/avro
git clean -dfx
cd ${WK}
git reset --hard HEAD~10
'''
......@@ -121,6 +124,7 @@ def pre_test_noinstall(){
sh'hostname'
sh'''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script {
......@@ -152,6 +156,8 @@ def pre_test_noinstall(){
git checkout -qf FETCH_HEAD
git clean -dfx
git submodule update --init --recursive
cd src/kit/taos-tools/deps/avro
git clean -dfx
cd ${WK}
git reset --hard HEAD~10
'''
......@@ -184,7 +190,7 @@ def pre_test_noinstall(){
git clean -dfx
mkdir debug
cd debug
cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=false > /dev/null
cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null
make
'''
return 1
......@@ -193,6 +199,7 @@ def pre_test_mac(){
sh'hostname'
sh'''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script {
......@@ -224,6 +231,8 @@ def pre_test_mac(){
git checkout -qf FETCH_HEAD
git clean -dfx
git submodule update --init --recursive
cd src/kit/taos-tools/deps/avro
git clean -dfx
cd ${WK}
git reset --hard HEAD~10
'''
......@@ -352,7 +361,7 @@ pipeline {
}
stages {
stage('pre_build'){
agent{label 'catalina'}
agent{label 'master'}
options { skipDefaultCheckout() }
when {
changeRequest()
......@@ -361,37 +370,13 @@ pipeline {
script{
abort_previous()
abortPreviousBuilds()
println env.CHANGE_BRANCH
if(env.CHANGE_FORK){
scope = ['connector','query','insert','other','tools','taosAdapter']
}
else{
sh'''
cd ${WKC}
git reset --hard HEAD~10
git fetch
git checkout ${CHANGE_BRANCH}
git pull
'''
dir('/var/lib/jenkins/workspace/TDinternal/community'){
gitlog = sh(script: "git log -1 --pretty=%B ", returnStdout:true)
println gitlog
if (!(gitlog =~ /\((.*?)\)/)){
autoCancelled = true
error('Please fill in the scope information correctly.\neg. [TD-xxxx]<fix>(query,insert):xxxxxxxxxxxxxxxxxx ')
}
temp = (gitlog =~ /\((.*?)\)/)
temp = temp[0].remove(1)
scope = temp.split(",")
scope = ['connector','query','insert','other','tools','taosAdapter']
Collections.shuffle mod
Collections.shuffle sim_mod
}
scope = ['connector','query','insert','other','tools','taosAdapter']
Collections.shuffle mod
Collections.shuffle sim_mod
}
}
}
}
}
}
stage('Parallel test stage') {
//only build pr
options { skipDefaultCheckout() }
......
......@@ -110,10 +110,10 @@ IF (TD_MIPS_32)
ENDIF ()
IF (TD_ALPINE)
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
SET(COMMON_FLAGS "${COMMON_FLAGS} -Wl,-z,stack-size=2097152")
link_libraries(argp)
ADD_DEFINITIONS(-D_ALPINE)
MESSAGE(STATUS "aplhine is defined")
MESSAGE(STATUS "alpine is defined")
ENDIF ()
IF ("${BUILD_HTTP}" STREQUAL "")
......
......@@ -96,10 +96,12 @@ IF ("${CPUTYPE}" STREQUAL "")
MESSAGE(STATUS "The current platform is amd64")
MESSAGE(STATUS "Set CPUTYPE to x64")
SET(CPUTYPE "x64")
SET(PLATFORM_ARCH_STR "amd64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
MESSAGE(STATUS "The current platform is x86")
MESSAGE(STATUS "Set CPUTYPE to x86")
SET(CPUTYPE "x32")
SET(PLATFORM_ARCH_STR "i386")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
MESSAGE(STATUS "Set CPUTYPE to aarch32")
SET(CPUTYPE "aarch32")
......@@ -107,12 +109,14 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LINUX TRUE)
SET(TD_LINUX_32 FALSE)
SET(TD_ARM_32 TRUE)
SET(PLATFORM_ARCH_STR "arm")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
SET(CPUTYPE "aarch64")
MESSAGE(STATUS "Set CPUTYPE to aarch64")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_ARM_64 TRUE)
SET(PLATFORM_ARCH_STR "arm64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(CPUTYPE "mips64")
MESSAGE(STATUS "Set CPUTYPE to mips64")
......@@ -124,7 +128,6 @@ IF ("${CPUTYPE}" STREQUAL "")
MESSAGE(STATUS "Set CPUTYPE to apple silicon m1")
SET(TD_ARM_64 TRUE)
ENDIF ()
ELSE ()
# if generate ARM version:
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
......@@ -132,27 +135,33 @@ ELSE ()
SET(TD_LINUX TRUE)
SET(TD_LINUX_32 FALSE)
SET(TD_ARM_32 TRUE)
SET(PLATFORM_ARCH_STR "arm")
MESSAGE(STATUS "input cpuType: aarch32")
ELSEIF (${CPUTYPE} MATCHES "aarch64")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_ARM_64 TRUE)
SET(PLATFORM_ARCH_STR "arm64")
MESSAGE(STATUS "input cpuType: aarch64")
ELSEIF (${CPUTYPE} MATCHES "mips64")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_MIPS_64 TRUE)
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
ELSEIF (${CPUTYPE} MATCHES "x64")
SET(PLATFORM_ARCH_STR "amd64")
MESSAGE(STATUS "input cpuType: x64")
ELSEIF (${CPUTYPE} MATCHES "x86")
SET(PLATFORM_ARCH_STR "i386")
MESSAGE(STATUS "input cpuType: x86")
ELSE ()
MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
ENDIF ()
ENDIF ()
MESSAGE(STATUS "platform arch:" ${PLATFORM_ARCH_STR})
# cmake -DOSTYPE=Ningsi
IF (${OSTYPE} MATCHES "Ningsi60")
SET(TD_NINGSI TRUE)
......
......@@ -49,7 +49,7 @@ c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
```bash
$ docker exec -it tdengine /bin/bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
root@tdengine-server:~/TDengine-server-2.4.0.4#
```
- **docker exec**: Enter the container via the docker exec command; if you exit, the container will not stop.
......@@ -61,38 +61,245 @@ root@c452519b0f9b:~/TDengine-server-2.0.20.13#
4, After entering the container, execute the taos shell client program.
```bash
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
taos>
```
The TDengine terminal has successfully connected to the server and printed a welcome message and version information. If the connection fails, an error message is printed instead.
In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, refer to the [TAOS SQL guide](https://www.taosdata.com/cn/documentation/taos-sql)
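For illustration only, here is a minimal sketch (not from the original docs) that runs a few SQL statements non-interactively from the host with `taos -s`; the database and table names (`demo`, `t`) are hypothetical:
```bash
# Hypothetical names: database "demo", table "t" -- a sketch, not an official example
taos -s "create database if not exists demo;"
taos -s "create table if not exists demo.t (ts timestamp, speed int);"
taos -s "insert into demo.t values (now, 10);"
taos -s "select * from demo.t;"
```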
## Learn more about TDengine with taosdemo
## Learn more about TDengine with taosBenchmark
1, Following the above steps, exit the TDengine terminal program first.
### Access the TDengine server inside the Docker container from the host
After starting the TDengine Docker container with the -p option mapping the correct ports, you can access the TDengine server running in the container simply by using the taos shell command on the host.
```
$ taos
Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
You can also use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
This command accesses the TDengine server through the RESTful interface, connecting to port 6041 on the local machine, so the connection is successful.
For details of the TDengine RESTful interface, please refer to the [official documentation](https://www.taosdata.com/cn/documentation/connector#restful)
### Running TDengine server and taosAdapter with a Docker container
Starting with TDengine 2.4.0.0, the Docker image provides taosAdapter as a separately running component, replacing the http server that was built into the taosd process in earlier versions. taosAdapter supports writing data to and querying data from the TDengine server through the RESTful interface, and provides data ingestion interfaces compatible with InfluxDB/OpenTSDB, allowing InfluxDB/OpenTSDB applications to be migrated to TDengine seamlessly. In the new Docker image, taosd runs by default and taosAdapter does not; you can instead specify taosadapter in the docker run command to run only taosAdapter (taosd will not run), or specify run_taosd_and_taosadapter.sh in the docker run command to run both taosd and taosAdapter.
Note: if taosAdapter runs inside the container, you need to map additional ports as needed; for the default port configuration and how to change it, please refer to the [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README-CN.md)
Run the TDengine 2.4.0.4 image with docker (taosd only):
```
$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4
```
Run the TDengine 2.4.0.4 image with docker (taosAdapter only):
```
$ docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp tdengine/tdengine:2.4.0.4 taosadapter
```
Run the TDengine 2.4.0.4 image with docker (run both taosd and taosAdapter):
```
$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4 run_taosd_and_taosadapter.sh
```
Verify that the RESTful interface works with the curl command:
```
$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
```
taosAdapter supports multiple data collection agents (such as Telegraf, StatsD, collectd, etc.). Here we only simulate StatsD writing data; run the following command on the host:
```
$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
```
Then you can use the taos shell to query the database statsd automatically created by taosAdapter and the contents of the super table foo:
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.002112s)
taos> use statsd;
Database changed.
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
Query OK, 1 row(s) in set (0.001160s)
taos> select * from foo;
ts | value | metric_type |
=======================================================================================
2021-12-28 09:21:48.840820836 | 1 | counter |
Query OK, 1 row(s) in set (0.001639s)
taos>
```
You can see that the simulated data has been written to TDengine.
### Application example: write data to the TDengine server in the Docker container using taosBenchmark on the host
1, Execute taosBenchmark (formerly named taosdemo) on the host command line to write data to the TDengine server in the Docker container
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
$ taosBenchmark
taosdemo is simulating data generated by power equipments monitoring...
taosBenchmark is simulating data generated by power equipments monitoring...
host: 127.0.0.1:6030
user: root
password: taosdata
configDir:
configDir:
resultFile: ./output.txt
thread num of insert data: 10
thread num of create table: 10
......@@ -121,13 +328,13 @@ database[0]:
maxSqlLen: 1048576
timeStampStep: 1
startTimestamp: 2017-07-14 10:40:00.000
sampleFormat:
sampleFile:
tagsFile:
sampleFormat:
sampleFile:
tagsFile:
columnCount: 3
column[0]:FLOAT column[1]:INT column[2]:FLOAT
column[0]:FLOAT column[1]:INT column[2]:FLOAT
tagCount: 2
tag[0]:INT tag[1]:BINARY(16)
tag[0]:INT tag[1]:BINARY(16)
Press enter key to continue or Ctrl-C to stop
```
......@@ -136,17 +343,17 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
It takes about a few minutes to execute this command, and eventually a total of 100 million records are inserted.
3, Go to the TDengine terminal and view the data generated by taosdemo.
2, Go to the TDengine terminal and view the data generated by taosBenchmark.
- **Go to the terminal interface.**
```bash
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
taos>
```
- **View databases.**
......
......@@ -2,7 +2,7 @@
## <a class="anchor" id="install"></a>Quick Install
TDengine software consists of three parts: server, client, and alarm module. Currently the 2.0 server can only be installed and run on Linux; support for Windows, Mac OS and other systems will come later. The client can be installed and run on Windows or Linux. Applications on any OS can also connect to the server taosd via the RESTful interface. Supported CPUs are X64/ARM64/MIPS64/Alpha64, and ARM32, RISC-V and other CPU architectures will be supported later. Users can choose to install from [source code](https://www.taosdata.com/cn/getting-started/#通过源码安装) or from the [installation packages](https://www.taosdata.com/cn/getting-started/#通过安装包安装) as needed.
TDengine software consists of three parts: server, client, and alarm module. Currently the 2.0 server can only be installed and run on Linux; support for Windows, Mac OS and other systems will come later. The client can be installed and run on Windows or Linux. Applications on any OS can also connect to the server taosd via the RESTful interface; from version 2.4 on, the http service is provided by default by the separately running taosAdapter component, while earlier versions use the built-in http service. Supported CPUs are X64/ARM64/MIPS64/Alpha64, and ARM32, RISC-V and other CPU architectures will be supported later. Users can choose to install from [source code](https://www.taosdata.com/cn/getting-started/#通过源码安装) or from the [installation packages](https://www.taosdata.com/cn/getting-started/#通过安装包安装) as needed.
### <a class="anchor" id="source-install"></a>Install from Source
......@@ -134,10 +134,10 @@ taos> source <filename>;
## <a class="anchor" id="demo"></a>Experience TDengine's Lightning Speed
After starting the TDengine service, execute taosdemo in a Linux terminal
After starting the TDengine service, execute taosBenchmark (formerly named taosdemo) in a Linux terminal:
```bash
$ taosdemo
$ taosBenchmark
```
This command automatically creates a super table meters under the database test, with 10,000 tables under it named "d0" to "d9999". Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase), with timestamps from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId: groupId is set from 1 to 10 and location is set to "beijing" or "shanghai".
......@@ -175,10 +175,10 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group
```mysql
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```
## <a class="anchor" id="taosdemo"></a> Detailed taosdemo feature list
## <a class="anchor" id="taosBenchmark"></a> Detailed taosBenchmark feature list
The taosdemo command itself has many options for configuring the number of tables, the number of records, and so on; run `taosdemo --help` for a full list. You can try different parameter settings.
For detailed usage of taosdemo, please refer to [How to use taosdemo to test the performance of TDengine](https://www.taosdata.com/cn/documentation/getting-started/taosdemo)
The taosBenchmark command itself has many options for configuring the number of tables, the number of records, and so on; run `taosBenchmark --help` for a full list. You can try different parameter settings.
For detailed usage of taosBenchmark, please refer to [How to use taosBenchmark to test the performance of TDengine](https://www.taosdata.com/cn/documentation/getting-started/taosBenchmark)
## Client and Alarm Module
......
......@@ -822,7 +822,7 @@ k1 = conn.query("select info->'k1' as k1 from s1").fetch_all_into_dict()
To support development on various platforms, TDengine provides an API that conforms to REST design standards, i.e. a RESTful API. To minimize the learning cost, and unlike the RESTful API design of other databases, TDengine operates the database directly through the SQL statement contained in the body of an HTTP POST request, requiring only a URL. See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html) for how to use the RESTful connector.
Note: one difference from the standard connectors is that the RESTful interface is stateless, so the `USE db_name` command has no effect; all references to table names and super table names must carry the database name prefix. (Starting from version 2.2.0.0, db_name can be specified in the RESTful URL; in that case, if the SQL statement does not specify a database name prefix, the db_name given in the URL is used.)
Note: one difference from the native connectors is that the RESTful interface is stateless, so the `USE db_name` command has no effect; all references to table names and super table names must carry the database name prefix. (Starting from version 2.2.0.0, db_name can be specified in the RESTful URL; in that case, if the SQL statement does not specify a database name prefix, the db_name given in the URL is used. Since version 2.4.0.0, the RESTful service is provided by taosAdapter by default, which requires db_name to be specified in the URL.)
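As a hedged sketch of the URL form described above, assuming taosAdapter (or the built-in http service) is listening on port 6041 and a database named `demo` exists; the table name `t1` is hypothetical and needs no database prefix because the db_name is given in the URL:
```bash
# Sketch only: query table t1 in database demo via the RESTful endpoint /rest/sql/<db_name>
curl -u root:taosdata -d 'select * from t1' http://127.0.0.1:6041/rest/sql/demo
```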
### Installation
......
......@@ -64,7 +64,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
![img](../images/connections/add_datasource3.jpg)
* Host: the IP address of any server in the TDengine cluster and the port number of the TDengine RESTful interface (6041), default http://localhost:6041
* Host: the IP address of any server in the TDengine cluster and the port number of the TDengine RESTful interface (6041), default http://localhost:6041. Note: since version 2.4, the RESTful service is provided by the stand-alone taosAdapter component by default; please refer to the relevant documentation for configuration and deployment
* User: TDengine username.
* Password: TDengine user password.
......
......@@ -119,7 +119,7 @@ taosd -C
| 1 | firstEP | | **SC** | | The end point of the first dnode in the cluster that taosd actively connects to at startup | | localhost:6030 | |
| 2 | secondEP | YES | **SC** | | The end point of the second dnode in the cluster that taosd tries to connect to if firstEp cannot be reached at startup | | none | |
| 3 | fqdn | | **SC** | | The FQDN of the data node. If you prefer access by IP address, it can be set to the node's IP address. | | Defaults to the first hostname configured by the operating system. | The length of this value must be kept within 96 characters. |
| 4 | serverPort | | **SC** | | The port number of the external service after taosd starts | | 6030 | The port used by the RESTful service is this value plus 11, i.e. 6041 by default. |
| 4 | serverPort | | **SC** | | The port number of the external service after taosd starts | | 6030 | The port used by the RESTful service is this value plus 11, i.e. 6041 by default (note that 2.4 and later versions use taosAdapter to provide the RESTful interface). |
| 5 | logDir | | **SC** | | Log file directory; client and server run logs are written to this directory | | /var/log/taos | |
| 6 | scriptDir | YES | **S** | | | | | |
| 7 | dataDir | | **S** | | Data file directory; all data files are written to this directory | | /var/lib/taos | |
......@@ -180,10 +180,10 @@ taosd -C
| 62 | http | | **S** | | Switch for the server's built-in http service. | 0: disable the http service, 1: enable the http service. | 1 | |
| 63 | mqtt | YES | **S** | | Switch for the server's built-in mqtt service. | 0: disable the mqtt service, 1: enable the mqtt service. | 0 | |
| 64 | monitor | | **S** | | Switch for the server's built-in system monitoring. Monitoring mainly collects the load of the physical node, including CPU, memory, disk, network bandwidth and HTTP request counts; the records are stored in the `LOG` database. | 0: disable the monitoring service, 1: enable the monitoring service. | 0 | |
| 65 | httpEnableRecordSql | | **S** | | For internal use; records the SQL calls made through the RESTful interface | | 0 | The generated files (httpnote.0/httpnote.1) are in the same directory as the server logs. |
| 66 | httpMaxThreads | | **S** | | Number of threads for the RESTful interface | | 2 | |
| 65 | httpEnableRecordSql | | **S** | | For internal use; records the SQL calls made through the RESTful interface. The taosAdapter configuration may differ, please refer to the corresponding [documentation](https://www.taosdata.com/cn/documentation/tools/adapter) | | 0 | The generated files (httpnote.0/httpnote.1) are in the same directory as the server logs. |
| 66 | httpMaxThreads | | **S** | | Number of threads for the RESTful interface. The taosAdapter configuration may differ, please refer to the corresponding [documentation](https://www.taosdata.com/cn/documentation/tools/adapter) | | 2 | |
| 67 | telegrafUseFieldNum | YES | | | | | | |
| 68 | restfulRowLimit | | **S** | | Maximum number of records returned by a single RESTful request | | 10240 | Up to 10,000,000 |
| 68 | restfulRowLimit | | **S** | | Maximum number of records returned by a single RESTful request. The taosAdapter configuration may differ, please refer to the corresponding [documentation](https://www.taosdata.com/cn/documentation/tools/adapter) | | 10240 | Up to 10,000,000 |
| 69 | numOfLogLines | | **SC** | | Maximum number of lines allowed in a single log file. | | 10,000,000 | |
| 70 | asyncLog | | **SC** | | Log write mode | 0: synchronous, 1: asynchronous | 1 | |
| 71 | logKeepDays | | **SC** | days | Maximum retention time of log files | | 0 | When greater than 0, log files are renamed to taosdlog.xxx, where xxx is the timestamp of the log file's last modification. |
......@@ -641,9 +641,11 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- *taosd*: the TDengine server executable
- *taos*: the TDengine shell executable
- *taosdump*: data import/export tool
- *taosdemo*: TDengine testing tool
- *taosBenchmark*: TDengine testing tool
- remove.sh: script to uninstall TDengine; please run it with caution. It is linked to the **rmtaos** command in the /usr/bin directory. It removes the TDengine installation directory /usr/local/taos but keeps /etc/taos, /var/lib/taos and /var/log/taos.
Note: since version 2.4.0.0, taosBenchmark and taosdump require the separate taosTools installation package.
You can configure different data and log directories by modifying the system configuration file taos.cfg, for example:
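A rough sketch of overriding the directories, assuming root privileges; the paths below are examples only, not recommended values:
```bash
# Append example directory settings to taos.cfg (format: "<parameter> <value>")
cat >> /etc/taos/taos.cfg <<'EOF'
dataDir  /data/taos
logDir   /data/taos/log
EOF
systemctl restart taosd   # restart the service so the new directories take effect
```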
## Starting, stopping and uninstalling TDengine
......@@ -692,6 +694,12 @@ rmtaos
1. Legal characters: English letters, digits and underscore
2. May start with an English letter or underscore, but not with a digit
3. Case-insensitive
4. Rules for escaped table (column) names:
To support a wider range of table (column) names, TDengine introduces the new escape character "`". It allows table names that would otherwise clash with keywords and lifts the legality checks described above.
Escaped table (column) names are still subject to the length limit, and the escape characters are not counted towards the length. With the escape characters, the content between them is no longer case-folded.
For example: \`aBc\` and \`abc\` are different table (column) names, whereas abc and aBc are the same table (column) name.
Note that the content between the escape characters must consist of printable characters.
Escape character support is available since version 2.3.0.1 (see the sketch below).
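A minimal sketch of back-quoted names from the shell, assuming a database `demo` already exists and the `taos` client with its `-d`/`-s` options is installed; the table name is hypothetical:
```bash
# Single quotes keep the backticks literal for bash; `aBc` and `abc` would be distinct tables
taos -d demo -s 'create table `aBc` (ts timestamp, v int);'
taos -d demo -s 'insert into `aBc` values (now, 1);'
taos -d demo -s 'select * from `aBc`;'
```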
**Legal character set for passwords**
......@@ -761,6 +769,28 @@ rmtaos
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
## Escape character description
- Escape character table (escape characters are supported since version 2.4.0.4)
| Escape Sequence | **Character Represented by Sequence** |
| :--------: | ------- |
| `\'` | single quote ' |
| `\"` | double quote " |
| `\n` | line feed |
| `\r` | carriage return |
| `\t` | tab |
| `\\` | backslash \ |
| `\%` | %, see rules below |
| `\_` | _, see rules below |
- Rules for using escape characters
  1. Escape characters in identifiers (database names, table names, column names)
     1. Ordinary identifiers: an invalid-identifier error is reported directly, because identifiers must consist of digits, letters and underscores and must not start with a digit.
     2. Back-quoted `` identifiers: kept as-is, not escaped
  2. Escape characters in data
     1. The escape characters defined above are translated (see below for % and _); if a backslash has no matching escape character, the \ is ignored.
     2. Because % and _ are wildcards in LIKE, use `\%` and `\_` inside a LIKE pattern to match the literal characters % and _. Outside a LIKE pattern context, `\%` and `\_` evaluate to the strings `\%` and `\_` rather than % and _ (see the sketch below).
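A hedged sketch of the LIKE rule, assuming a database `demo` with a table `t` that has a string column `name` (all names hypothetical):
```bash
# The shell turns \\_ into \_, so the SQL pattern is 'abc\_%': a literal underscore after "abc"
taos -d demo -s "select * from t where name like 'abc\\_%';"
```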
## Diagnostics and others
#### Network connection diagnostics
......
......@@ -1854,7 +1854,7 @@ TDengine 中的表(列)名命名规则如下:
select jtag->'key' from (select jtag from stable) where jtag->'key'>0
```
## Escape character description
- Escape character table
- Escape character table (escape characters are supported since version 2.4.0.4)
| Escape Sequence | **Character Represented by Sequence** |
| :--------: | ------- |
......
......@@ -186,7 +186,7 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
| TCP | 6030 | Communication between client and server. | Determined by the serverPort setting in the configuration file. |
| TCP | 6035 | Communication between nodes in a multi-node cluster. | Changes with the serverPort. |
| TCP | 6040 | Data synchronization between nodes in a multi-node cluster. | Changes with the serverPort. |
| TCP | 6041 | RESTful communication between client and server. | Changes with the serverPort. |
| TCP | 6041 | RESTful communication between client and server. | Changes with the serverPort. Note that the taosAdapter configuration may differ, please refer to the corresponding [documentation](https://www.taosdata.com/cn/documentation/tools/adapter) |
| TCP | 6042 | Service port of the Arbitrator. | Changes with the Arbitrator startup parameters. |
| TCP | 6043 | TaosKeeper monitoring service port. | Changes with the TaosKeeper startup parameters. |
| TCP | 6044 | Data ingestion port for StatsD. | Changes with the taosAdapter startup parameters (version 2.3.0.1 and above). |
......@@ -197,7 +197,7 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
**20. How do I resolve compilation failures of the component written in Go?**
TDengine 2.3.0.0 and later include a taosAdapter component developed in Go, which replaces the previously built-in httpd and provides the original httpd functionality plus data ingestion from a variety of other software (Prometheus, Telegraf, collectd, StatsD, etc.).
TDengine 2.3.0.0 and later include taosAdapter, a stand-alone component developed in Go that must run separately. It replaces the httpd previously built into taosd and provides the original httpd functionality plus data ingestion from a variety of other software (Prometheus, Telegraf, collectd, StatsD, etc.).
To build from the latest develop branch, first run `git submodule update --init --recursive` to download the taosAdapter repository code before compiling.
The default build now compiles taosAdapter automatically. Go 1.14 or later is required; if Go compilation errors occur, they are often caused by problems accessing go mod from within China and can be resolved by setting Go environment variables:
......
......@@ -49,7 +49,7 @@ c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
```bash
$ docker exec -it tdengine /bin/bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
root@c452519b0f9b:~/TDengine-server-2.4.0.4#
```
- **docker exec**: Enter the container via the docker exec command; if you exit, the container will not stop.
......@@ -61,9 +61,9 @@ root@c452519b0f9b:~/TDengine-server-2.0.20.13#
4, After entering the container, execute the taos shell client program.
```bash
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
......@@ -73,21 +73,94 @@ The TDengine terminal successfully connects to the server and prints out a welco
In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, you can refer to [TAOS SQL guide](https://www.taosdata.com/en/documentation/taos-sql).
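As a quick, non-official sketch of the point above, you can also run SQL non-interactively from the host against the running container named `tdengine` (the database and table names are hypothetical):
```bash
# Each call executes one SQL statement inside the container via the taos client
docker exec tdengine taos -s "create database if not exists demo;"
docker exec tdengine taos -s "create table if not exists demo.t (ts timestamp, speed int);"
docker exec tdengine taos -s "insert into demo.t values (now, 10);"
docker exec tdengine taos -s "select * from demo.t;"
```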
## Learn more about TDengine with taosdemo
## Learn more about TDengine with taosBenchmark
1, Following the above steps, exit the TDengine terminal program first.
1, Execute `taosBenchmark` from the command line interface.
```bash
$ taos> q
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
root@c452519b0f9b:~/TDengine-server-2.4.0.4# taosBenchmark
```
$ taos
2, Execute taosdemo from the command line interface.
Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
You can also access the TDengine server inside the Docker container using `curl` command from the host side through the RESTful port.
```
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
This command accesses the TDengine server through the RESTful interface, which connects to port 6041 on the local machine, so the connection is successful.
TDengine RESTful interface details can be found in the [official documentation](https://www.taosdata.com/en/documentation/connector#restful).
### Running TDengine server and taosAdapter with a Docker container
Docker containers of TDengine version 2.4.0.0 and later include a component named `taosAdapter`, which supports data writing and querying capabilities to the TDengine server through the RESTful interface and provides data ingestion interfaces compatible with InfluxDB/OpenTSDB, allowing InfluxDB/OpenTSDB applications to be migrated to TDengine seamlessly.
Note: If taosAdapter is running inside the container, you need to add mapping to other additional ports as needed, please refer to [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README.md) for the default port number and modification methods for the specific purpose.
Running TDengine version 2.4.0.4 image with docker.
```
$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4
```
Verify that the RESTful interface provided by taosAdapter is working, using the `curl` command.
```
$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
```
taosAdapter supports multiple data collection agents (e.g. Telegraf, StatsD, collectd, etc.); here we only demonstrate StatsD writing simulated data. Execute the following command on the host side.
```
$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
```
Then you can use the taos shell to query the database statsd automatically created by taosAdapter and the contents of the super table foo.
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.002112s)
taos> use statsd;
Database changed.
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
Query OK, 1 row(s) in set (0.001160s)
taos> select * from foo;
ts | value | metric_type |
=======================================================================================
2021-12-28 09:21:48.840820836 | 1 | counter |
Query OK, 1 row(s) in set (0.001639s)
taos>
```
You can see that the simulation data has been written to TDengine.
### Application example: write data to TDengine server in Docker container using `taosBenchmark` on the host
1, execute `taosBenchmark` (was named taosdemo) in the host command line interface to write data to the TDengine server in the Docker container
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
$ taosBenchmark
taosdemo is simulating data generated by power equipments monitoring...
taosBenchmark is simulating data generated by power equipments monitoring...
host: 127.0.0.1:6030
user: root
......@@ -136,14 +209,14 @@ After enter, this command will automatically create a super table meters under t
It takes about a few minutes to execute this command and ends up inserting a total of 100 million records.
3, Go to the TDengine terminal and view the data generated by taosdemo.
3, Go to the TDengine terminal and view the data generated by taosBenchmark.
- **Go to the terminal interface.**
```bash
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
......
Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called taosdemo for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosdemo customized parameters.
Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. With it, users can easily simulate the scenario of a large number of devices generating a very large amount of data, and manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark's customized parameters.
Running taosdemo is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compiling the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory.
Running taosBenchmark is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compile the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory.
To run an insertion test with taosdemo
To run an insertion test with taosBenchmark
--
Executing taosdemo without any parameters results in the following output.
Executing taosBenchmark without any parameters results in the following output.
```
$ taosdemo
$ taosBenchmark
taosdemo is simulating data generated by power equipment monitoring...
taosBenchmark is simulating data generated by power equipment monitoring...
host: 127.0.0.1:6030
user: root
......@@ -54,7 +54,7 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
Press enter key to continue or Ctrl-C to stop
```
The parameters here shows for what taosdemo will use for data insertion. By default, taosdemo without entering any command line arguments will simulate a city power grid system's meter data collection scenario as a typical application in the power industry. That is, a database named test will be created, and a super table named meters will be created, where the super table schema is following:
The parameters here show what taosBenchmark will use for data insertion. By default, taosBenchmark without any command line arguments will simulate a city power grid system's meter data collection scenario as a typical application in the power industry. That is, a database named test will be created, and a super table named meters will be created, where the super table schema is as follows:
```
taos> describe test.meters;
......@@ -69,7 +69,7 @@ taos> describe test.meters;
Query OK, 6 row(s) in set (0.002972s)
```
After pressing any key taosdemo will create the database test and super table meters and generate 10,000 sub-tables representing 10,000 individule meter devices that report data. That means they independently using the super table meters as a template according to TDengine data modeling best practices.
After pressing any key, taosBenchmark will create the database test and the super table meters, and generate 10,000 sub-tables representing 10,000 individual meter devices that report data. That means they are created independently, using the super table meters as a template, according to TDengine data modeling best practices.
```
taos> use test;
Database changed.
......@@ -91,7 +91,7 @@ taos> show stables;
meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001740s)
```
Then taosdemo generates 10,000 records for each meter device.
Then taosBenchmark generates 10,000 records for each meter device.
```
...
====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second====
......@@ -108,9 +108,9 @@ Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1
insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms
```
The above information is the result of a real test on a normal PC server with 8 CPUs and 64G RAM. It shows that taosdemo inserted 100,000,000 (no need to count, 100 million) records in 18 seconds, or an average of 552,909,049 records per second.
The above information is the result of a real test on a normal PC server with 8 CPUs and 64 GB RAM. It shows that taosBenchmark inserted 100,000,000 (no need to count, 100 million) records in 18 seconds, an average of about 5.5 million records per second.
TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosdemo -I stmt) on the same hardware for the same amount of data writes, the results are as follows.
TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosBenchmark -I stmt) on the same hardware for the same amount of data writes, the results are as follows.
```
...
......@@ -145,14 +145,14 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16
insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms
```
It shows that taosdemo inserted 100 million records in 6 seconds, with a much more higher insertion performance, 1,659,590 records wer inserted per second.
It shows that taosBenchmark inserted 100 million records in 6 seconds, with much higher insertion performance: roughly 16.6 million records were inserted per second.
Because taosdemo is so easy to use, so we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping.
Because taosBenchmark is so easy to use, we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping.
The complete list of taosdemo command-line arguments can be displayed via taosdemo --help as follows.
The complete list of taosBenchmark command-line arguments can be displayed via taosBenchmark --help as follows.
```
$ taosdemo --help
$ taosBenchmark --help
-f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
-u, --user=USER The user name to use when connecting to the server.
......@@ -160,7 +160,7 @@ $ taosdemo --help
-c, --config-dir=CONFIG_DIR Configuration directory.
-h, --host=HOST TDengine server FQDN to connect. The default host is localhost.
-P, --port=PORT The TCP/IP port number to use for the connection.
-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'.
-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. By default use 'taosc'.
-d, --database=DATABASE Destination database. By default is 'test'.
-a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3.
-m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'.
......@@ -196,16 +196,16 @@ for any corresponding short options.
Report bugs to <support@taosdata.com>.
```
taosdemo's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below.
taosBenchmark's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below.
```
-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.
-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'.
```
The performance difference between different interfaces of taosdemo has been mentioned earlier, the -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. The -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses parameter binding interface to write data, and rest uses RESTful protocol to write data.
The performance difference between the different taosBenchmark interfaces has been mentioned earlier. The -I parameter selects the interface; currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses the parameter binding interface to write data, and rest uses the RESTful protocol to write data.
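A hedged comparison sketch using only the flags shown in the help output above; the rest variant assumes taosAdapter (or the built-in http service) is listening on port 6041:
```bash
taosBenchmark -I taosc -y   # write via SQL through the native client library
taosBenchmark -I stmt  -y   # write via the parameter-binding interface
taosBenchmark -I rest  -y   # write via the RESTful protocol
```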
```
-T, --threads=NUMBER The number of threads. Default is 8.
```
The -T parameter sets how many threads taosdemo uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible.
The -T parameter sets how many threads taosBenchmark uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible.
```
-b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT.
......@@ -223,11 +223,11 @@ To reach TDengine performance limits, data insertion can be executed by using mu
-n, --records=NUMBER The number of records per table. Default is 10000.
-M, --random The value of records generated are totally random. The default is to simulate power equipment scenario.
```
As mentioned earlier, taosdemo creates 10,000 tables by default, and each table writes 10,000 records. taosdemo can set the number of tables and the number of records in each table by -t and -n. The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter.
As mentioned earlier, taosBenchmark creates 10,000 tables by default, and each table writes 10,000 records. taosBenchmark can set the number of tables and the number of records in each table by -t and -n. The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter.
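Pulling the flags from this and the previous paragraphs together, a sketch with illustrative (not recommended) values might look like this:
```bash
# 2,000 tables, 100,000 rows each, 16 threads, fully random values, no interactive prompt
taosBenchmark -t 2000 -n 100000 -T 16 -M -y
```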
```
-y, --answer-yes Default input yes for prompt.
```
As we can see above, taosdemo outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosdemo to write data immediately after outputting the parameters.
As we can see above, taosBenchmark outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosBenchmark to write data immediately after outputting the parameters.
```
-O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.
-R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000.
......@@ -236,7 +236,7 @@ In some scenarios, the received data does not arrive in exact order, but contain
```
-g, --debug Print debug info.
```
If you are interested in the taosdemo insertion process or if the data insertion result is not as expected, you can use the -g parameter to make taosdemo print the debugging information in the process of the execution to the screen or import it to another file with the Linux redirect command to easily find the cause of the problem. In addition, taosdemo will also output the corresponding executed statements and debugging reasons to the screen after the execution fails. You can search the word "reason" to find the error reason information returned by the TDengine server.
If you are interested in the taosBenchmark insertion process or if the data insertion result is not as expected, you can use the -g parameter to make taosBenchmark print the debugging information in the process of the execution to the screen or import it to another file with the Linux redirect command to easily find the cause of the problem. In addition, taosBenchmark will also output the corresponding executed statements and debugging reasons to the screen after the execution fails. You can search the word "reason" to find the error reason information returned by the TDengine server.
```
-x, --aggr-func Test aggregation funtions after insertion.
```
......@@ -244,7 +244,7 @@ TDengine is not only very powerful in insertion performance, but also in query p
You can see that the `select *` fetch of 100 million rows (not output to the screen) takes only 1.26 seconds. Most normal aggregation functions over 100 million records usually take only about 20 milliseconds, and even the longest count function takes less than 40 milliseconds.
```
taosdemo -I stmt -T 48 -y -x
taosBenchmark -I stmt -T 48 -y -x
...
...
select * took 1.266835 second(s)
......@@ -264,7 +264,7 @@ select min(current) took 0.025812 second(s)
select first(current) took 0.024105 second(s)
...
```
In addition to the command line approach, taosdemo also supports take a JSON file as an incoming parameter to provide a richer set of settings. A typical JSON file would look like this.
In addition to the command line approach, taosBenchmark also supports taking a JSON file as an input parameter to provide a richer set of settings. A typical JSON file would look like this.
```
{
"filetype": "insert",
......@@ -327,11 +327,11 @@ In addition to the command line approach, taosdemo also supports take a JSON fil
}]
}
```
For example, we can specify different number of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to use multiple taosdemo processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file".
For example, we can specify different numbers of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to have multiple taosBenchmark processes (even on different computers) write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file". A simple invocation is sketched below.
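Driving taosBenchmark from a meta file uses the -f option shown in the help output above; the file name here is hypothetical and would follow the JSON structure just shown:
```bash
# insert_config.json: a hypothetical meta file with "filetype": "insert" and the settings above
taosBenchmark -f insert_config.json
```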
Use taosdemo for query and subscription testing
Use taosBenchmark for query and subscription testing
--
taosdemo can not only write data, but also perform query and subscription functions. However, a taosdemo instance can only support one of these functions, not all three, and the configuration file is used to specify which function to test.
taosBenchmark can not only write data, but also perform query and subscription functions. However, a taosBenchmark instance can only support one of these functions, not all three, and the configuration file is used to specify which function to test.
The following is the content of a typical query JSON example file.
```
......@@ -443,7 +443,7 @@ Conclusion
--
TDengine is a big data platform designed and optimized for IoT, Telematics, Industrial Internet, DevOps, etc. TDengine shows a high performance that far exceeds similar products due to the innovative data storage and query engine design in the database kernel. And with SQL syntax support and connectors for multiple programming languages (currently Java, Python, Go, C#, NodeJS, Rust, etc. are supported), it is extremely easy to use and has zero learning cost. To facilitate operation and maintenance needs, we also provide data migration and monitoring functions and other related ecological tools and software.
For users who are new to TDengine, we have developed rich features for taosdemo to facilitate technical evaluation and stress testing. This article is a brief introduction to taosdemo, which will continue to evolve and improve as new features are added to TDengine.
For users who are new to TDengine, we have developed rich features for taosBenchmark to facilitate technical evaluation and stress testing. This article is a brief introduction to taosBenchmark, which will continue to evolve and improve as new features are added to TDengine.
As part of TDengine, taosdemo's source code is fully open on the GitHub. Suggestions or advices about the use or implementation of taosdemo or TDengine are welcomed on GitHub or in the Taos Data user group.
As part of TDengine, taosBenchmark's source code is fully open on GitHub. Suggestions or advice about the use or implementation of taosBenchmark or TDengine are welcome on GitHub or in the Taos Data user group.
......@@ -2,7 +2,7 @@
## <a class="anchor" id="install"></a>Quick Install
TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. From version 2.4 on, TDengine uses a stand-alone component, taosAdapter, to provide the http service; earlier versions use the http server embedded in taosd. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### <a class="anchor" id="source-install"></a>Install from Source
......@@ -138,10 +138,10 @@ taos> source <filename>;
## <a class="anchor" id="demo"></a>Experience TDengine’s Lightning Speed
After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal.
After starting the TDengine server, you can execute the command `taosBenchmark` (was named `taosdemo`) in the Linux terminal.
```bash
$ taosdemo
$ taosBenchmark
```
Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`). The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".
......@@ -180,10 +180,10 @@ taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```
## <a class="anchor" id="taosdemo"></a> Using taosdemo in detail
## <a class="anchor" id="taosBenchmark"></a> Using taosBenchmark in detail
you can run command `taosdemo` with many options, like number of tables, rows of records and so on. To know more about these options, you can execute `taosdemo --help` and then take a try using different options.
Please refer to [How to use taosdemo to test the performance of TDengine](https://www.taosdata.com/en/documentation/getting-started/taosdemo) for detail.
You can run the command `taosBenchmark` with many options, like the number of tables, rows of records and so on. To learn more about these options, you can execute `taosBenchmark --help` and then try different options.
Please refer to [How to use taosBenchmark to test the performance of TDengine](https://www.taosdata.com/en/documentation/getting-started/taosBenchmark) for details.
## Client and Alarm Module
......
......@@ -661,6 +661,8 @@ In tests/examples/python, we provide a sample Python program read_example. py to
To support the development of various types of platforms, TDengine provides an API that conforms to REST design standards, that is, RESTful API. In order to minimize the learning cost, different from other designs of database RESTful APIs, TDengine directly requests SQL statements contained in BODY through HTTP POST to operate the database, and only needs a URL. See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html) for the use of RESTful connectors.
Note: One difference from the native connector is that the RESTful interface is stateless, so the `USE db_name` command has no effect and all references to table names and super table names require the database name to be specified. (Starting from version 2.2.0.0, we support specifying db_name in the RESTful url; in that case, if the database name prefix is not specified in the SQL statement, the db_name given in the url is used. Since version 2.4.0.0, RESTful service is provided by taosAdapter by default, which requires that db_name must be specified in the url.)
### HTTP request format
```
......
......@@ -63,7 +63,7 @@ Enter the data source configuration page and modify the corresponding configurat
![img](../images/connections/add_datasource3.jpg)
- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), default [http://localhost:6041](http://localhost:6041/)
- Host: IP address of any server in the TDengine cluster plus the port number of the TDengine RESTful interface (6041); by default the interface is accessed at [http://localhost:6041](http://localhost:6041/). Note that TDengine 2.4 and later use a stand-alone component, taosAdapter, to provide the RESTful interface. Please refer to its documentation for configuration and deployment.
- User: TDengine username.
- Password: TDengine user password.
......@@ -173,4 +173,4 @@ Please replace the IP address in the command above to the correct one. If no err
The functions below are not supported currently:
- `dbExistsTable(conn, "test")`: check whether the table `test` exists
- `dbListTables(conn)`: list all tables in the connection
\ No newline at end of file
- `dbListTables(conn)`: list all tables in the connection
......@@ -91,7 +91,7 @@ Only some important configuration parameters are listed below. For more paramete
- firstEp: the endpoint of the first dnode in the cluster that taosd connects to when it starts; the default value is localhost:6030.
- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you want to access via IP address directly, you can set it to the IP address of the node.
- serverPort: the port number of the external service after taosd starts; the default value is 6030.
- httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041.
- httpPort: the port number used by the RESTful service; all HTTP (TCP) query/write requests are sent to this port. The default value is 6041. Note that TDengine 2.4 and later use a stand-alone component, taosAdapter, to provide the RESTful interface; a sketch of these settings in `taos.cfg` follows this list.
- dataDir: the data file directory to which all data files will be written. Default: /var/lib/taos.
- logDir: the log file directory to which the running log files of the client and server will be written. Default: /var/log/taos.
- arbitrator: the endpoint of the arbitrator in the system; the default value is null.
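A minimal sketch of how the parameters above might be set in `taos.cfg` (the hostnames and values below are placeholders for illustration, not recommendations):
```bash
# Hypothetical fragment appended to /etc/taos/taos.cfg -- replace the placeholder values.
cat >> /etc/taos/taos.cfg <<'EOF'
firstEp     h1.example.com:6030
fqdn        h1.example.com
serverPort  6030
httpPort    6041
dataDir     /var/lib/taos
logDir      /var/log/taos
EOF
```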
......@@ -538,4 +538,4 @@ At the moment, TDengine has nearly 200 internal reserved keywords, which cannot
| CONCAT | GLOB | METRICS | SET | VIEW |
| CONFIGS | GRANTS | MIN | SHOW | WAVG |
| CONFLICT | GROUP | MINUS | SLASH | WHERE |
| CONNECTION | | | | |
\ No newline at end of file
| CONNECTION | | | | |
......@@ -1336,7 +1336,7 @@ Is not null supports all types of columns. Non-null expression is < > "" and onl
select jtag->'key' from (select jtag from stable) where jtag->'key'>0
```
## Escape character description
- Special Character Escape Sequences
- Special Character Escape Sequences (since version 2.4.0.4; a usage example follows the table below)
| Escape Sequence | **Character Represented by Sequence** |
| :--------: | ------------------- |
......
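As an illustration of how these escape sequences are used (a sketch; the database, table and column names are hypothetical, and it relies on the backslash escapes for the `_` and `%` wildcards exercised by the Python tests later in this change):
```bash
# Hypothetical query: '\_' matches a literal underscore instead of the single-character wildcard.
taos -s "select * from db.tb where col like 'h\_j'"
```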
[Unit]
Description=TDengine server service
After=network-online.target taosadapter.service
Wants=network-online.target taosadapter.service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
......
......@@ -18,5 +18,5 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_ALL=en_US.UTF-8
EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042
CMD ["run_taosd.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ]
......@@ -68,8 +68,8 @@ function replace_community_jh() {
# packaging/tools/startPre.sh
sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/startPre.sh
sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/jh_taos\"/g" ${top_dir}/packaging/tools/startPre.sh
# packaging/tools/run_taosd.sh
sed -i "s/taosd/jh_taosd/g" ${top_dir}/packaging/tools/run_taosd.sh
# packaging/tools/run_taosd_and_taosadapter.sh
sed -i "s/taosd/jh_taosd/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh
# packaging/tools/install.sh
sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/install.sh
sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/install.sh
......
......@@ -68,8 +68,8 @@ function replace_community_kh() {
# packaging/tools/startPre.sh
sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/startPre.sh
sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/kinghistorian\"/g" ${top_dir}/packaging/tools/startPre.sh
# packaging/tools/run_taosd.sh
sed -i "s/taosd/khserver/g" ${top_dir}/packaging/tools/run_taosd.sh
# packaging/tools/run_taosd_and_taosadapter.sh
sed -i "s/taosd/khserver/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh
# packaging/tools/install.sh
sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/install.sh
sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/install.sh
......
......@@ -113,8 +113,8 @@ function replace_community_power() {
# packaging/tools/startPre.sh
sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/startPre.sh
sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/Power\"/g" ${top_dir}/packaging/tools/startPre.sh
# packaging/tools/run_taosd.sh
sed -i "s/taosd/powerd/g" ${top_dir}/packaging/tools/run_taosd.sh
# packaging/tools/run_taosd_and_taosadapter.sh
sed -i "s/taosd/powerd/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh
# packaging/tools/install.sh
sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/install.sh
sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/install.sh
......
......@@ -68,8 +68,8 @@ function replace_community_pro() {
# packaging/tools/startPre.sh
sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/startPre.sh
sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/ProDB\"/g" ${top_dir}/packaging/tools/startPre.sh
# packaging/tools/run_taosd.sh
sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/tools/run_taosd.sh
# packaging/tools/run_taosd_and_taosadapter.sh
sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh
# packaging/tools/install.sh
sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/install.sh
sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/install.sh
......
......@@ -64,8 +64,8 @@ function replace_community_tq() {
# packaging/tools/startPre.sh
sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/startPre.sh
sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/tq\"/g" ${top_dir}/packaging/tools/startPre.sh
# packaging/tools/run_taosd.sh
sed -i "s/taosd/tqd/g" ${top_dir}/packaging/tools/run_taosd.sh
# packaging/tools/run_taosd_and_taosadapter.sh
sed -i "s/taosd/tqd/g" ${top_dir}/packaging/tools/run_taosd_and_taosadapter.sh
# packaging/tools/install.sh
sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/install.sh
sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/install.sh
......
......@@ -194,7 +194,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -206,7 +206,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then
......
......@@ -175,7 +175,8 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo}rm -f ${bin_link_dir}/rmtaos || :
${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}cp -r ${binary_dir}/build/bin/taos ${install_main_dir}/bin || :
......@@ -189,7 +190,7 @@ function install_bin() {
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/run_taosd.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/run_taosd_and_taosadapter.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin
${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -201,7 +202,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
else
......
......@@ -64,7 +64,7 @@ else
${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
......
......@@ -88,7 +88,7 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
}
function clean_lib() {
......
......@@ -209,6 +209,15 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
(JNIEnv *, jobject, jlong, jstring, jlong);
/**
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: setTableNameTagsImp
* Signature: (JLjava/lang/String;I[B[B[B[BJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp
(JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: bindColDataImp
......@@ -217,6 +226,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: stmt_add_batch
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: executeBatchImp
......@@ -231,13 +248,12 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
/**
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: setTableNameTagsImp
* Signature: (JLjava/lang/String;I[B[B[B[BJ)I
* Method: stmt_errstr
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp
(JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong);
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_stmtErrorMsgImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
......
......@@ -805,6 +805,78 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp(
JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList,
jbyteArray lengthList, jbyteArray nullList, jlong conn) {
TAOS *tsconn = (TAOS *)conn;
if (tsconn == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
return JNI_SQL_NULL;
}
jsize len = (*env)->GetArrayLength(env, tags);
char *tagsData = (char *)calloc(1, len);
(*env)->GetByteArrayRegion(env, tags, 0, len, (jbyte *)tagsData);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
len = (*env)->GetArrayLength(env, lengthList);
int64_t *lengthArray = (int64_t *)calloc(1, len);
(*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray);
if ((*env)->ExceptionCheck(env)) {
}
len = (*env)->GetArrayLength(env, typeList);
char *typeArray = (char *)calloc(1, len);
(*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray);
if ((*env)->ExceptionCheck(env)) {
}
len = (*env)->GetArrayLength(env, nullList);
int32_t *nullArray = (int32_t *)calloc(1, len);
(*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray);
if ((*env)->ExceptionCheck(env)) {
}
const char *name = (*env)->GetStringUTFChars(env, tableName, NULL);
char *curTags = tagsData;
TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND));
for (int32_t i = 0; i < numOfTags; ++i) {
tagsBind[i].buffer_type = typeArray[i];
tagsBind[i].buffer = curTags;
tagsBind[i].is_null = &nullArray[i];
tagsBind[i].length = (uintptr_t *)&lengthArray[i];
curTags += lengthArray[i];
}
int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind);
int32_t nTags = (int32_t)numOfTags;
jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags);
tfree(tagsData);
tfree(lengthArray);
tfree(typeArray);
tfree(nullArray);
tfree(tagsBind);
(*env)->ReleaseStringUTFChars(env, tableName, name);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
return JNI_SUCCESS;
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(
JNIEnv *env, jobject jobj, jlong stmt, jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList,
jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
......@@ -872,8 +944,8 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt,
jlong con) {
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEnv *env, jobject jobj, jlong stmt,
jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
......@@ -886,19 +958,18 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J
return JNI_SQL_NULL;
}
taos_stmt_add_batch(pStmt);
int32_t code = taos_stmt_execute(pStmt);
int32_t code = taos_stmt_add_batch(pStmt);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt,
jlong con) {
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt,
jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
......@@ -911,86 +982,58 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv
return JNI_SQL_NULL;
}
int32_t code = taos_stmt_close(pStmt);
int32_t code = taos_stmt_execute(pStmt);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp(
JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList,
jbyteArray lengthList, jbyteArray nullList, jlong conn) {
TAOS *tsconn = (TAOS *)conn;
if (tsconn == NULL) {
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt,
jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return JNI_SQL_NULL;
}
jsize len = (*env)->GetArrayLength(env, tags);
char *tagsData = (char *)calloc(1, len);
(*env)->GetByteArrayRegion(env, tags, 0, len, (jbyte *)tagsData);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
len = (*env)->GetArrayLength(env, lengthList);
int64_t *lengthArray = (int64_t *)calloc(1, len);
(*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray);
if ((*env)->ExceptionCheck(env)) {
int32_t code = taos_stmt_close(pStmt);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
len = (*env)->GetArrayLength(env, typeList);
char *typeArray = (char *)calloc(1, len);
(*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray);
if ((*env)->ExceptionCheck(env)) {
}
jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
return JNI_SUCCESS;
}
len = (*env)->GetArrayLength(env, nullList);
int32_t *nullArray = (int32_t *)calloc(1, len);
(*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray);
if ((*env)->ExceptionCheck(env)) {
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_stmtErrorMsgImp(JNIEnv *env, jobject jobj, jlong stmt,
jlong con) {
char errMsg[128];
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
sprintf(errMsg, "jobj:%p, connection already closed", jobj);
return (*env)->NewStringUTF(env, errMsg);
}
const char *name = (*env)->GetStringUTFChars(env, tableName, NULL);
char *curTags = tagsData;
TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND));
for (int32_t i = 0; i < numOfTags; ++i) {
tagsBind[i].buffer_type = typeArray[i];
tagsBind[i].buffer = curTags;
tagsBind[i].is_null = &nullArray[i];
tagsBind[i].length = (uintptr_t *)&lengthArray[i];
curTags += lengthArray[i];
TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
sprintf(errMsg, "jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return (*env)->NewStringUTF(env, errMsg);
}
int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind);
int32_t nTags = (int32_t)numOfTags;
jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags);
tfree(tagsData);
tfree(lengthArray);
tfree(typeArray);
tfree(nullArray);
tfree(tagsBind);
(*env)->ReleaseStringUTFChars(env, tableName, name);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
return JNI_SUCCESS;
return (*env)->NewStringUTF(env, taos_stmt_errstr((TAOS_STMT *)stmt));
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
......
......@@ -485,7 +485,26 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) {
if (row == NULL) {
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
const char *showColumns[] = {"REPLICA", "QUORUM", "DAYS", "KEEP", "BLOCKS", NULL};
const char *showColumns[][2] = {
{"REPLICA", "REPLICA"},
{"QUORUM", "QUORUM"},
{"DAYS", "DAYS"},
#ifdef _STORAGE
{"KEEP0,KEEP1,KEEP2", "KEEP"},
#else
{"KEEP", "KEEP"},
#endif
{"CACHE(MB)", "CACHE"},
{"BLOCKS", "BLOCKS"},
{"MINROWS", "MINROWS"},
{"MAXROWS", "MAXROWS"},
{"WALLEVEL", "WAL"},
{"FSYNC", "FSYNC"},
{"COMP", "COMP"},
{"CACHELAST", "CACHELAST"},
{"PRECISION", "PRECISION"},
{"UPDATE", "UPDATE"},
{NULL, NULL}};
SSqlObj *pSql = builder->pInterSql;
TAOS_FIELD *fields = taos_fetch_fields(pSql);
......@@ -499,12 +518,16 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) {
if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf);
for (int i = 1; i < num_fields; i++) {
for (int j = 0; showColumns[j] != NULL; j++) {
if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j], strlen(showColumns[j]))) {
for (int j = 0; showColumns[j][0] != NULL; j++) {
if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j][0], strlen(showColumns[j][0]))) {
memset(buf, 0, sizeof(buf));
ret = tscGetNthFieldResult(row, fields, lengths, i, buf);
if (ret == 0) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf);
if (STR_NOCASE_EQUAL(showColumns[j][0], strlen(showColumns[j][0]), "PRECISION", strlen("PRECISION"))) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s '%s'", showColumns[j][1], buf);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j][1], buf);
}
}
}
}
......
......@@ -1658,7 +1658,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->qId = 0;
pRes->numOfRows = 0;
strcpy(pSql->sqlstr, sql);
strntolower(pSql->sqlstr, sql, (int32_t)sqlLen);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
......@@ -1849,6 +1849,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
tscResetSqlCmd(pCmd, false, pSql->self);
pCmd->insertParam.pTableBlockHashList = hashList;
}
code = tsParseSql(pStmt->pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
......
......@@ -2431,7 +2431,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
if (pTableMeta->tableType != TSDB_TEMP_TABLE) {
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid);
}
} else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT) { // simple column projection query
} else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT || tokenId == TK_BOOL) { // simple column projection query
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
// user-specified constant value as a new result column
......@@ -4664,6 +4664,9 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr,
int32_t rightType = SQLEXPR_TYPE_UNASSIGNED;
const char* msg1 = "arithmetic expression composed with columns from different tables";
const char* msg2 = "arithmetic expression composed with functions/columns of different types";
const char* msg3 = "comparison/logical expression involving string operands is not supported";
const char* msg4 = "comparison/logical expression involving function result is not supported";
int32_t leftHeight = 0;
int32_t ret = validateSQLExprItem(pCmd, pExpr->pLeft, pQueryInfo, pList, &leftType, &uidLeft, &leftHeight);
if (ret != TSDB_CODE_SUCCESS) {
......@@ -4696,6 +4699,21 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr,
} else if (leftType == SQLEXPR_TYPE_SCALAR || rightType == SQLEXPR_TYPE_SCALAR){
*type = SQLEXPR_TYPE_SCALAR;
}
// comparison/logical operations
if (pExpr->tokenId == TK_EQ || pExpr->tokenId == TK_NE ||
pExpr->tokenId == TK_GT || pExpr->tokenId == TK_GE ||
pExpr->tokenId == TK_LT || pExpr->tokenId == TK_LE ||
pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) {
if ((leftType == SQLEXPR_TYPE_VALUE && pExpr->pLeft->tokenId == TK_STRING) ||
(rightType == SQLEXPR_TYPE_VALUE && pExpr->pRight->tokenId == TK_STRING)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (leftType == SQLEXPR_TYPE_AGG || leftType == SQLEXPR_TYPE_SCALAR ||
rightType == SQLEXPR_TYPE_AGG || rightType == SQLEXPR_TYPE_SCALAR) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
}
return TSDB_CODE_SUCCESS;
}
......@@ -5666,7 +5684,16 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
if (!p->_node.pLeft || !p->_node.pRight) {
break;
}
int32_t retVal = TSDB_CODE_SUCCESS;
if (p->_node.pLeft && (retVal = validateTagCondExpr(pCmd, p->_node.pLeft)) != TSDB_CODE_SUCCESS) {
return retVal;
}
if (p->_node.pRight && (retVal = validateTagCondExpr(pCmd, p->_node.pRight)) != TSDB_CODE_SUCCESS) {
return retVal;
}
if (IS_ARITHMETIC_OPTR(p->_node.optr)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
......@@ -5702,8 +5729,6 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
schemaType = TSDB_DATA_TYPE_DOUBLE;
}
int32_t retVal = TSDB_CODE_SUCCESS;
int32_t bufLen = 0;
if (IS_NUMERIC_TYPE(vVariant->nType)) {
bufLen = 60; // The maximum length of string that a number is converted to.
......@@ -6668,7 +6693,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
}
if (tscIsProjectionQuery(pQueryInfo)) {
bool found = false;
bool found = false;
for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[index.columnIndex].colId) {
......@@ -6680,10 +6705,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (!found) {
int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols);
pSupInfo->visible = false;
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
}
......@@ -6704,17 +6729,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg4 = "set tag value only available for table";
const char* msg5 = "only support add one tag";
const char* msg6 = "column can only be modified by super table";
const char* msg7 = "no tags can be dropped";
const char* msg8 = "only support one tag";
const char* msg9 = "tag name too long";
const char* msg10 = "invalid tag name";
const char* msg11 = "primary tag cannot be dropped";
const char* msg12 = "update normal column not supported";
const char* msg13 = "invalid tag value";
const char* msg14 = "tag value too long";
const char* msg15 = "no columns can be dropped";
const char* msg16 = "only support one column";
const char* msg17 = "invalid column name";
......@@ -6722,7 +6747,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg19 = "invalid new tag name";
const char* msg20 = "table is not super table";
const char* msg21 = "only binary/nchar column length could be modified";
const char* msg23 = "only column length coulbe be modified";
const char* msg23 = "only column length can be be modified";
const char* msg24 = "invalid binary/nchar column length";
const char* msg25 = "json type error, should be string";
......@@ -6785,7 +6810,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) {
if (tscGetNumOfTags(pTableMeta) == 1) {
......@@ -6818,7 +6843,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char name1[128] = {0};
strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen);
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
......@@ -6868,7 +6893,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// Note: update can only be applied to table not super table.
// the following is used to handle tags value for table created according to super table
pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL;
SArray* pVarList = pAlterSQL->varList;
tVariantListItem* item = taosArrayGet(pVarList, 0);
int16_t numOfTags = tscGetNumOfTags(pTableMeta);
......
......@@ -1882,6 +1882,13 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute query processing", pSql->self, pSql->self);
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE, pSql->self);
if (pQueryInfo->pQInfo == NULL) {
taosHashCleanup(tableGroupInfo.map);
taosArrayDestroy(&group);
tscAsyncResultOnError(pSql);
pRes->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
return pRes->code;
}
}
uint64_t localQueryId = pSql->self;
......
......@@ -3356,7 +3356,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
}
tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self,
tscWarn("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self,
pParentObj->res.numOfRows, numOfFailed, numOfSub);
tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.insertParam.numOfTables);
......@@ -3905,8 +3905,11 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
STsBufInfo bufInfo = {0};
SQueryParam param = {.pOperator = pa};
/*int32_t code = */initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger);
int32_t code = initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger);
taosArrayDestroy(&pa);
if (code != TSDB_CODE_SUCCESS) {
goto _cleanup;
}
return pQInfo;
......
......@@ -387,6 +387,10 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
} else { // set the user specified locale failed, use default LC_CTYPE as current locale
locale = setlocale(LC_CTYPE, tsLocale);
if (locale == NULL) {
tscError("failed to set locale:%s failed, neither default LC_CTYPE: %s", pStr, tsLocale);
return -1;
}
tscInfo("failed to set locale:%s, current locale:%s", pStr, tsLocale);
}
......
This diff is collapsed.
......@@ -59,7 +59,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
parameters = new Object[parameterCnt];
if (parameterCnt > 1) {
// the table name is also a parameter, so ignore it.
this.colData = new ArrayList<>();
......@@ -530,8 +529,14 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
public void setTableName(String name) throws SQLException {
if (this.nativeStmtHandle == 0) {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
this.nativeStmtHandle = connector.prepareStmt(rawSql);
}
if (this.tableName != null) {
this.columnDataExecuteBatch();
this.columnDataAddBatch();
this.columnDataClearBatchInternal();
}
this.tableName = name;
......@@ -963,10 +968,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
private void columnDataClearBatchInternal() {
int size = this.colData.size();
this.colData.clear();
this.colData.addAll(Collections.nCopies(size, null));
this.tableName = null; // clear the table name
this.tableName = null;
if (this.tableTags != null)
this.tableTags.clear();
this.tagValueLength = 0;
if (this.colData != null)
this.colData.clear();
}
......
......@@ -430,15 +430,27 @@ static void dumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_
case TSDB_DATA_TYPE_TINYINT:
fprintf(fp, "%d", *((int8_t *)val));
break;
case TSDB_DATA_TYPE_UTINYINT:
fprintf(fp, "%u", *((uint8_t *)val));
break;
case TSDB_DATA_TYPE_SMALLINT:
fprintf(fp, "%d", *((int16_t *)val));
break;
case TSDB_DATA_TYPE_USMALLINT:
fprintf(fp, "%u", *((uint16_t *)val));
break;
case TSDB_DATA_TYPE_INT:
fprintf(fp, "%d", *((int32_t *)val));
break;
case TSDB_DATA_TYPE_UINT:
fprintf(fp, "%u", *((uint32_t *)val));
break;
case TSDB_DATA_TYPE_BIGINT:
fprintf(fp, "%" PRId64, *((int64_t *)val));
break;
case TSDB_DATA_TYPE_UBIGINT:
fprintf(fp, "%" PRIu64, *((uint64_t *)val));
break;
case TSDB_DATA_TYPE_FLOAT:
fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
break;
......
Subproject commit 9f1b028ca325f67762826be4caf58d356ad7e389
Subproject commit 8c11dcf6856bca0860e7e9999f57274864218f4f
......@@ -12,7 +12,7 @@ ELSEIF(TD_BUILD_TAOSA_INTERNAL)
MESSAGE("${Yellow} use taosa internal as httpd ${ColourReset}")
ELSE ()
MESSAGE("")
MESSAGE("${Green} use taosadapter as httpd ${ColourReset}")
MESSAGE("${Green} use taosadapter as httpd, platform is ${PLATFORM_ARCH_STR} ${ColourReset}")
EXECUTE_PROCESS(
COMMAND git rev-parse --abbrev-ref HEAD
......@@ -26,7 +26,7 @@ ELSE ()
STRING(SUBSTRING "${taos_version}" 12 -1 taos_version)
STRING(STRIP "${taos_version}" taos_version)
ELSE ()
STRING(CONCAT taos_version "branch_" "${taos_version}")
STRING(CONCAT taos_version "_branch_" "${taos_version}")
STRING(STRIP "${taos_version}" taos_version)
ENDIF ()
EXECUTE_PROCESS(
......@@ -61,7 +61,7 @@ ELSE ()
COMMAND git clean -f -d
BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || :
COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || :
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
......
......@@ -1157,7 +1157,9 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
monError("failed to save vgroup_%d info, reason: invalid row %s len, sql:%s", vgId, (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
char tmpBuf[10] = {0};
memcpy(tmpBuf, row[i], charLen);
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, tmpBuf);
} else if (strcmp(fields[i].name, "onlines") == 0) {
pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]);
} else if (v_dnode_str && strcmp(v_dnode_str, "_dnode") == 0) {
......
Subproject commit 8f9501a30b1893c6616d644a924c995aa21ad957
Subproject commit 6d401bb95e1125ce4aad012dc23191ed85af8b3b
......@@ -5117,7 +5117,7 @@ SAggFunctionInfo aAggs[40] = {{
"twa",
TSDB_FUNC_TWA,
TSDB_FUNC_TWA,
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
twa_function_setup,
twa_function,
twa_function_finalizer,
......@@ -5393,7 +5393,7 @@ SAggFunctionInfo aAggs[40] = {{
"elapsed",
TSDB_FUNC_ELAPSED,
TSDB_FUNC_ELAPSED,
TSDB_BASE_FUNC_SO,
TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
elapsedSetup,
elapsedFunction,
elapsedFinalizer,
......
This diff is collapsed.
......@@ -354,6 +354,10 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3
}
SFillInfo* pFillInfo = calloc(1, sizeof(SFillInfo));
if (pFillInfo == NULL) {
return NULL;
}
taosResetFillInfo(pFillInfo, skey);
pFillInfo->order = order;
......@@ -371,6 +375,10 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3
pFillInfo->interval.slidingUnit = slidingUnit;
pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
if (pFillInfo->pData == NULL) {
tfree(pFillInfo);
return NULL;
}
// if (numOfTags > 0) {
pFillInfo->pTags = calloc(numOfCols, sizeof(SFillTagColInfo));
......
......@@ -927,7 +927,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
} else {
assert(optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL || optr == FILTER_DUMMY_EMPTY_OPTR);
if(optr != TSDB_RELATION_ISNULL && optr != TSDB_RELATION_NOTNULL && optr != FILTER_DUMMY_EMPTY_OPTR) {
return -1;
}
}
SFilterField *col = FILTER_UNIT_LEFT_FIELD(info, u);
......@@ -1257,7 +1259,8 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g
} else {
filterAddFieldFromNode(info, tree->_node.pRight, &right);
filterAddUnit(info, tree->_node.optr, &left, &right, &uidx);
ret = filterAddUnit(info, tree->_node.optr, &left, &right, &uidx);
CHK_LRET(ret != TSDB_CODE_SUCCESS, TSDB_CODE_QRY_APP_ERROR, "invalid where condition");
SFilterGroup fgroup = {0};
filterAddUnitToGroup(&fgroup, uidx);
......@@ -1282,7 +1285,7 @@ int32_t filterAddUnitFromUnit(SFilterInfo *dst, SFilterInfo *src, SFilterUnit* u
void *data = FILTER_UNIT_VAL_DATA(src, u);
if (IS_VAR_DATA_TYPE(type)) {
if (FILTER_UNIT_OPTR(u) == TSDB_RELATION_IN) {
filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, 0, false);
filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, sizeof(SHashObj), false);
t = FILTER_GET_FIELD(dst, right);
......@@ -1574,7 +1577,9 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit);
SSchema *sch = left->desc;
len = sprintf(str, "UNIT[%d] => [%d][%s] %s [", i, sch->colId, sch->name, gOptrStr[unit->compare.optr].str);
if (unit->compare.optr >= TSDB_RELATION_INVALID && unit->compare.optr <= TSDB_RELATION_CONTAINS){
len = sprintf(str, "UNIT[%d] => [%d][%s] %s [", i, sch->colId, sch->name, gOptrStr[unit->compare.optr].str);
}
if (unit->right.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) {
SFilterField *right = FILTER_UNIT_RIGHT_FIELD(info, unit);
......@@ -1591,7 +1596,9 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
if (unit->compare.optr2) {
strcat(str, " && ");
sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str);
if (unit->compare.optr2 >= TSDB_RELATION_INVALID && unit->compare.optr2 <= TSDB_RELATION_CONTAINS){
sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str);
}
if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) {
SFilterField *right = FILTER_UNIT_RIGHT2_FIELD(info, unit);
......@@ -3588,7 +3595,7 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar
if (FILTER_EMPTY_RES(info) || FILTER_ALL_RES(info)) {
return TSDB_CODE_SUCCESS;
}
for (uint32_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) {
SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i];
int32_t type = FILTER_GET_COL_FIELD_TYPE(fi);
......@@ -3602,6 +3609,15 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar
char *src = FILTER_GET_COL_FIELD_DATA(fi, j);
char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j);
int32_t len = 0;
char *varSrc = varDataVal(src);
size_t k = 0, varSrcLen = varDataLen(src);
while (k < varSrcLen && varSrc[k++] == -1) {}
if (k == varSrcLen) {
/* NULL */
varDataLen(dst) = (VarDataLenT) varSrcLen;
varDataCopy(dst, src);
continue;
}
bool ret = taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
if(!ret) {
qError("filterConverNcharColumns taosMbsToUcs4 error");
......
......@@ -18,6 +18,7 @@
#include "taosdef.h"
#include "taosmsg.h"
#include "tcmdtype.h"
#include "tcompare.h"
#include "tstrbuild.h"
#include "ttoken.h"
#include "ttokendef.h"
......@@ -318,12 +319,17 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
}
if ((pLeft != NULL && pRight != NULL) &&
(optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM)) {
(optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM ||
optrType == TK_EQ || optrType == TK_NE || optrType == TK_LT || optrType == TK_GT || optrType == TK_LE || optrType == TK_GE ||
optrType == TK_AND || optrType == TK_OR)) {
/*
* if a exprToken is noted as the TK_TIMESTAMP, the time precision is microsecond
* Otherwise, the time precision is adaptive, determined by the time precision from databases.
*/
if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) ||
(pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_BOOL) ||
(pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_BOOL) ||
(pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_INTEGER) ||
(pLeft->tokenId == TK_TIMESTAMP && pRight->tokenId == TK_TIMESTAMP)) {
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pExpr->tokenId = pLeft->tokenId;
......@@ -360,12 +366,46 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
pExpr->value.i64 = pLeft->value.i64 % pRight->value.i64;
break;
}
case TK_EQ: {
pExpr->value.i64 = (pLeft->value.i64 == pRight->value.i64) ? 1 : 0;
break;
}
case TK_NE: {
pExpr->value.i64 = (pLeft->value.i64 != pRight->value.i64) ? 1 : 0;
break;
}
case TK_LT: {
pExpr->value.i64 = (pLeft->value.i64 < pRight->value.i64) ? 1 : 0;
break;
}
case TK_GT: {
pExpr->value.i64 = (pLeft->value.i64 > pRight->value.i64) ? 1 : 0;
break;
}
case TK_LE: {
pExpr->value.i64 = (pLeft->value.i64 <= pRight->value.i64) ? 1 : 0;
break;
}
case TK_GE: {
pExpr->value.i64 = (pLeft->value.i64 >= pRight->value.i64) ? 1 : 0;
break;
}
case TK_AND: {
pExpr->value.i64 = (pLeft->value.i64 && pRight->value.i64) ? 1 : 0;
break;
}
case TK_OR: {
pExpr->value.i64 = (pLeft->value.i64 || pRight->value.i64) ? 1 : 0;
break;
}
}
tSqlExprDestroy(pLeft);
tSqlExprDestroy(pRight);
} else if ((pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_INTEGER) ||
(pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_FLOAT) ||
(pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_FLOAT) ||
(pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_BOOL) ||
(pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_FLOAT)) {
pExpr->value.nType = TSDB_DATA_TYPE_DOUBLE;
pExpr->tokenId = TK_FLOAT;
......@@ -395,6 +435,80 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
pExpr->value.dKey = left - ((int64_t)(left / right)) * right;
break;
}
case TK_EQ: {
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
if (compareDoubleVal(&left, &right) == 0) {
pExpr->value.i64 = 1;
} else {
pExpr->value.i64 = 0;
}
break;
}
case TK_NE: {
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
if (compareDoubleVal(&left, &right) != 0) {
pExpr->value.i64 = 1;
} else {
pExpr->value.i64 = 0;
}
break;
}
case TK_LT: {
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
if (compareDoubleVal(&left, &right) == -1) {
pExpr->value.i64 = 1;
} else {
pExpr->value.i64 = 0;
}
break;
}
case TK_GT: {
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
if (compareDoubleVal(&left, &right) == 1) {
pExpr->value.i64 = 1;
} else {
pExpr->value.i64 = 0;
}
break;
}
case TK_LE: {
int32_t res = compareDoubleVal(&left, &right);
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
if (res == 0 || res == -1) {
pExpr->value.i64 = 1;
} else {
pExpr->value.i64 = 0;
}
break;
}
case TK_GE: {
int32_t res = compareDoubleVal(&left, &right);
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
if (res == 0 || res == 1) {
pExpr->value.i64 = 1;
} else {
pExpr->value.i64 = 0;
}
break;
}
case TK_AND: {
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pExpr->value.i64 = (left && right) ? 1 : 0;
break;
}
case TK_OR: {
pExpr->tokenId = TK_INTEGER;
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pExpr->value.i64 = (left || right) ? 1 : 0;
break;
}
}
tSqlExprDestroy(pLeft);
......@@ -505,7 +619,7 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr));
memcpy(pExpr, pSrc, sizeof(*pSrc));
if (pSrc->pLeft) {
pExpr->pLeft = tSqlExprClone(pSrc->pLeft);
}
......@@ -518,7 +632,7 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) {
tVariantAssign(&pExpr->value, &pSrc->value);
//we don't clone paramList now because clone is only used for between/and
assert(pSrc->Expr.paramList == NULL);
pExpr->Expr.paramList = NULL;
return pExpr;
}
......
......@@ -963,9 +963,14 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
terrno = TSDB_CODE_RPC_INVALID_SESSION_ID; return NULL;
}
if (rpcIsReq(pHead->msgType) && htonl(pHead->msgVer) != tsVersion >> 8) {
tDebug("%s sid:%d, invalid client version:%x/%x %s", pRpc->label, sid, htonl(pHead->msgVer), tsVersion, taosMsg[pHead->msgType]);
terrno = TSDB_CODE_RPC_INVALID_VERSION; return NULL;
// compatibility between old version client and new version server, since 2.4.0.0
if (rpcIsReq(pHead->msgType)){
if((htonl(pHead->msgVer) >> 16 != tsVersion >> 24) ||
((htonl(pHead->msgVer) >> 16 == tsVersion >> 24) && htonl(pHead->msgVer) < ((2 << 16) | (4 << 8)))){
tError("%s sid:%d, invalid client version:%x/%x %s", pRpc->label, sid, htonl(pHead->msgVer), tsVersion, taosMsg[pHead->msgType]);
terrno = TSDB_CODE_RPC_INVALID_VERSION;
return NULL;
}
}
pConn = rpcGetConnObj(pRpc, sid, pRecv);
......@@ -983,7 +988,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
sid = pConn->sid;
if (pConn->chandle == NULL) pConn->chandle = pRecv->chandle;
pConn->peerIp = pRecv->ip;
pConn->peerIp = pRecv->ip;
pConn->peerPort = pRecv->port;
if (pHead->port) pConn->peerPort = htons(pHead->port);
......
......@@ -599,6 +599,7 @@ void taosHashClear(SHashObj *pHashObj) {
__wr_unlock(&pHashObj->lock, pHashObj->type);
}
// the input parameter should be SHashObj **, so that the caller's pointer can be cleared via tfree(*pHashObj)
void taosHashCleanup(SHashObj *pHashObj) {
if (pHashObj == NULL) {
return;
......
......@@ -266,6 +266,7 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
int32_t j = 0;
int32_t o = 0;
int32_t m = 0;
char escape = '\\'; // the escape character is backslash
while ((c = patterStr[i++]) != 0) {
if (c == pInfo->matchAll) { /* Match "*" */
......@@ -308,13 +309,30 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
++o;
if (j <= size) {
if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; }
if (c == '\\' && patterStr[i] == '%' && c1 == '%') { i++; continue; }
if (c == escape && patterStr[i] == pInfo->matchOne){
if(c1 == pInfo->matchOne){
i++;
continue;
}
else{
return TSDB_PATTERN_NOMATCH;
}
}
if (c == escape && patterStr[i] == pInfo->matchAll){
if(c1 == pInfo->matchAll){
i++;
continue;
}
else{
return TSDB_PATTERN_NOMATCH;
}
}
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
continue;
}
}
return TSDB_PATTERN_NOMATCH;
}
......@@ -428,8 +446,24 @@ int WCSPatternMatch(const uint32_t *patterStr, const uint32_t *str, size_t size,
c1 = str[j++];
if (j <= size) {
if (c == escape && patterStr[i] == matchOne && c1 == matchOne) { i++; continue; }
if (c == escape && patterStr[i] == matchAll && c1 == matchAll) { i++; continue; }
if (c == escape && patterStr[i] == matchOne){
if(c1 == matchOne){
i++;
continue;
}
else{
return TSDB_PATTERN_NOMATCH;
}
}
if (c == escape && patterStr[i] == matchAll){
if(c1 == matchAll){
i++;
continue;
}
else{
return TSDB_PATTERN_NOMATCH;
}
}
if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) {
continue;
}
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
from taos import *
class TDTestCase:
def caseDescription(self):
'''
case1<slzhou>: [TD-12977] fix invalid upper-case table name in the stmt API
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self._conn = conn
self._dbname = "TD12977"
def run(self):
tdSql.prepare()
self._conn.execute("drop database if exists %s" % self._dbname)
self._conn.execute("create database if not exists %s" % self._dbname)
self._conn.select_db(self._dbname)
self._conn.execute("create stable STB(ts timestamp, n int) tags(b int)")
stmt = self._conn.statement("insert into ? using STB tags(?) values(?, ?)")
params = new_bind_params(1)
params[0].int(4);
stmt.set_tbname_tags("ct", params);
multi_params = new_multi_binds(2);
multi_params[0].timestamp([1626861392589, 1626861392590])
multi_params[1].int([123,456])
stmt.bind_param_batch(multi_params)
stmt.execute()
tdSql.query("select * from stb")
tdSql.checkRows(2)
stmt.close()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
This diff is collapsed.
......@@ -131,11 +131,17 @@ class TDTestCase:
tdSql.checkData(0, 1, r'\%')
# [TD-12815] like wildcard(%, _) are not supported nchar
tdSql.execute(r"insert into tt values(1591050708000, 'h\%d')")
tdSql.execute(r"insert into tt values(1591070708000, 'h%d')")
tdSql.execute(r"insert into tt values(1591080808000, 'h\_j')")
tdSql.execute(r"insert into tt values(1591080708000, 'h_j')")
tdSql.execute(r"insert into tt values(1591090708000, 'h\\j')")
tdSql.query(r"select * from tt where `i\t` like 'h\\\%d'")
tdSql.checkRows(1)
tdSql.query(r"select * from tt where `i\t` like 'h\%d'")
tdSql.checkRows(1)
tdSql.query(r"select * from tt where `i\t` like 'h\\\_j'")
tdSql.checkRows(1)
tdSql.query(r"select * from tt where `i\t` like 'h\_j'")
tdSql.checkRows(1)
tdSql.query(r"select * from tt where `i\t` like 'h\\j'")
......
###################################################################
# Copyright (c) 2021 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def caseDescription(self):
'''
case1<Ganlin Zhao>: [TD-12861]: taos shell crashes with a core dump for queries such as "select first(c1)==max(c1) from st"
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
print("running {}".format(__file__))
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db")
tdSql.execute('use db')
#Prepare data
tdSql.execute("create table tb (ts timestamp, value int);")
tdSql.execute("insert into tb values (now, 1);")
tdSql.execute("insert into tb values (now, 2);")
tdSql.execute("insert into tb values (now, 3);")
tdSql.execute("insert into tb values (now, 4);")
##operator: =
tdSql.error('select count(*) = 4 from tb;')
tdSql.error('select avg(value) = 2.5 from tb')
tdSql.error('select twa(value) = 3.03 from tb')
tdSql.error('select sum(value) = 10 from tb')
tdSql.error('select stddev(value) = 2.0 from tb')
tdSql.error('select min(value) = 1 from tb')
tdSql.error('select max(value) = 4 from tb')
tdSql.error('select first(*) = 3 from tb')
tdSql.error('select last(*) = 3 from tb')
tdSql.error('select top(value, 3) = 3 from tb')
tdSql.error('select bottom(value, 3) = 3 from tb')
tdSql.error('select percentile(value, 50) = 3 from tb')
tdSql.error('select apercentile(value, 50) = 3 from tb')
tdSql.error('select last_row(*) = 3 from tb')
tdSql.error('select diff(value) = 3 from tb')
tdSql.error('select ceil(value) = 12 from tb')
tdSql.error('select floor(3.5) = 3 from tb')
tdSql.error('select round(3.5) = 3 from tb')
tdSql.error('select count(*) = max(value) from tb')
tdSql.error('select avg(value) = min(value) from tb')
tdSql.error('select first(value) = last(value) from tb')
tdSql.error('select round(value) = round(value) from tb')
##operator: !=
tdSql.error('select count(*) != 4 from tb;')
tdSql.error('select avg(value) != 2.5 from tb')
tdSql.error('select twa(value) != 3.03 from tb')
tdSql.error('select sum(value) != 10 from tb')
tdSql.error('select stddev(value) != 2.0 from tb')
tdSql.error('select min(value) != 1 from tb')
tdSql.error('select max(value) != 4 from tb')
tdSql.error('select first(*) != 3 from tb')
tdSql.error('select last(*) != 3 from tb')
tdSql.error('select top(value, 3) != 3 from tb')
tdSql.error('select bottom(value, 3) != 3 from tb')
tdSql.error('select percentile(value, 50) != 3 from tb')
tdSql.error('select apercentile(value, 50) != 3 from tb')
tdSql.error('select last_row(*) != 3 from tb')
tdSql.error('select diff(value) != 3 from tb')
tdSql.error('select ceil(value) != 12 from tb')
tdSql.error('select floor(3.5) != 3 from tb')
tdSql.error('select round(3.5) != 3 from tb')
tdSql.error('select count(*) != max(value) from tb')
tdSql.error('select avg(value) != min(value) from tb')
tdSql.error('select first(value) != last(value) from tb')
tdSql.error('select round(value) != round(value) from tb')
##operator: <>
tdSql.error('select count(*) <> 4 from tb;')
tdSql.error('select avg(value) <> 2.5 from tb')
tdSql.error('select twa(value) <> 3.03 from tb')
tdSql.error('select sum(value) <> 10 from tb')
tdSql.error('select stddev(value) <> 2.0 from tb')
tdSql.error('select min(value) <> 1 from tb')
tdSql.error('select max(value) <> 4 from tb')
tdSql.error('select first(*) <> 3 from tb')
tdSql.error('select last(*) <> 3 from tb')
tdSql.error('select top(value, 3) <> 3 from tb')
tdSql.error('select bottom(value, 3) <> 3 from tb')
tdSql.error('select percentile(value, 50) <> 3 from tb')
tdSql.error('select apercentile(value, 50) <> 3 from tb')
tdSql.error('select last_row(*) <> 3 from tb')
tdSql.error('select diff(value) <> 3 from tb')
tdSql.error('select ceil(value) <> 12 from tb')
tdSql.error('select floor(3.5) <> 3 from tb')
tdSql.error('select round(3.5) <> 3 from tb')
tdSql.error('select count(*) <> max(value) from tb')
tdSql.error('select avg(value) <> min(value) from tb')
tdSql.error('select first(value) <> last(value) from tb')
tdSql.error('select round(value) <> round(value) from tb')
##operator: <
tdSql.error('select count(*) < 4 from tb;')
tdSql.error('select avg(value) < 2.5 from tb')
tdSql.error('select twa(value) < 3.03 from tb')
tdSql.error('select sum(value) < 10 from tb')
tdSql.error('select stddev(value) < 2.0 from tb')
tdSql.error('select min(value) < 1 from tb')
tdSql.error('select max(value) < 4 from tb')
tdSql.error('select first(*) < 3 from tb')
tdSql.error('select last(*) < 3 from tb')
tdSql.error('select top(value, 3) < 3 from tb')
tdSql.error('select bottom(value, 3) < 3 from tb')
tdSql.error('select percentile(value, 50) < 3 from tb')
tdSql.error('select apercentile(value, 50) < 3 from tb')
tdSql.error('select last_row(*) < 3 from tb')
tdSql.error('select diff(value) < 3 from tb')
tdSql.error('select ceil(value) < 12 from tb')
tdSql.error('select floor(3.5) < 3 from tb')
tdSql.error('select round(3.5) < 3 from tb')
tdSql.error('select count(*) < max(value) from tb')
tdSql.error('select avg(value) < min(value) from tb')
tdSql.error('select first(value) < last(value) from tb')
tdSql.error('select round(value) < round(value) from tb')
##operator: >
tdSql.error('select count(*) > 4 from tb;')
tdSql.error('select avg(value) > 2.5 from tb')
tdSql.error('select twa(value) > 3.03 from tb')
tdSql.error('select sum(value) > 10 from tb')
tdSql.error('select stddev(value) > 2.0 from tb')
tdSql.error('select min(value) > 1 from tb')
tdSql.error('select max(value) > 4 from tb')
tdSql.error('select first(*) > 3 from tb')
tdSql.error('select last(*) > 3 from tb')
tdSql.error('select top(value, 3) > 3 from tb')
tdSql.error('select bottom(value, 3) > 3 from tb')
tdSql.error('select percentile(value, 50) > 3 from tb')
tdSql.error('select apercentile(value, 50) > 3 from tb')
tdSql.error('select last_row(*) > 3 from tb')
tdSql.error('select diff(value) > 3 from tb')
tdSql.error('select ceil(value) > 12 from tb')
tdSql.error('select floor(3.5) > 3 from tb')
tdSql.error('select round(3.5) > 3 from tb')
tdSql.error('select count(*) > max(value) from tb')
tdSql.error('select avg(value) > min(value) from tb')
tdSql.error('select first(value) > last(value) from tb')
tdSql.error('select round(value) > round(value) from tb')
##operator: <=
tdSql.error('select count(*) <= 4 from tb;')
tdSql.error('select avg(value) <= 2.5 from tb')
tdSql.error('select twa(value) <= 3.03 from tb')
tdSql.error('select sum(value) <= 10 from tb')
tdSql.error('select stddev(value) <= 2.0 from tb')
tdSql.error('select min(value) <= 1 from tb')
tdSql.error('select max(value) <= 4 from tb')
tdSql.error('select first(*) <= 3 from tb')
tdSql.error('select last(*) <= 3 from tb')
tdSql.error('select top(value, 3) <= 3 from tb')
tdSql.error('select bottom(value, 3) <= 3 from tb')
tdSql.error('select percentile(value, 50) <= 3 from tb')
tdSql.error('select apercentile(value, 50) <= 3 from tb')
tdSql.error('select last_row(*) <= 3 from tb')
tdSql.error('select diff(value) <= 3 from tb')
tdSql.error('select ceil(value) <= 12 from tb')
tdSql.error('select floor(3.5) <= 3 from tb')
tdSql.error('select round(3.5) <= 3 from tb')
tdSql.error('select count(*) <= max(value) from tb')
tdSql.error('select avg(value) <= min(value) from tb')
tdSql.error('select first(value) <= last(value) from tb')
tdSql.error('select round(value) <= round(value) from tb')
##operator: >=
tdSql.error('select count(*) >= 4 from tb;')
tdSql.error('select avg(value) >= 2.5 from tb')
tdSql.error('select twa(value) >= 3.03 from tb')
tdSql.error('select sum(value) >= 10 from tb')
tdSql.error('select stddev(value) >= 2.0 from tb')
tdSql.error('select min(value) >= 1 from tb')
tdSql.error('select max(value) >= 4 from tb')
tdSql.error('select first(*) >= 3 from tb')
tdSql.error('select last(*) >= 3 from tb')
tdSql.error('select top(value, 3) >= 3 from tb')
tdSql.error('select bottom(value, 3) >= 3 from tb')
tdSql.error('select percentile(value, 50) >= 3 from tb')
tdSql.error('select apercentile(value, 50) >= 3 from tb')
tdSql.error('select last_row(*) >= 3 from tb')
tdSql.error('select diff(value) >= 3 from tb')
tdSql.error('select ceil(value) >= 12 from tb')
tdSql.error('select floor(3.5) >= 3 from tb')
tdSql.error('select round(3.5) >= 3 from tb')
tdSql.error('select count(*) >= max(value) from tb')
tdSql.error('select avg(value) >= min(value) from tb')
tdSql.error('select first(value) >= last(value) from tb')
tdSql.error('select round(value) >= round(value) from tb')
##operator: between and
tdSql.error('select count(*) between 3 and 4 from tb;')
tdSql.error('select avg(value) between 1.5 and 2.5 from tb')
tdSql.error('select twa(value) between 3.0 and 3.03 from tb')
tdSql.error('select sum(value) between 1 and 10 from tb')
tdSql.error('select stddev(value) between 1 and 2.0 from tb')
tdSql.error('select min(value) between 2 and 5 from tb')
tdSql.error('select max(value) between 1 and 10 from tb')
tdSql.error('select first(*) between 1 and 3 from tb')
tdSql.error('select last(*) between 0 and 3 from tb')
tdSql.error('select top(value, 3) between 0.0 and 3 from tb')
tdSql.error('select bottom(value, 3) between 0.0 and 3 from tb')
tdSql.error('select percentile(value, 50) between 1 and 3 from tb')
tdSql.error('select apercentile(value, 50) between 2 and 3 from tb')
tdSql.error('select last_row(*) between 2 and 3 from tb')
tdSql.error('select diff(value) between 1 and 3 from tb')
tdSql.error('select ceil(value) between 5 and 12 from tb')
tdSql.error('select floor(3.5) between 12 and 3 from tb')
tdSql.error('select round(3.5) between true and 3 from tb')
tdSql.error('select count(*) between min(value) and max(value) from tb')
tdSql.error('select avg(*) between min(value) and 3 from tb')
tdSql.error('select avg(value) between 1 and max(value) from tb')
tdSql.error('select first(value) between first(value) and last(value) from tb')
tdSql.error('select round(value) between ceil(value) and floor(value) from tb')
##operator: and
tdSql.error('select count(*) and 1 from tb;')
tdSql.error('select avg(value) and 0.0 from tb')
tdSql.error('select twa(value) and true from tb')
tdSql.error('select sum(value) and false from tb')
tdSql.error('select 1 and stddev(value) from tb')
tdSql.error('select 0.0 and min(value) from tb')
tdSql.error('select true and max(value) from tb')
tdSql.error('select false and first(*) from tb')
tdSql.error('select last(*) and first(value) from tb')
tdSql.error('select top(value, 3) and bottom(value, 3) from tb')
tdSql.error('select percentile(value, 50) and apercentile(value, 50) from tb')
tdSql.error('select diff(value) and ceil(value) from tb')
tdSql.error('select floor(3.5) and round(3.5) and ceil(3.5) from tb')
tdSql.error('select true and round(3.5) and 3 from tb')
##operator: or
tdSql.error('select count(*) or 1 from tb;')
tdSql.error('select avg(value) or 0.0 from tb')
tdSql.error('select twa(value) or true from tb')
tdSql.error('select sum(value) or false from tb')
tdSql.error('select 1 or stddev(value) from tb')
tdSql.error('select 0.0 or min(value) from tb')
tdSql.error('select true or max(value) from tb')
tdSql.error('select false or first(*) from tb')
tdSql.error('select last(*) or first(value) from tb')
tdSql.error('select top(value, 3) or bottom(value, 3) from tb')
tdSql.error('select percentile(value, 50) or apercentile(value, 50) from tb')
tdSql.error('select diff(value) or ceil(value) from tb')
tdSql.error('select floor(3.5) or round(3.5) or ceil(3.5) from tb')
tdSql.error('select true or round(3.5) or 3 from tb')
##operator: multiple operations
tdSql.error('select count(*) <> avg(value) or twa(value) and sum(value) or 1 from tb;')
tdSql.error('select 1 and stddev(value) <= min(value) or max(value) and first(*) or 0.0 from tb')
tdSql.error('select last(*) and first(value) or top(value, 3) and 3 between 4.0 and bottom(value, 3) from tb')
tdSql.error('select percentile(value, 50) or diff(value) = ceil(value) and apercentile(value, 50) from tb')
tdSql.error('select floor(3.5) or round(3.5) and ceil(3.5) > true and round(3.5) or 3 from tb')
tdSql.execute('drop database db')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def caseDescription(self):
'''
[TD-11510] taosBenchmark test cases
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
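# each JSON config below is expected to auto-create 8 child tables per super table
# and insert 160 rows in total per super table; the count() queries verify both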
cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.stb1")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb1")
tdSql.checkData(0, 0, 160)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.`stb1-2`")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb1-2`")
tdSql.checkData(0, 0, 160)
cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.stb2")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb2")
tdSql.checkData(0, 0, 160)
tdSql.query("show databases")
tdSql.checkData(0, 16, "us")
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.`stb2-2`")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb2-2`")
tdSql.checkData(0, 0, 160)
cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.stb3")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb3")
tdSql.checkData(0, 0, 160)
tdSql.query("show databases")
tdSql.checkData(0, 16, "ns")
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.`stb3-2`")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb3-2`")
tdSql.checkData(0, 0, 160)
cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.stb4")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb4")
tdSql.checkData(0, 0, 160)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.`stb4-2`")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb4-2`")
tdSql.checkData(0, 0, 160)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import subprocess
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def caseDescription(self):
'''
[TD-11510] taosBenchmark test cases
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
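# smoke-test the taosBenchmark command line: custom column/tag type lists (-b/-A),
# an escaped table-name prefix, the taosc/stmt/sml/rest interfaces, interlace sleep
# counting, and per-type describe checks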
cmd = "taosBenchmark -F 7 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%^*"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use newtest")
tdSql.query("select count(*) from newtest.meters")
tdSql.checkData(0, 0, 20)
tdSql.query("select distinct(c0) from newtest.meters")
tdSql.checkRows(7)
tdSql.query("describe meters")
tdSql.checkRows(8)
tdSql.checkData(0, 1, "TIMESTAMP")
tdSql.checkData(1, 1, "TINYINT")
tdSql.checkData(2, 1, "BINARY")
tdSql.checkData(2, 2, 23)
tdSql.checkData(3, 1, "BOOL")
tdSql.checkData(4, 1, "NCHAR")
tdSql.checkData(4, 2, 29)
tdSql.checkData(5, 1, "INT")
tdSql.checkData(6, 1, "BINARY")
tdSql.checkData(6, 2, 29)
tdSql.checkData(6, 3, "TAG")
tdSql.checkData(7, 1, "NCHAR")
tdSql.checkData(7, 2, 31)
tdSql.checkData(7, 3, "TAG")
tdSql.query("select tbname from meters where tbname like '$%^*%'")
tdSql.checkRows(2)
tdSql.execute("drop database if exists newtest")
cmd = "taosBenchmark -F 7 -n 10 -t 2 -y -M -I stmt"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(tbname) from test.meters")
tdSql.checkData(0, 0, 2)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 20)
tdSql.query("select distinct(c0) from test.meters")
tdSql.checkRows(7)
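# with interlaced rows (-B 2) and an insert interval (-i 1) the benchmark sleeps
# between batches; grep its output for "sleep" and count the occurrences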
cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l"
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 2):
tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes))
cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l"
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 3):
tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes))
cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l"
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 2):
tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes))
cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 | grep sleep | wc -l"
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 3):
tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes))
cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l"
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 2):
tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes))
cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l"
sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8")
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
if (int(sleepTimes) != 3):
tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes))
cmd = "taosBenchmark -S 17 -n 3 -t 1 -y -x"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select last(ts) from test.meters")
tdSql.checkData(0, 0, "2017-07-14 10:40:00.034")
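# -N creates normal tables without a super table, so "show stables" returns nothing
# while the 11 plain tables (e.g. d10) hold the data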
cmd = "taosBenchmark -N -I taosc -t 11 -n 11 -y -x -E"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
tdSql.query("show stables")
tdSql.checkRows(0)
tdSql.query("show tables")
tdSql.checkRows(11)
tdSql.query("select count(*) from `d10`")
tdSql.checkData(0, 0, 11)
cmd = "taosBenchmark -N -I rest -t 11 -n 11 -y -x"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
tdSql.query("show stables")
tdSql.checkRows(0)
tdSql.query("show tables")
tdSql.checkRows(11)
tdSql.query("select count(*) from d10")
tdSql.checkData(0, 0, 11)
cmd = "taosBenchmark -N -I stmt -t 11 -n 11 -y -x"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
tdSql.query("show stables")
tdSql.checkRows(0)
tdSql.query("show tables")
tdSql.checkRows(11)
tdSql.query("select count(*) from d10")
tdSql.checkData(0, 0, 11)
cmd = "taosBenchmark -N -I sml -y"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) != 0)
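# run once per supported column type passed via -b and confirm that describe
# reports the corresponding TDengine type for the first data column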
cmd = "taosBenchmark -n 1 -t 1 -y -b bool"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BOOL")
cmd = "taosBenchmark -n 1 -t 1 -y -b tinyint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "TINYINT")
cmd = "taosBenchmark -n 1 -t 1 -y -b utinyint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "TINYINT UNSIGNED")
cmd = "taosBenchmark -n 1 -t 1 -y -b smallint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "SMALLINT")
cmd = "taosBenchmark -n 1 -t 1 -y -b usmallint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "SMALLINT UNSIGNED")
cmd = "taosBenchmark -n 1 -t 1 -y -b int"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "INT")
cmd = "taosBenchmark -n 1 -t 1 -y -b uint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "INT UNSIGNED")
cmd = "taosBenchmark -n 1 -t 1 -y -b bigint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BIGINT")
cmd = "taosBenchmark -n 1 -t 1 -y -b ubigint"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BIGINT UNSIGNED")
cmd = "taosBenchmark -n 1 -t 1 -y -b timestamp"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "TIMESTAMP")
cmd = "taosBenchmark -n 1 -t 1 -y -b float"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "FLOAT")
cmd = "taosBenchmark -n 1 -t 1 -y -b double"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "DOUBLE")
cmd = "taosBenchmark -n 1 -t 1 -y -b nchar"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "NCHAR")
cmd = "taosBenchmark -n 1 -t 1 -y -b nchar\(7\)"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "NCHAR")
cmd = "taosBenchmark -n 1 -t 1 -y -b binary"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BINARY")
cmd = "taosBenchmark -n 1 -t 1 -y -b binary\(7\)"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(1, 1, "BINARY")
cmd = "taosBenchmark -n 1 -t 1 -y -A json\(7\)"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("describe test.meters")
tdSql.checkData(4, 1, "JSON")
cmd = "taosBenchmark -n 1 -t 1 -y -b int,x"
tdLog.info("%s" % cmd)
assert(os.system("%s" % cmd) != 0)
cmd = "taosBenchmark -n 1 -t 1 -y -A int,json"
tdLog.info("%s" % cmd)
assert(os.system("%s" % cmd) != 0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
1641976781445,1
1641976781446,2
1641976781447,3
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def caseDescription(self):
'''
[TD-11510] taosBenchmark test cases
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
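# default.json is expected to create 10 child tables of stb and insert 100 rows in total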
cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/default.json"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(tbname) from db.stb")
tdSql.checkData(0, 0, 10)
tdSql.query("select count(*) from db.stb")
tdSql.checkData(0, 0, 100)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def caseDescription(self):
'''
[TD-11510] taosBenchmark test cases
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
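# invalid values for numeric options are presumably ignored in favor of defaults (the
# trailing -n 2 -t 2 -r 1 still insert 4 rows), while unknown options, config files,
# hosts, users and passwords must make taosBenchmark exit with a non-zero status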
cmd = "taosBenchmark -F abc -P abc -I abc -T abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y"
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 4)
cmd = "taosBenchmark non_exist_opt"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) != 0)
cmd = "taosBenchmark -f non_exist_file"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) != 0)
cmd = "taosBenchmark -h non_exist_host"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) != 0)
cmd = "taosBenchmark -p non_exist_pass"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) != 0)
cmd = "taosBenchmark -u non_exist_user"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) != 0)
cmd = "taosBenchmark -c non_exist_dir -n 1 -t 1 -o non_exist_path -y"
tdLog.info("%s" % cmd)
assert (os.system("%s" % cmd) == 0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_pool_size": 20,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"prepared_rand": 10,
"chinese": "no",
"insert_interval": 0,
"num_of_records_per_req": 10,
"databases": [{
"dbinfo": {
"name": "db"
},
"super_tables": [{
"name": "stb",
"childtable_prefix": "stb_",
"columns": [{"type": "INT"}],
"tags": [{"type": "INT"}]
}]
}]
}
\ No newline at end of file
{
"filetype":"query",
"cfgdir": "/etc/taos",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_mode": "rest",
"thread_pool_size": 20,
"response_buffer": 10000,
"specified_table_query":
{
"query_times": 1,
"sqls":
[{
"sql": "select count(*) from db.stb",
"result": "rest_query_specified"
}]
},
"super_table_query": {
"stblname": "stb",
"sqls": [
{
"sql": "select count(*) from xxxx",
"result": "rest_query_super"
}
]
}
}
\ No newline at end of file
python3 ./test.py -f 1-insert/batchInsert.py
\ No newline at end of file
python3 ./test.py -f 1-insert/batchInsert.py
python3 ./test.py -f 1-insert/uppercase_in_stmt.py