Unverified commit f743f2a5, authored by S shenglian-zhou, committed by GitHub

Merge branch 'develop' into szhou/feature/diff-ignore-negative

......@@ -8,6 +8,7 @@ def skipbuild = 0
def win_stop = 0
def scope = []
def mod = [0,1,2,3,4]
def sim_mod = [0,1,2,3]
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
......@@ -45,6 +46,7 @@ def pre_test(){
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script {
......@@ -120,6 +122,7 @@ def pre_test_noinstall(){
sh'hostname'
sh'''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script {
......@@ -192,6 +195,7 @@ def pre_test_mac(){
sh'hostname'
sh'''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git reset --hard HEAD~10 >/dev/null
'''
script {
......@@ -377,12 +381,14 @@ pipeline {
println gitlog
if (!(gitlog =~ /\((.*?)\)/)){
autoCancelled = true
error('Aborting the build.')
error('Please fill in the scope information correctly.\neg. [TD-xxxx]<fix>(query,insert):xxxxxxxxxxxxxxxxxx ')
}
temp = (gitlog =~ /\((.*?)\)/)
temp = temp[0].remove(1)
scope = temp.split(",")
scope = ['connector','query','insert','other','tools','taosAdapter']
Collections.shuffle mod
Collections.shuffle sim_mod
}
}
......@@ -400,10 +406,10 @@ pipeline {
}
parallel {
stage('python_1') {
agent{label " slave1 || slave6 || slave11 || slave16 "}
agent{label " slave1 || slave11 "}
steps {
pre_test()
timeout(time: 55, unit: 'MINUTES'){
timeout(time: 100, unit: 'MINUTES'){
script{
scope.each {
sh """
......@@ -417,10 +423,10 @@ pipeline {
}
}
stage('python_2') {
agent{label " slave2 || slave7 || slave12 || slave17 "}
agent{label " slave2 || slave12 "}
steps {
pre_test()
timeout(time: 55, unit: 'MINUTES'){
timeout(time: 100, unit: 'MINUTES'){
script{
scope.each {
sh """
......@@ -434,7 +440,7 @@ pipeline {
}
}
stage('python_3') {
agent{label " slave3 || slave8 || slave13 ||slave18 "}
agent{label " slave3 || slave13 "}
steps {
timeout(time: 105, unit: 'MINUTES'){
pre_test()
......@@ -451,9 +457,9 @@ pipeline {
}
}
stage('python_4') {
agent{label " slave4 || slave9 || slave14 || slave19 "}
agent{label " slave4 || slave14 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
timeout(time: 100, unit: 'MINUTES'){
pre_test()
script{
scope.each {
......@@ -469,9 +475,9 @@ pipeline {
}
}
stage('python_5') {
agent{label " slave5 || slave10 || slave15 || slave20 "}
agent{label " slave5 || slave15 "}
steps {
timeout(time: 55, unit: 'MINUTES'){
timeout(time: 100, unit: 'MINUTES'){
pre_test()
script{
scope.each {
......@@ -486,34 +492,97 @@ pipeline {
}
}
}
stage('arm64centos7') {
agent{label " arm64centos7 "}
stage('sim_1') {
agent{label " slave6 || slave16 "}
steps {
pre_test_noinstall()
pre_test()
timeout(time: 100, unit: 'MINUTES'){
sh """
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[0]}
date"""
}
}
stage('arm64centos8') {
agent{label " arm64centos8 "}
}
stage('sim_2') {
agent{label " slave7 || slave17 "}
steps {
pre_test_noinstall()
pre_test()
timeout(time: 100, unit: 'MINUTES'){
sh """
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[1]}
date"""
}
}
stage('arm32bionic') {
agent{label " arm32bionic "}
}
stage('sim_3') {
agent{label " slave8 || slave18 "}
steps {
pre_test_noinstall()
timeout(time: 105, unit: 'MINUTES'){
pre_test()
sh """
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[2]}
date"""
}
}
stage('arm64bionic') {
agent{label " arm64bionic "}
}
stage('sim_4') {
agent{label " slave9 || slave19 "}
steps {
pre_test_noinstall()
timeout(time: 100, unit: 'MINUTES'){
pre_test()
sh """
date
cd ${WKC}/tests
./test-CI.sh sim 4 ${sim_mod[3]}
date"""
}
}
stage('arm64focal') {
agent{label " arm64focal "}
}
stage('other') {
agent{label " slave10 || slave20 "}
steps {
pre_test_noinstall()
timeout(time: 100, unit: 'MINUTES'){
pre_test()
timeout(time: 60, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
timeout(time: 60, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date
'''
}
}
}
stage('centos7') {
......@@ -546,7 +615,36 @@ pipeline {
pre_test_mac()
}
}
stage('arm64centos7') {
agent{label " arm64centos7 "}
steps {
pre_test_noinstall()
}
}
stage('arm64centos8') {
agent{label " arm64centos8 "}
steps {
pre_test_noinstall()
}
}
stage('arm32bionic') {
agent{label " arm32bionic "}
steps {
pre_test_noinstall()
}
}
stage('arm64bionic') {
agent{label " arm64bionic "}
steps {
pre_test_noinstall()
}
}
stage('arm64focal') {
agent{label " arm64focal "}
steps {
pre_test_noinstall()
}
}
stage('build'){
agent{label " wintest "}
steps {
......@@ -561,6 +659,7 @@ pipeline {
stage('test'){
agent{label "win"}
steps{
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
pre_test_win()
timeout(time: 20, unit: 'MINUTES'){
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.3.2.0")
SET(TD_VER_NUMBER "2.4.0.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -83,10 +83,11 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): Build the Windows client yourself; all connectors in a Windows environment need it
* [Rust Connector](/connector/rust): Connect to the TDengine server from Rust through the libtaos client or the RESTful interface.
## [TDengine Components and Tools](/cn/documentation/)
## TDengine Components and Tools
* [taosAdapter User Manual](/tools/adapter)
* [TDinsight User Manual](/tools/insight)
* [taosTools User Manual](/tools/taos-tools)
## [Connections with Other Tools](/connections)
......
......@@ -15,22 +15,34 @@ $ docker -v
Docker version 20.10.3, build 48d30b5
```
## Running TDengine in a Docker container
## Using Docker to run TDengine in a container
1. Pull the TDengine image with the following command and run it in the background.
### Running the TDengine server in a Docker container
```bash
$ docker run -d --name tdengine tdengine/tdengine
7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
$ docker run -d -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
```
This command starts a docker container running TDengine server and maps the container's ports 6030 to 6041 to the same ports on the host. If the host is already running TDengine server and occupying those ports, map the container's ports to a different unused port range instead (see [TDengine 2.0 Port Description](https://www.taosdata.com/cn/documentation/faq#port) for details). Both TCP and UDP ports need to be open so that TDengine clients can reach the TDengine server.
- **docker run**: run a container via Docker
- **--name tdengine**: set the container name; the container can then be referred to by this name
- **-d**: run the container in the background
- **-p**: specify the ports to map. Note: without port mapping you can still enter the Docker container to use TDengine or develop applications, but the container cannot provide services to the outside
- **tdengine/tdengine**: the official TDengine application image that is pulled
- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**: the long string returned is the container ID; the container can also be referred to by its ID
- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**: the long string returned is the container ID; the container can also be referred to by its ID
Further, you can use the docker run command to start a container running TDengine server, name the container tdengine with the --name command-line parameter, set the hostname to tdengine-server with --hostname, and mount local directories into the container with -v so that data stays in sync between host and container and is not lost after the container is deleted.
2. Verify that the container is running correctly.
```
$ docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine
```
- **--name tdengine**: set the container name; the container can be accessed by this name
- **--hostname=tdengine-server**: set the hostname of the Linux system inside the container; mapping the hostname to an IP avoids problems caused by the container IP changing.
- **-v**: map host directories into the container to avoid data loss after the container is deleted.
### Use the docker ps command to verify that the container is running correctly
```bash
$ docker ps
......@@ -45,23 +57,23 @@ c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
- **CREATED**: container creation time.
- **STATUS**: container status. UP means running.
3. Enter the Docker container and use TDengine.
### Enter the docker container to do development via the docker exec command
```bash
$ docker exec -it tdengine /bin/bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
root@tdengine-server:~/TDengine-server-2.0.20.13#
```
- **docker exec**: enter the container via the docker exec command; the container does not stop when you exit.
- **-i**: interactive mode.
- **-t**: allocate a terminal.
- **c452519b0f9b**: the container ID; change it according to the value returned by docker ps.
- **tdengine**: the container name; change it according to the value returned by docker ps.
- **/bin/bash**: run bash inside the container to interact with it.
4. After entering the container, run the taos shell client program.
After entering the container, run the taos shell client program.
```bash
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
root@tdengine-server:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
......@@ -73,19 +85,92 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息
In the TDengine terminal, you can create/delete databases, tables and super tables, and run insert and query operations via SQL commands; a minimal session is sketched below. For details, see the [TAOS SQL documentation](https://www.taosdata.com/cn/documentation/taos-sql)
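A minimal hedged example (the database name demo and table t are made up for illustration):
```mysql
CREATE DATABASE demo;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES (NOW, 10);
SELECT * FROM t;
DROP DATABASE demo;
```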
## Learn more about TDengine with taosdemo
1. Following the steps above, first exit the TDengine terminal program.
### Accessing the TDengine server inside the Docker container from the host
After starting the TDengine Docker container with the correct ports mapped via the -p command-line parameter, you can access the TDengine instance running inside the container simply by using the taos shell command on the host.
```
$ taos
Welcome to the TDengine shell from Linux, Client Version:2.0.22.3
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
You can also use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```bash
$ taos> q
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
```bash
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
This command accesses the TDengine server through the RESTful interface; it connects to port 6041 on the local machine, so the connection is successful.
For details on the TDengine RESTful interface, see the [official documentation](https://www.taosdata.com/cn/documentation/connector#restful)
### Running TDengine server and taosAdapter with a Docker container
Starting with TDengine 2.4.0.0, the Docker image includes a new component, taosAdapter. taosAdapter supports writing to and querying the TDengine server through the RESTful interface and provides data ingestion interfaces compatible with InfluxDB/OpenTSDB, allowing InfluxDB/OpenTSDB applications to be migrated to TDengine seamlessly.
2. Run taosdemo from the command-line interface.
Note: if taosAdapter runs inside the container, you need to map additional ports as needed; for the default port configuration and how to change it, see the [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README-CN.md)
Run the TDengine 2.4.0.0 image with docker:
```
$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.0
```
Use the curl command to verify that the RESTful interface is working properly:
```
$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
```
taosAdapter supports multiple data-collection agents (such as Telegraf, StatsD, collectd, etc.). Here we only simulate StatsD writing data; run the following command on the host:
```
$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
```
Then you can use the taos shell to query the database statsd that taosAdapter created automatically and the contents of the super table foo:
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.002112s)
taos> use statsd;
Database changed.
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
Query OK, 1 row(s) in set (0.001160s)
taos> select * from foo;
ts | value | metric_type |
=======================================================================================
2021-12-28 09:21:48.840820836 | 1 | counter |
Query OK, 1 row(s) in set (0.001639s)
taos>
```
You can see that the simulated data has been written into TDengine.
### Application example: use taosdemo on the host to write data to the TDengine server in the Docker container
1. Run taosdemo from the host command line to write data to the TDengine server in the Docker container
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
$ taosdemo
taosdemo is simulating data generated by power equipments monitoring...
......@@ -134,9 +219,9 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
After you press Enter, the command automatically creates a super table meters under the database test. Under this super table there are 10,000 tables named "d0" to "d9999", each with 10,000 records. Each record has four fields (ts, current, voltage, phase); the timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId: groupId is set from 1 to 10 and location is set to "beijing" or "shanghai".
Executing this command takes roughly a few minutes and ends up inserting a total of 100 million records.
In total, 100 million records are inserted.
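A quick hedged sanity check from the taos shell (assuming taosdemo used its default database test and super table meters as described above):
```mysql
SELECT COUNT(*) FROM test.meters;
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```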
3. Enter the TDengine terminal and look at the data generated by taosdemo.
2. Enter the TDengine terminal and look at the data generated by taosdemo.
- **Enter the command line.**
......@@ -217,27 +302,3 @@ tdengine
- **docker stop**: stop the specified running docker container with docker stop.
- **tdengine**: the container name.
## Connecting to TDengine in Docker during application development
There are two approaches to connect from outside Docker to the TDengine service running inside a Docker container:
1. Use port mapping (-p) to map the network ports opened inside the container to specified ports on the host. By mounting local directories (-v), data can be kept in sync between the host and the container so that it is not lost after the container is deleted.
```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
- The first command starts a docker container running TDengine and maps the container's port 6041 to port 6041 on the host.
- The second command accesses TDengine through the RESTful interface; it connects to port 6041 on the local machine, so the connection is successful.
Note: in this example, for convenience, only port 6041, which is required for RESTful, is mapped. If you want to connect to the TDengine service in a non-RESTful way, you need to map 11 ports in total starting from 6030 (see [TDengine 2.0 Port Description](https://www.taosdata.com/cn/documentation/faq#port) for the full list). In this example, mounting a local directory only covers /etc/taos where the configuration file lives; the data storage directory is not mounted.
2. Enter the docker container directly via the exec command to do development; that is, put the application code in the same Docker container as the TDengine server and connect to the TDengine service local to that container.
```bash
$ docker exec -it tdengine /bin/bash
```
......@@ -251,6 +251,7 @@ select * from apiserver_request_latencies_bucket;
```
## <a class="anchor" id="telegraf"></a> Direct writing from Telegraf (via taosAdapter)
To install Telegraf, please refer to the [official documentation](https://portal.influxdata.com/downloads/)
Newer versions of TDengine (2.3.0.0+) include a stand-alone program, taosAdapter, which receives data writes from multiple applications including Telegraf.
......@@ -276,6 +277,7 @@ sudo systemctl start telegraf
For taosAdapter configuration parameters, see the output of taosadapter --help and the related documentation.
## <a class="anchor" id="collectd"></a> Direct writing from collectd (via taosAdapter)
To install collectd, please refer to the [official documentation](https://collectd.org/download.shtml)
Newer versions of TDengine (2.3.0.0+) include a stand-alone program, taosAdapter, which receives data writes from multiple applications including collectd.
......@@ -294,6 +296,7 @@ sudo systemctl start collectd
For taosAdapter configuration parameters, see the output of taosadapter --help and the related documentation.
## <a class="anchor" id="statsd"></a> Direct writing from StatsD (via taosAdapter)
Install StatsD
Please refer to the [official documentation](https://github.com/statsd/statsd)
......@@ -316,6 +319,30 @@ port: 8125
For taosAdapter configuration parameters, see the output of taosadapter --help and the related documentation.
## <a class="anchor" id="icinga2"></a> Direct writing from icinga2 (via taosAdapter)
icinga2 can collect monitoring and performance data and write it to OpenTSDB; taosAdapter can receive icinga2 data and write it into TDengine.
* Enable opentsdb-writer by following https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer
* Enable the taosAdapter configuration option opentsdb_telnet.enable
* Modify the configuration file /etc/icinga2/features-enabled/opentsdb.conf
```
object OpenTsdbWriter "opentsdb" {
host = "host to taosAdapter"
port = 6048
}
```
For taosAdapter configuration parameters, see the output of taosadapter --help and the related documentation.
## <a class="anchor" id="tcollector"></a> Direct writing from TCollector (via taosAdapter)
TCollector is a client-side process that gathers data from local collectors and sends it to OpenTSDB; taosAdapter can receive TCollector data and write it into TDengine.
Enable the taosAdapter configuration option opentsdb_telnet.enable
Modify the TCollector configuration file: change the OpenTSDB host to the host where taosAdapter is deployed, and change the port to the one used by taosAdapter (6049 by default).
For taosAdapter configuration parameters, see the output of taosadapter --help and the related documentation.
## <a class="anchor" id="taosadapter2-telegraf"></a> Writing Telegraf data via Bailongma 2.0
......
......@@ -53,6 +53,7 @@ TDengine 提供 3 个 UDF 的源代码示例,分别为:
* numOfOutput: the number of output values; for an aggregate function this can only be 0 or 1.
* buf: a block used to pass state-control information between the UDF and the engine.
Other typical scenarios, such as computing covariance, can be implemented by defining an aggregate UDF in the same way; a hedged usage sketch follows.
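As a sketch only (the function name covar and the library path are hypothetical, and the statement assumes the CREATE AGGREGATE FUNCTION syntax used for TDengine 2.x UDFs), such an aggregate UDF could be registered and then invoked like a built-in function:
```mysql
CREATE AGGREGATE FUNCTION covar AS "/usr/local/taos/udf/covar.so" OUTPUTTYPE DOUBLE bufsize 128;
SELECT covar(col1) FROM tb1;
```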
### Other UDF functions
......
......@@ -1350,17 +1350,17 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
功能说明 (Function): returns the last record of a table/super table.
Return data type: same as the applied field.
Applicable fields: all fields.
Applicable to: **tables, super tables**.
Restriction: LAST_ROW() cannot be used together with INTERVAL.
Note: when applied to a super table, if several rows share the same, largest timestamp, one of them is returned at random, and there is no guarantee that repeated runs pick the same row.<br/>
<br/>Example:
```mysql
......@@ -1383,30 +1383,30 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```
Function: returns the interpolated value of the specified column of a table/super table at the specified time point(s).
Return data type: same as the field type.
Applicable fields: numeric fields.
Applicable to: **tables, super tables, nested queries**.
Notes:
1) INTERP is used to obtain the value of the specified column at a specified time point; if no row matches exactly at that time point, interpolation is performed according to the FILL setting.
2) The input of INTERP is the data of the specified column; a WHERE clause can be used to filter the original data. Without a filter condition, the input is all the data.
3) The output time range of INTERP is specified with RANGE(timestamp1, timestamp2), which must satisfy timestamp1 <= timestamp2. timestamp1 (required) is the start of the output time range: if timestamp1 meets the interpolation condition, it is the first output record. timestamp2 (required) is the end of the output time range: the timestamp of the last output record cannot be greater than timestamp2. If RANGE is not specified, the timestamp of the first record of the filtered input data serves as timestamp1 and the timestamp of the last record serves as timestamp2, which likewise satisfies timestamp1 <= timestamp2.
4) INTERP determines the number of results within the output time range according to EVERY, i.e. interpolation is performed at a fixed interval (the EVERY value) starting from timestamp1. If EVERY is not specified, the window size defaults to infinity, i.e. there is only one window starting from timestamp1.
5) INTERP decides how to interpolate at each qualifying time point according to FILL; without FILL there is no interpolation, i.e. the output is the original record value, or nothing if no original record exists.
6) INTERP can only interpolate within a single time series; therefore, when applied to a super table it must be used together with group by tbname, and when used in the outer level of a nested query the inner subquery must not contain a GROUP BY clause.
7) The interpolation result of INTERP is not affected by ORDER BY timestamp, which only affects the ordering of the output.
SQL examples:
1) Linear interpolation at a single time point
```mysql
......@@ -1436,15 +1436,15 @@ SQL示例:
SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```
Function: returns the record of the specified field of a table/super table at the specified time point.
Return data type: same as the field type.
Applicable fields: numeric fields.
Applicable to: **tables, super tables**.
Note: (this function has been available since version 2.0.15.0) <br/>1) INTERP must specify the time point; if there is no data exactly at that time point, interpolation is performed according to the FILL setting. The WHERE clause may also carry filter conditions, for example on tags or tbname.<br/>2) An INTERP query requires the queried time point to lie within the time range covered by all records of the data set (table). If the given timestamp lies outside that range, no result is returned even when an interpolation directive is present.<br/>3) A single INTERP function call can only query one time point. To return cross-section data at equal time intervals, use INTERP together with EVERY (rather than INTERVAL), which means interpolating at a fixed time interval.<br/>
Example:
```mysql
......@@ -1455,7 +1455,7 @@ SQL示例:
Query OK, 1 row(s) in set (0.002652s)
```
If there is no data at the given timestamp and no interpolation strategy is specified, no result is returned; if an interpolation strategy is specified, the result is returned according to that strategy.
```mysql
taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005';
......@@ -1468,7 +1468,7 @@ SQL示例:
Query OK, 1 row(s) in set (0.003056s)
```
The following code performs a cross-section calculation every 5 milliseconds within the time range `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']`.
```mysql
taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a);
......@@ -1578,8 +1578,6 @@ SQL示例:
It can only be used together with normal columns and with selection and projection functions, not with aggregation functions.
This function can be applied to normal tables and super tables.
Supported versions: the ability to specify the calculation algorithm is available from version 2.2.0.x; versions before 2.2.0.0 do not support specifying the algorithm.
- **FLOOR**
```mysql
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
......@@ -1653,7 +1651,7 @@ SELECT COUNT(*) FROM temp_table INTERVAL(1D) SLIDING(2D)
An integer (Boolean) or string is used to identify the status of the device when a record is produced. Records with the same status value belong to the same status window, and the window closes once the value changes. As shown in the figure below, the status windows determined by the status value are [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. (Status windows are not yet supported on super tables.)
![Time window diagram](../images/sql/timewindow-2.png)
![Time window diagram](../images/sql/timewindow-3.png)
Use STATE_WINDOW to specify the column that determines the status window. For example:
......@@ -1665,7 +1663,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status)
The session window determines whether records belong to the same session based on the value of the timestamp primary key. As shown in the figure below, if the maximum allowed gap between consecutive timestamps is set to 12 seconds, the following 6 records form 2 session windows: [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the gap between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the continuous gap (12 seconds).
![Time window diagram](../images/sql/timewindow-3.png)
![Time window diagram](../images/sql/timewindow-2.png)
Results that fall within tol_value of each other are considered to belong to the same window; if the gap between two consecutive records exceeds tol_val, the next window is opened automatically. (Session windows are not yet supported on super tables.)
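A minimal hedged sketch (reusing the table temp_tb_1 from the STATE_WINDOW example above and the 12-second tolerance from the figure):
```mysql
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
```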
......@@ -1853,3 +1851,24 @@ TDengine 中的表(列)名命名规则如下:
```mysql
select jtag->'key' from (select jtag from stable) where jtag->'key'>0
```
## Escape character description
- Escape character table

| Escape sequence | **Character represented** |
| :--------: | ------- |
| `\'` | A single quote ' |
| `\"` | A double quote " |
| \n | A newline character |
| \r | A carriage return character |
| \t | A tab character |
| `\\` | A backslash \ |
| `\%` | A % character; see the rules below |
| `\_` | A _ character; see the rules below |

- Escape character usage rules
  1. Escape characters in identifiers (database names, table names, column names)
     1. Normal identifiers: an error is reported for the invalid identifier, because an identifier must consist of digits, letters and underscores and must not start with a digit.
     2. Backquote `` identifiers: kept as-is, not escaped.
  2. Escape characters in data
     1. The escape characters defined above are escaped (see below for % and _); if there is no matching escape character, the escape character \ is ignored.
     2. Because % and _ are wildcards inside LIKE, use `\%` and `\_` inside a LIKE pattern to represent the literal characters % and _. If `\%` or `\_` is used outside of a LIKE pattern-matching context, they evaluate to the strings `\%` and `\_`, not to % and _.
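A hedged example of the LIKE rule (the table sensor_names and column name are made up for illustration): the pattern below matches values that literally begin with "rate_", whereas without the backslash the _ would match any single character.
```mysql
SELECT * FROM sensor_names WHERE name LIKE 'rate\_%';
```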
\ No newline at end of file
......@@ -83,6 +83,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
* [taosAdapter User Manual](/tools/adapter)
* [TDinsight User Manual](/tools/insight)
* [taos-tools User Manual](/tools/taos-tools)
## [Connections with Other Tools](/connections)
......
......@@ -8,29 +8,41 @@ The following article explains how to quickly build a single-node TDengine runti
The Docker tools themselves can be downloaded from [Docker official site](https://docs.docker.com/get-docker/).
After installation, you can check the Docker version in the command line terminal. If the version number is output properly, the Docker environment has been installed successfully.
After installation, you can check the Docker version in the command-line terminal. If the version number is output properly, the Docker environment has been installed successfully.
```bash
$ docker -v
Docker version 20.10.3, build 48d30b5
```
## Running TDengine in a Docker container
## How to use Docker to run TDengine
1, Use the command to pull the TDengine image and make it run in the background.
### Running the TDengine server inside Docker
```bash
$ docker run -d --name tdengine tdengine/tdengine
7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
$ docker run -d -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
```
- **docker run**: Running a container via Docker
- **--name tdengine**: Set the container name, we can see the corresponding container by the container name
- **-d**: Keeping containers running in the background
- **tdengine/tdengine**: Pulled from the official TDengine published application image
- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**: The long character returned is the container ID, and we can also view the corresponding container by its container ID
This command starts a docker container with TDengine server running and maps the container's ports from 6030 to 6041 to the host's ports from 6030 to 6041. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see [TDengine 2.0 Port Description](https://www.taosdata.com/en/documentation/faq#port) for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be open.
- **docker run**: Run a container via Docker
- **-d**: put the container run in the background
- **-p**: specify the port(s) to map. Note: If you do not use port mapping, you can still go inside the Docker container to access TDengine services or develop your application, but you cannot provide services outside the container
- **tdengine/tdengine**: the official TDengine published application image that is pulled
- **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**: The long character returned is the container ID, and we can also view the corresponding container by its container ID
Further, you can also use the `docker run` command to start the docker container running TDengine server, use the `--name` command-line parameter to name the container tdengine, use `--hostname` to specify the hostname as tdengine-server, and use `-v` to mount local directories so that data inside the host and the container stays in sync and is not lost after the container is deleted.
2, Verify that the container is running correctly.
```
$ docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine
```
- **--name tdengine**: set the container name, we can access the corresponding container by container name
- **--hostname=tdengine-server**: set the hostname of the Linux system inside the container; we can map the hostname and IP to solve the problem that the container IP may change.
- **-v**: Set the host file directory to be mapped to the inner container directory to avoid data loss after the container is deleted.
### Use the `docker ps` command to verify that the container is running correctly
```bash
$ docker ps
......@@ -38,30 +50,30 @@ CONTAINER ID IMAGE COMMAND CREATED STATUS ·
c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
```
- **docker ps**: Lists information about all containers that are in running state.
- **CONTAINER ID**: Container ID.
- **IMAGE**: The mirror used.
- **COMMAND**: The command to run when starting the container.
- **CREATED**: The time when the container was created.
- **STATUS**: The container status. Up means running.
- **docker ps**: list all containers in running state.
- **CONTAINER ID**: container ID.
- **IMAGE**: the image used.
- **COMMAND**: the command to run when starting the container.
- **CREATED**: container creation time.
- **STATUS**: container status. UP means running.
3, Go inside the Docker container and use TDengine.
### Enter the docker container to do development via the `docker exec` COMMAND
```bash
$ docker exec -it tdengine /bin/bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
root@tdengine-server:~/TDengine-server-2.0.20.13#
```
- **docker exec**: Enter the container via the docker exec command; if you exit, the container will not stop.
- **-i**: Enter the interactive mode.
- **-t**: Specify a terminal.
- **c452519b0f9b**: The container ID, which needs to be modified according to the value returned by the docker ps command.
- **/bin/bash**: Load the container and run bash to interact with it.
- **docker exec**: Enter the container by `docker exec` command, if exited, the container will not stop.
- **-i**: use interactive mode.
- **-t**: specify a terminal.
- **tdengine**: container name, needs to be changed according to the value returned by the docker ps command.
- **/bin/bash**: load the container and run bash to interact with it.
4, After entering the container, execute the taos shell client program.
After entering the container, execute the taos shell client program.
```bash
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
root@tdengine-server:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
......@@ -69,23 +81,94 @@ Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
The TDengine terminal successfully connects to the server and prints out a welcome message and version information. If it fails, an error message is printed.
The TDengine shell successfully connects to the server and prints out a welcome message and version information. If it fails, an error message is printed.
In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, you can refer to [TAOS SQL guide](https://www.taosdata.com/en/documentation/taos-sql).
In the TDengine shell, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, please refer to the [TAOS SQL documentation](https://www.taosdata.com/en/documentation/taos-sql).
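A minimal hedged example (the database name demo and table t are made up for illustration):
```mysql
CREATE DATABASE demo;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES (NOW, 10);
SELECT * FROM t;
DROP DATABASE demo;
```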
## Learn more about TDengine with taosdemo
### Accessing TDengine server inside Docker container from the host side
1, Following the above steps, exit the TDengine terminal program first.
After starting the TDengine Docker container with the correct port mapped with the -p command line parameter, you can access the TDengine running inside the Docker container from the host side using the taos shell command.
```bash
$ taos> q
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
2, Execute taosdemo from the command line interface.
```
$ taos
Welcome to the TDengine shell from Linux, Client Version:2.0.22.3
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
You can also access the TDengine server inside the Docker container from the host side using the `curl` command through the RESTful port.
```
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
This command accesses the TDengine server through the RESTful interface, which connects to port 6041 on the local machine, so the connection is successful.
TDengine RESTful interface details can be found in the [official documentation](https://www.taosdata.com/en/documentation/connector#restful).
### Running TDengine server and taosAdapter with a Docker container
Docker containers of TDengine version 2.4.0.0 and later include a component named `taosAdapter`, which supports writing to and querying the TDengine server through the RESTful interface and provides data ingestion interfaces compatible with InfluxDB/OpenTSDB, allowing InfluxDB/OpenTSDB applications to be migrated to TDengine seamlessly.
Note: if taosAdapter is running inside the container, you need to map additional ports as needed; please refer to the [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README.md) for the default port numbers and how to change them for a specific purpose.
Run the TDengine version 2.4.0.0 image with docker:
```
$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.0
```
Verify that the RESTful interface provided by taosAdapter is working, using the `curl` command:
```
$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
```
taosAdapter supports multiple data collection agents (e.g. Telegraf, StatsD, collectd, etc.); here we only demonstrate simulating StatsD writing data, executing the following command on the host side:
```
$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
```
Then you can use the taos shell to query the database statsd that taosAdapter created automatically and the contents of the super table foo.
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.002112s)
taos> use statsd;
Database changed.
taos> show stables;
name | created_time | columns | tags | tables |
============================================================================================
foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
Query OK, 1 row(s) in set (0.001160s)
taos> select * from foo;
ts | value | metric_type |
=======================================================================================
2021-12-28 09:21:48.840820836 | 1 | counter |
Query OK, 1 row(s) in set (0.001639s)
taos>
```
You can see that the simulation data has been written to TDengine.
### Application example: write data to TDengine server in Docker container using taosdemo on the host
1, Execute taosdemo in the host command line interface to write data to the TDengine server in the Docker container
```bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
$ taosdemo
taosdemo is simulating data generated by power equipments monitoring...
......@@ -132,7 +215,7 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
Press enter key to continue or Ctrl-C to stop
```
After enter, this command will automatically create a super table meters under the database test, there are 10,000 tables under this super table, the table name is "d0" to "d9999", each table has 10,000 records, each record has four fields (ts, current, voltage, phase), the time stamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999", each table has a tag location and groupId, groupId is set from 1 to 10 and location is set to "beijing" or "shanghai".
After enter, this command will automatically create a super table `meters` under the database test, there are 10,000 tables under this super table, the table name is "d0" to "d9999", each table has 10,000 records, each record has four fields (ts, current, voltage, phase), the time stamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999", each table has a tag location and groupId, groupId is set from 1 to 10 and location is set to "beijing" or "shanghai".
It takes about a few minutes to execute this command and ends up inserting a total of 100 million records.
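A quick hedged sanity check from the taos shell (assuming taosdemo used its default database test and super table meters as described above):
```mysql
SELECT COUNT(*) FROM test.meters;
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```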
......@@ -217,27 +300,3 @@ tdengine
- **docker stop**: Stop the specified running docker image with docker stop.
- **tdengine**: The name of the container.
## Connecting to TDengine in Docker during application development
There are two approaches to connect from outside Docker to the TDengine service running inside a Docker container:
1, By port mapping (-p), the open network port inside the container is mapped to the specified port of the host. By mounting the local directory (-v), you can synchronize the data inside the host and the container to prevent data loss after the container is deleted.
```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
- The first command starts a docker container with TDengine running and maps the 6041 port of the container to port 6041 of the host.
- The second command, accessing TDengine through the RESTful interface, connects to port 6041 on the local machine, so the connection is successful.
Note: In this example, for convenience reasons, only port 6041 is mapped, which is required for RESTful. If you wish to connect to the TDengine service in a non-RESTful manner, you will need to map a total of 11 ports starting at 6030. In the example, mounting the local directory also only deals with the /etc/taos directory where the configuration files are located, but not the data storage directory.
2, Go directly to the docker container to do development via the exec command. That is, put the program code in the same Docker container where the TDengine server is located and connect to the TDengine service local to the container.
```bash
$ docker exec -it tdengine /bin/bash
```
......@@ -167,7 +167,7 @@ Now you can query the metrics data of Telegraf from TDengine.
Please find taosAdapter configuration and usage from `taosadapter --help` output.
## <a class="anchor" id="collectd"></a> Direct writing from collectd (via taosAdapter)
## <a class="anchor" id="collectd"></a> Data Writing via collectd and taosAdapter
Please refer to [official document](https://collectd.org/download.shtml) for collectd installation.
TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receive data insertion from collectd.
......@@ -186,7 +186,7 @@ sudo systemctl start collectd
```
Please find taosAdapter configuration and usage from `taosadapter --help` output.
## <a class="anchor" id="statsd"></a> Direct writing from StatsD (via taosAdapter)
## <a class="anchor" id="statsd"></a> Data Writing via StatsD and taosAdapter
Please refer to [official document](https://github.com/statsd/statsd) for StatsD installation.
TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receive data insertion from StatsD.
......@@ -206,8 +206,30 @@ port: 8125
}
```
## <a class="anchor" id="cinga2"></a> Data Writing via icinga2 and taosAdapter
Use icinga2 to collect check result metrics and performance data
* Follow the doc to enable opentsdb-writer https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer
* Enable taosAdapter configuration opentsdb_telnet.enable
* Modify the configuration file /etc/icinga2/features-enabled/opentsdb.conf
```
object OpenTsdbWriter "opentsdb" {
host = "host to taosAdapter"
port = 6048
}
```
Please find taosAdapter configuration and usage from `taosadapter --help` output.
## <a class="anchor" id="tcollector"></a> Data Writing via TCollector and taosAdapter
TCollector is a client-side process that gathers data from local collectors and pushes the data to OpenTSDB. You run it on all your hosts, and it does the work of sending each host’s data to the TSD (OpenTSDB backend process).
* Enable taosAdapter configuration opentsdb_telnet.enable
* Modify the TCollector configuration file: change the OpenTSDB host to the host where taosAdapter is deployed, and change the port to the one used by taosAdapter (default 6049)
Please find taosAdapter configuration and usage from `taosadapter --help` output.
## <a class="anchor" id="taosadapter2-telegraf"></a> Insert data via Bailongma 2.0 and Telegraf
......
......@@ -761,17 +761,16 @@ Query OK, 1 row(s) in set (0.000141s)
You can find sample code here: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
## FAQ
- Why don't addBatch and executeBatch provide a performance benefit for "batch writes/updates"?
**Cause**: In TDengine's JDBC implementation, SQL statements submitted through the addBatch method are executed in the order in which they are added. This method does not reduce the number of interactions with the server and does not improve performance.
**Answer**: 1. Concatenate multiple values in a single INSERT statement (see the sketch after this list); 2. Use multi-threaded concurrent insertion; 3. Use parameter binding to write
- java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: The application program cannot find the native library *taos*
**Answer**: Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows and make a soft link through `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux.
- java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**Cause**: Currently TDengine only supports 64-bit JDK
**Answer**: Reinstall a 64-bit JDK.
- For other questions, please refer to [Issues](https://github.com/taosdata/TDengine/issues)
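As a hedged sketch of approach 1 above (the table names d1001/d1002 and the (ts, current, voltage, phase) columns follow the taosdemo meters schema used elsewhere in this documentation and are illustrative), TDengine lets a single INSERT statement carry multiple rows and even multiple tables:
```mysql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) ('2021-07-13 14:06:33.272', 10.3, 218, 0.25)
            d1002 VALUES ('2021-07-13 14:06:34.272', 10.1, 220, 0.28);
```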
......
......@@ -1335,3 +1335,24 @@ Is not null supports all types of columns. Non-null expression is < > "" and onl
select jtag->'key' from (select jtag from stable) where jtag->'key'>0
```
## Escape character description
- Special Character Escape Sequences
| Escape Sequence | **Character Represented by Sequence** |
| :--------: | ------------------- |
| `\'` | A single quote (') character |
| `\"` | A double quote (") character |
| \n | A newline (linefeed) character |
| \r | A carriage return character |
| \t | A tab character |
| `\\` | A backslash (\) character |
| `\%` | A % character; see note following the table |
| `\_` | A _ character; see note following the table |
- Escape character usage rules
- Escape characters in an identifier (database name, table name, column name)
  1. Normal identifiers: an error is reported for the invalid identifier, because an identifier must consist of digits, letters and underscores, and cannot start with a digit.
  2. Backquote (``) identifiers: kept as-is, not escaped.
- Escape characters in data
  1. The escape characters defined above are escaped (see below for % and _). If there is no matching escape character, the escape character is ignored.
  2. The `\%` and `\_` sequences are used to search for literal instances of % and _ in pattern-matching contexts where they would otherwise be interpreted as wildcard characters. If you use `\%` or `\_` outside of pattern-matching contexts, they evaluate to the strings `\%` and `\_`, not to % and _.
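A hedged example of the pattern-matching rule (the table sensor_names and column name are made up for illustration): the pattern below matches values that literally begin with "rate_", whereas without the backslash the _ would match any single character.
```mysql
SELECT * FROM sensor_names WHERE name LIKE 'rate\_%';
```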
\ No newline at end of file
......@@ -36,11 +36,11 @@ install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
mkdir -p ${pkg_dir}${install_home_path}/cfg
mkdir -p ${pkg_dir}${install_home_path}/connector
#mkdir -p ${pkg_dir}${install_home_path}/connector
mkdir -p ${pkg_dir}${install_home_path}/driver
mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/init.d
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
......@@ -51,7 +51,7 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
......@@ -70,10 +70,10 @@ cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
install_user_local_path="/usr/local"
......
......@@ -3,7 +3,7 @@
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
set -x
#set -x
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
......
......@@ -46,11 +46,11 @@ libfile="libtaos.so.%{_version}"
# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
mkdir -p %{buildroot}%{homepath}/cfg
mkdir -p %{buildroot}%{homepath}/connector
#mkdir -p %{buildroot}%{homepath}/connector
mkdir -p %{buildroot}%{homepath}/driver
mkdir -p %{buildroot}%{homepath}/examples
mkdir -p %{buildroot}%{homepath}/include
mkdir -p %{buildroot}%{homepath}/init.d
#mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
......@@ -60,7 +60,7 @@ fi
if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
#cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/startPre.sh %{buildroot}%{homepath}/bin
......@@ -75,10 +75,10 @@ cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driv
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
#cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
#cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
......
......@@ -167,11 +167,11 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin
${csudo}mkdir -p ${install_main_dir}/connector
# ${csudo}mkdir -p ${install_main_dir}/connector
${csudo}mkdir -p ${install_main_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include
${csudo}mkdir -p ${install_main_dir}/init.d
# ${csudo}mkdir -p ${install_main_dir}/init.d
if [ "$verMode" == "cluster" ]; then
${csudo}mkdir -p ${nginx_dir}
fi
......@@ -199,7 +199,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo}ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo}ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -s ${install_main_dir}/bin/taosBenchmark ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
......@@ -639,14 +639,14 @@ function install_service_on_sysvinit() {
# Install taosd service
if ((${os_type}==1)); then
${csudo}cp -f ${script_dir}/init.d/taosd.deb ${install_main_dir}/init.d/taosd
# ${csudo}cp -f ${script_dir}/init.d/taosd.deb ${install_main_dir}/init.d/taosd
${csudo}cp ${script_dir}/init.d/taosd.deb ${service_config_dir}/taosd && ${csudo}chmod a+x ${service_config_dir}/taosd
${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
# ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
elif ((${os_type}==2)); then
${csudo}cp -f ${script_dir}/init.d/taosd.rpm ${install_main_dir}/init.d/taosd
# ${csudo}cp -f ${script_dir}/init.d/taosd.rpm ${install_main_dir}/init.d/taosd
${csudo}cp ${script_dir}/init.d/taosd.rpm ${service_config_dir}/taosd && ${csudo}chmod a+x ${service_config_dir}/taosd
${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
# ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
fi
......@@ -923,15 +923,14 @@ function update_TDengine() {
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
# if [ "$pagMode" != "lite" ]; then
# install_connector
# fi
install_examples
if [ -z $1 ]; then
install_bin
install_service
install_taosadapter_service
install_config
install_taosadapter_config
openresty_work=false
......@@ -1008,9 +1007,9 @@ function install_TDengine() {
#install_avro lib
#install_avro lib64
if [ "$pagMode" != "lite" ]; then
install_connector
fi
# if [ "$pagMode" != "lite" ]; then
# install_connector
# fi
install_examples
if [ -z $1 ]; then # install service and client
......@@ -1018,6 +1017,7 @@ function install_TDengine() {
install_bin
install_service
install_taosadapter_service
install_taosadapter_config
openresty_work=false
if [ "$verMode" == "cluster" ]; then
......
......@@ -137,17 +137,17 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}
${csudo}mkdir -p ${install_main_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin
${csudo}mkdir -p ${install_main_dir}/connector
# ${csudo}mkdir -p ${install_main_dir}/connector
${csudo}mkdir -p ${install_main_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include
${csudo}mkdir -p ${install_main_dir}/init.d
# ${csudo}mkdir -p ${install_main_dir}/init.d
else
${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
${csudo}mkdir -p ${install_main_dir} || ${csudo}mkdir -p ${install_main_2_dir}
${csudo}mkdir -p ${install_main_dir}/cfg || ${csudo}mkdir -p ${install_main_2_dir}/cfg
${csudo}mkdir -p ${install_main_dir}/bin || ${csudo}mkdir -p ${install_main_2_dir}/bin
${csudo}mkdir -p ${install_main_dir}/connector || ${csudo}mkdir -p ${install_main_2_dir}/connector
# ${csudo}mkdir -p ${install_main_dir}/connector || ${csudo}mkdir -p ${install_main_2_dir}/connector
${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include
......@@ -168,9 +168,15 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo}rm -f ${bin_link_dir}/rmtaos || :
${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
${csudo}cp -r ${binary_dir}/build/bin/taos ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosBenchmark ] && ${csudo}cp -r ${binary_dir}/build/bin/taosBenchmark ${install_main_dir}/bin || :
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/taosd ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/tarbitrator ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/run_taosd.sh ${install_main_dir}/bin
......@@ -458,10 +464,10 @@ function install_service_on_sysvinit() {
# Install taosd service
if ((${os_type}==1)); then
${csudo}cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d
# ${csudo}cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d
${csudo}cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/taosd
elif ((${os_type}==2)); then
${csudo}cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
# ${csudo}cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo}cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/taosd
fi
......@@ -563,7 +569,7 @@ function update_TDengine() {
install_log
install_header
install_lib
install_connector
# install_connector
install_examples
install_bin
......@@ -603,7 +609,7 @@ function install_TDengine() {
install_log
install_header
install_lib
install_connector
# install_connector
install_examples
install_bin
......
......@@ -36,11 +36,11 @@ if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taostools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}')
taostools_install_dir="${release_dir}/taos-tools-${taostools_ver}"
taostools_install_dir="${release_dir}/taosTools-${taostools_ver}"
cd ${curr_dir}
else
taostools_install_dir="${release_dir}/taos-tools-${version}"
taostools_install_dir="${release_dir}/taosTools-${version}"
fi
# Directories and files
......@@ -123,9 +123,9 @@ if [ -n "${taostools_bin_files}" ]; then
mkdir -p ${taostools_install_dir}/bin \
&& cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :
[ -f ${taostools_install_dir}/bin/taosBenchmark ] && \
ln -sf ${taostools_install_dir}/bin/taosBenchmark \
${taostools_install_dir}/bin/taosdemo
# [ -f ${taostools_install_dir}/bin/taosBenchmark ] && \
# ln -sf ${taostools_install_dir}/bin/taosBenchmark \
# ${taostools_install_dir}/bin/taosdemo
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
......@@ -248,18 +248,18 @@ fi
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
#connector_dir="${code_dir}/connector"
#mkdir -p ${install_dir}/connector
#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
# cp -r ${connector_dir}/go ${install_dir}/connector
# else
# echo "WARNING: go connector not found, please check if want to use it!"
# fi
# cp -r ${connector_dir}/python ${install_dir}/connector
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
#fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......
name: tdengine
base: core20
version: '2.3.2.0'
version: '2.4.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......
......@@ -567,7 +567,7 @@ void bnCheckStatus() {
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (tsAccessSquence - pDnode->lastAccess > 3) {
if (tsAccessSquence - pDnode->lastAccess > tsOfflineInterval) {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
......
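The hunk above replaces the hardcoded 3-second threshold in bnCheckStatus with the new tsOfflineInterval global, which is registered later in this diff with a default of 3 and a valid range of [1, 600] seconds. As a rough, self-contained sketch of the check itself (simplified types and a hypothetical helper name, not code taken from this change):

#include <stdbool.h>
#include <stdint.h>

static int32_t tsOfflineInterval = 3;  /* seconds; now configurable in [1, 600] */

/* A dnode is treated as offline once it has missed more than
 * tsOfflineInterval status rounds since its last recorded access. */
static bool dnodeLooksOffline(int64_t accessSequence, int64_t lastAccess) {
  return (accessSequence - lastAccess) > tsOfflineInterval;
}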
......@@ -207,7 +207,7 @@ TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
void tscFieldInfoClear(SFieldInfo* pFieldInfo);
void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArray* pExprList);
......@@ -258,8 +258,6 @@ void tscColumnListCopyAll(SArray* dst, const SArray* src);
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar, bool convertJson);
void tscDequoteAndTrimToken(SStrToken* pToken);
void tscRmEscapeAndTrimToken(SStrToken* pToken);
int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded);
void tscIncStreamExecutionCount(void* pStream);
......
......@@ -52,3 +52,4 @@ taos_stmt_bind_single_param_batch
taos_is_null
taos_insert_lines
taos_schemaless_insert
taos_result_block
......@@ -902,7 +902,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
// does not belong to the same group, return the result of the current group;
setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pAggInfo->pExistBlock, TSDB_ORDER_ASC);
updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pAggInfo->pExistBlock->info.rows);
updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pAggInfo->pExistBlock->info.rows, pOperator->pRuntimeEnv);
{ // reset output buffer
for(int32_t j = 0; j < pOperator->numOfOutput; ++j) {
......@@ -954,7 +954,7 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
// does not belong to the same group, return the result of the current group
setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC);
updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor);
updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor, pOperator->pRuntimeEnv);
doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pBlock);
savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData);
......
......@@ -481,32 +481,12 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
// Remove quotation marks
if (TK_STRING == sToken.type) {
// delete escape character: \\, \', \"
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z);
}
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
cnt++;
j++;
k++;
continue;
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
strncpy(tmpTokenBuf, sToken.z, sToken.n);
sToken.n = stringProcess(tmpTokenBuf, sToken.n);
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
......@@ -1057,10 +1037,12 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
break;
}
char* tmp = NULL;
// Remove quotation marks
if (TK_STRING == sToken.type) {
sToken.z++;
sToken.n -= 2;
tmp = strndup(sToken.z, sToken.n);
sToken.n = stringProcess(tmp, sToken.n);
sToken.z = tmp;
}
char tagVal[TSDB_MAX_TAGS_LEN] = {0};
......@@ -1068,6 +1050,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd);
tfree(tmp);
return code;
}
......@@ -1078,18 +1061,18 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
if(sToken.n > TSDB_MAX_JSON_TAGS_LEN/TSDB_NCHAR_SIZE){
tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd);
tfree(tmp);
return tscSQLSyntaxErrMsg(pInsertParam->msg, "json tag too long", NULL);
}
char* json = strndup(sToken.z, sToken.n);
code = parseJsontoTagData(json, &kvRowBuilder, pInsertParam->msg, pTagSchema[spd.boundedColumns[0]].colId);
code = parseJsontoTagData(sToken.z, &kvRowBuilder, pInsertParam->msg, pTagSchema[spd.boundedColumns[0]].colId);
if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd);
tfree(json);
tfree(tmp);
return code;
}
tfree(json);
}
tfree(tmp);
}
tscDestroyBoundColumnInfo(&spd);
......@@ -1246,12 +1229,8 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
strncpy(tmpTokenBuf, sToken.z, sToken.n);
sToken.z = tmpTokenBuf;
if (TK_STRING == sToken.type) {
tscDequoteAndTrimToken(&sToken);
}
if (TK_ID == sToken.type) {
tscRmEscapeAndTrimToken(&sToken);
if (TK_STRING == sToken.type || TK_ID == sToken.type) {
sToken.n = stringProcess(sToken.z, sToken.n);
}
if (sToken.type == TK_RP) {
......@@ -1371,7 +1350,7 @@ _clean:
static int32_t getFileFullPath(SStrToken* pToken, char* output) {
char path[PATH_MAX] = {0};
strncpy(path, pToken->z, pToken->n);
strdequote(path);
stringProcess(path, (int32_t)strlen(path));
wordexp_t full_path;
if (wordexp(path, &full_path, 0) != 0) {
......
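For context, getFileFullPath above strips the quotes from the user-supplied file token and then expands it with POSIX wordexp(). A minimal standalone sketch of that dequote-then-expand pattern (the dequoting here is deliberately naive; the real code goes through stringProcess, and the function name is hypothetical):

#include <stdio.h>
#include <string.h>
#include <wordexp.h>

/* Expand a possibly quoted path (e.g. '~/data.csv') into `output`.
 * Returns 0 on success, -1 on failure. */
static int expandFilePath(const char *token, char *output, size_t outLen) {
  char path[4096] = {0};
  size_t n = strlen(token);

  /* drop one pair of surrounding quotes, if present */
  if (n >= 2 && (token[0] == '\'' || token[0] == '"') && token[n - 1] == token[0]) {
    token++;
    n -= 2;
  }
  if (n >= sizeof(path)) n = sizeof(path) - 1;
  memcpy(path, token, n);

  wordexp_t full_path;
  if (wordexp(path, &full_path, 0) != 0) return -1;
  if (full_path.we_wordc == 0) { wordfree(&full_path); return -1; }

  snprintf(output, outLen, "%s", full_path.we_wordv[0]);
  wordfree(&full_path);
  return 0;
}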
......@@ -20,7 +20,7 @@
#include "tscParseLine.h"
typedef struct {
char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
char sTableName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE];
SHashObj* tagHash;
SHashObj* fieldHash;
SArray* tags; //SArray<SSchema>
......@@ -68,13 +68,13 @@ typedef enum {
} ESchemaAction;
typedef struct {
char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
char sTableName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE];
SArray* tags; //SArray<SSchema>
SArray* fields; //SArray<SSchema>
} SCreateSTableActionInfo;
typedef struct {
char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
char sTableName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE];
SSchema* field;
} SAlterSTableActionInfo;
......@@ -161,14 +161,14 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa
}
SStringBuilder sb; memset(&sb, 0, sizeof(sb));
char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
char sTableName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE] = {0};
strncpy(sTableName, point->stableName, strlen(point->stableName));
//strtolower(sTableName, point->stableName);
taosStringBuilderAppendString(&sb, sTableName);
for (int j = 0; j < point->tagNum; ++j) {
taosStringBuilderAppendChar(&sb, ',');
TAOS_SML_KV* tagKv = point->tags + j;
char tagName[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
char tagName[TSDB_COL_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE] = {0};
strncpy(tagName, tagKv->key, strlen(tagKv->key));
//strtolower(tagName, tagKv->key);
taosStringBuilderAppendString(&sb, tagName);
......@@ -192,8 +192,8 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa
static int32_t buildSmlChildTableName(TAOS_SML_DATA_POINT* point, SSmlLinesInfo* info) {
tscDebug("SML:0x%"PRIx64" taos_sml_insert build child table name", info->id);
char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE;
char childTableName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE];
int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE;
getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
point->childTableName = calloc(1, tableNameLen+1);
strncpy(point->childTableName, childTableName, tableNameLen);
......@@ -251,15 +251,15 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
size_t nameLen = strlen(tsSmlTagNullName);
strncpy(tagNullName, tsSmlTagNullName, nameLen);
addEscapeCharToString(tagNullName, (int32_t)nameLen);
size_t* pTagNullIdx = taosHashGet(pStableSchema->tagHash, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE);
size_t* pTagNullIdx = taosHashGet(pStableSchema->tagHash, tagNullName, nameLen + TS_BACKQUOTE_CHAR_SIZE);
if (!pTagNullIdx) {
SSchema tagNull = {0};
tagNull.type = TSDB_DATA_TYPE_NCHAR;
tagNull.bytes = TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
strncpy(tagNull.name, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE);
strncpy(tagNull.name, tagNullName, nameLen + TS_BACKQUOTE_CHAR_SIZE);
taosArrayPush(pStableSchema->tags, &tagNull);
size_t tagNullIdx = taosArrayGetSize(pStableSchema->tags) - 1;
taosHashPut(pStableSchema->tagHash, tagNull.name, nameLen + TS_ESCAPE_CHAR_SIZE, &tagNullIdx, sizeof(tagNullIdx));
taosHashPut(pStableSchema->tagHash, tagNull.name, nameLen + TS_BACKQUOTE_CHAR_SIZE, &tagNullIdx, sizeof(tagNullIdx));
}
}
......@@ -295,7 +295,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[],
SSchemaAction* action, bool* actionNeeded, SSmlLinesInfo* info) {
char fieldName[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
char fieldName[TSDB_COL_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE] = {0};
strcpy(fieldName, pointColField->name);
size_t* pDbIndex = taosHashGet(dbAttrHash, fieldName, strlen(fieldName));
......@@ -315,7 +315,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash
action->action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
}
memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE);
action->alterSTable.field = pointColField;
*actionNeeded = true;
}
......@@ -326,7 +326,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash
action->action = SCHEMA_ACTION_ADD_COLUMN;
}
memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE);
action->alterSTable.field = pointColField;
*actionNeeded = true;
}
......@@ -572,7 +572,7 @@ static int32_t getSuperTableMetaFromLocalCache(TAOS* taos, char* tableName, STab
pSql->fp = NULL;
registerSqlObj(pSql);
char tableNameBuf[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
char tableNameBuf[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE] = {0};
memcpy(tableNameBuf, tableName, strlen(tableName));
SStrToken tableToken = {.z = tableNameBuf, .n = (uint32_t)strlen(tableName), .type = TK_ID};
tGetToken(tableNameBuf, &tableToken.type);
......@@ -689,7 +689,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
SSchemaAction schemaAction = {0};
schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
memset(&schemaAction.createSTable, 0, sizeof(SCreateSTableActionInfo));
memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE);
schemaAction.createSTable.tags = pointSchema->tags;
schemaAction.createSTable.fields = pointSchema->fields;
applySchemaAction(taos, &schemaAction, info);
......@@ -726,7 +726,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
SSchema* pointColTs = taosArrayGet(pointSchema->fields, 0);
SSchema* dbColTs = taosArrayGet(dbSchema.fields, 0);
memcpy(pointColTs->name, dbColTs->name, TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
memcpy(pointColTs->name, dbColTs->name, TSDB_COL_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE);
for (int j = 1; j < pointFieldSize; ++j) {
SSchema* pointCol = taosArrayGet(pointSchema->fields, j);
......@@ -1398,7 +1398,7 @@ char* addEscapeCharToString(char *str, int32_t len) {
return NULL;
}
memmove(str + 1, str, len);
str[0] = str[len + 1] = TS_ESCAPE_CHAR;
str[0] = str[len + 1] = TS_BACKQUOTE_CHAR;
str[len + 2] = '\0';
return str;
}
......@@ -2129,7 +2129,7 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
pKV->key = calloc(len + TS_ESCAPE_CHAR_SIZE + 1, 1);
pKV->key = calloc(len + TS_BACKQUOTE_CHAR_SIZE + 1, 1);
memcpy(pKV->key, key, len + 1);
addEscapeCharToString(pKV->key, len);
tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
......@@ -2227,7 +2227,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
const char *cur = *index;
int16_t len = 0;
pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE, 1);
pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1);
if (pSml->stableName == NULL){
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -2313,7 +2313,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
}
size_t childTableNameLen = strlen(tsSmlChildTableName);
char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
char childTableName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE] = {0};
if (childTableNameLen != 0) {
memcpy(childTableName, tsSmlChildTableName, childTableNameLen);
addEscapeCharToString(childTableName, (int32_t)(childTableNameLen));
......@@ -2332,7 +2332,7 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
}
if (!isField && childTableNameLen != 0 && strcasecmp(pkv->key, childTableName) == 0) {
smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1);
smlData->childTableName = malloc(pkv->length + TS_BACKQUOTE_CHAR_SIZE + 1);
memcpy(smlData->childTableName, pkv->value, pkv->length);
addEscapeCharToString(smlData->childTableName, (int32_t)pkv->length);
free(pkv->key);
......
......@@ -37,7 +37,7 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index,
const char *cur = *index;
uint16_t len = 0;
pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE, 1);
pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE, 1);
if (pSml->stableName == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -125,7 +125,7 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char
}
tfree(value);
(*pTS)->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
(*pTS)->key = tcalloc(sizeof(key) + TS_BACKQUOTE_CHAR_SIZE, 1);
memcpy((*pTS)->key, key, sizeof(key));
addEscapeCharToString((*pTS)->key, (int32_t)strlen(key));
......@@ -196,7 +196,7 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
}
tfree(value);
pVal->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
pVal->key = tcalloc(sizeof(key) + TS_BACKQUOTE_CHAR_SIZE, 1);
memcpy(pVal->key, key, sizeof(key));
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1;
......@@ -240,7 +240,7 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj
return TSDB_CODE_TSC_DUP_TAG_NAMES;
}
pKV->key = tcalloc(len + TS_ESCAPE_CHAR_SIZE + 1, 1);
pKV->key = tcalloc(len + TS_BACKQUOTE_CHAR_SIZE + 1, 1);
memcpy(pKV->key, key, len + 1);
addEscapeCharToString(pKV->key, len);
//tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
......@@ -307,7 +307,7 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
pkv = *pKVs;
size_t childTableNameLen = strlen(tsSmlChildTableName);
char childTbName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
char childTbName[TSDB_TABLE_NAME_LEN + TS_BACKQUOTE_CHAR_SIZE] = {0};
if (childTableNameLen != 0) {
memcpy(childTbName, tsSmlChildTableName, childTableNameLen);
addEscapeCharToString(childTbName, (int32_t)(childTableNameLen));
......@@ -324,7 +324,7 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
return ret;
}
if (childTableNameLen != 0 && strcasecmp(pkv->key, childTbName) == 0) {
*childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1);
*childTableName = tcalloc(pkv->length + TS_BACKQUOTE_CHAR_SIZE + 1, 1);
memcpy(*childTableName, pkv->value, pkv->length);
(*childTableName)[pkv->length] = '\0';
addEscapeCharToString(*childTableName, pkv->length);
......@@ -500,7 +500,7 @@ static int32_t parseMetricFromJSON(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlL
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
pSml->stableName = tcalloc(stableLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
pSml->stableName = tcalloc(stableLen + TS_BACKQUOTE_CHAR_SIZE + 1, sizeof(char));
if (pSml->stableName == NULL){
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -879,7 +879,7 @@ static int32_t parseMetricValueFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *nu
return ret;
}
pVal->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
pVal->key = tcalloc(sizeof(key) + TS_BACKQUOTE_CHAR_SIZE, 1);
memcpy(pVal->key, key, sizeof(key));
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
......@@ -910,7 +910,7 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs,
return TSDB_CODE_TSC_INVALID_JSON;
}
size_t idLen = strlen(id->valuestring);
*childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
*childTableName = tcalloc(idLen + TS_BACKQUOTE_CHAR_SIZE + 1, sizeof(char));
memcpy(*childTableName, id->valuestring, idLen);
addEscapeCharToString(*childTableName, (int32_t)idLen);
......@@ -948,7 +948,7 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs,
tscError("OTD:0x%"PRIx64" Tag key cannot exceeds %d characters in JSON", info->id, TSDB_COL_NAME_LEN - 1);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
pkv->key = tcalloc(keyLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
pkv->key = tcalloc(keyLen + TS_BACKQUOTE_CHAR_SIZE + 1, sizeof(char));
strncpy(pkv->key, tag->string, keyLen);
addEscapeCharToString(pkv->key, (int32_t)keyLen);
//value
......
......@@ -170,6 +170,16 @@ void tscAddIntoStreamList(SSqlStream *pStream) {
STscObj * pObj = pStream->pSql->pTscObj;
pthread_mutex_lock(&pObj->mutex);
//check if the newly added stream node is already present
//in the streamList, to prevent creating a loop in the list
SSqlStream *iter = pObj->streamList;
while (iter) {
if (pStream == iter) {
pthread_mutex_unlock(&pObj->mutex);
return;
}
iter = iter->next;
}
pStream->next = pObj->streamList;
if (pObj->streamList) pObj->streamList->prev = pStream;
......
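The new guard above walks the existing stream list before prepending, so a node that is already linked can never be inserted a second time (which would otherwise turn the list into a cycle). A minimal sketch of the same guard-before-prepend idea on a simplified doubly linked list (hypothetical node type, not the real SSqlStream):

#include <stddef.h>

typedef struct Node {
  struct Node *prev;
  struct Node *next;
} Node;

/* Prepend `n` to the list headed by `*head`, unless it is already linked. */
static void listPrependOnce(Node **head, Node *n) {
  for (Node *iter = *head; iter != NULL; iter = iter->next) {
    if (iter == n) return;  /* already present: do nothing */
  }
  n->prev = NULL;
  n->next = *head;
  if (*head) (*head)->prev = n;
  *head = n;
}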
......@@ -322,7 +322,7 @@ static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision) {
int64_t time = 0;
strdequote(pVar->pz);
stringProcess(pVar->pz, pVar->nLen);
char* seg = strnchr(pVar->pz, '-', pVar->nLen, false);
if (seg != NULL) {
......@@ -359,7 +359,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strdequote(pPwd->z);
stringProcess(pPwd->z, pPwd->n);
pPwd->n = (uint32_t)strtrim(pPwd->z); // trim space before and after passwords
if (pPwd->n <= 0) {
......@@ -433,7 +433,7 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int fd = open(name, O_RDONLY);
int fd = open(name, O_RDONLY | O_BINARY);
if (fd < 0) {
tscError("open file %s failed, error:%s", name, strerror(errno));
tfree(*buf);
......@@ -477,7 +477,7 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (validateColumnName(createInfo->name.z) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
strdequote(createInfo->name.z);
stringProcess(createInfo->name.z, createInfo->name.n);
if (strlen(createInfo->name.z) >= TSDB_FUNC_NAME_LEN) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
......@@ -485,7 +485,7 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
createInfo->path.z[createInfo->path.n] = 0;
strdequote(createInfo->path.z);
stringProcess(createInfo->path.z, createInfo->path.n);
if (strlen(createInfo->path.z) >= PATH_MAX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
......@@ -543,7 +543,7 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
t0->z[t0->n] = 0;
strdequote(t0->z);
stringProcess(t0->z, t0->n);
if (strlen(t0->z) >= TSDB_FUNC_NAME_LEN) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
......@@ -628,7 +628,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
if (pzName->type == TK_STRING) {
pzName->n = strdequote(pzName->z);
pzName->n = stringProcess(pzName->z, pzName->n);
}
strncpy(pCmd->payload, pzName->z, pzName->n);
} else { // drop user/account
......@@ -718,7 +718,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (id->type == TK_STRING) {
id->n = strdequote(id->z);
id->n = stringProcess(id->z, id->n);
}
break;
}
......@@ -834,7 +834,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* t0 = taosArrayGet(pMiscInfo->a, 0);
SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
t0->n = strdequote(t0->z);
t0->n = stringProcess(t0->z, t0->n);
strncpy(pCfg->ep, t0->z, t0->n);
if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) {
......@@ -1084,8 +1084,9 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql
uint64_t uid = tscExprGet(pQueryInfo, 0)->base.uid;
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
STableMetaInfo* pTableMetaInfo = NULL;
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
if (pTableMetaInfo->pTableMeta->id.uid == uid) {
tableIndex = i;
break;
......@@ -1097,7 +1098,11 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql
}
SSchema s = {.bytes = TSDB_KEYSIZE, .type = TSDB_DATA_TYPE_TIMESTAMP, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX};
if (pTableMetaInfo) {
tstrncpy(s.name, pTableMetaInfo->pTableMeta->schema[PRIMARYKEY_TIMESTAMP_COL_INDEX].name, sizeof(s.name));
} else {
tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name));
}
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, 0);
......@@ -2668,7 +2673,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
const char* msg15 = "parameter is out of range [1, 1000]";
const char* msg16 = "elapsed duration should be greater than or equal to database precision";
const char* msg17 = "the second paramter of diff should be 0 or 1";
const char* msg17 = "elapsed/twa should not be used in nested query if inner query has group by clause";
const char* msg18 = "the second parameter is not an integer";
const char* msg19 = "the second paramter of diff should be 0 or 1";
switch (functionId) {
case TSDB_FUNC_COUNT: {
......@@ -2795,6 +2803,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
//for timeline-related aggregation functions like elapsed and twa, group by in a subquery is not allowed,
//as the calculation result is meaningless when results from different child tables (timelines) are mixed.
if ((functionId == TSDB_FUNC_ELAPSED || functionId == TSDB_FUNC_TWA) && pQueryInfo->pUpstream != NULL) {
size_t numOfUpstreams = taosArrayGetSize(pQueryInfo->pUpstream);
for (int32_t i = 0; i < numOfUpstreams; ++i) {
SQueryInfo* pSub = taosArrayGetP(pQueryInfo->pUpstream, i);
if (pSub->groupbyExpr.numOfGroupCols > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
}
}
}
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
......@@ -2855,6 +2874,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
char val[8] = {0};
int64_t tickPerSec = 0;
char *exprToken = tcalloc(pParamElem[1].pNode->exprToken.n + 1, sizeof(char));
memcpy(exprToken, pParamElem[1].pNode->exprToken.z, pParamElem[1].pNode->exprToken.n);
if (pParamElem[1].pNode->exprToken.type == TK_NOW || strstr(exprToken, "now")) {
tfree(exprToken);  // release the temporary buffer before the early return
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tfree(exprToken);
if ((TSDB_DATA_TYPE_NULL == pParamElem[1].pNode->value.nType) || tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
......@@ -2897,7 +2923,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
int64_t ignoreNegative = GET_INT64_VAL(val);
if (ignoreNegative != 0 && ignoreNegative != 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg19);
}
}
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
......@@ -3150,6 +3176,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char* endptr = NULL;
strtoll(pParamElem[1].pNode->exprToken.z, &endptr, 10);
if ((endptr-pParamElem[1].pNode->exprToken.z != pParamElem[1].pNode->exprToken.n) || errno == ERANGE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg18);
}
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
int64_t numRowsSelected = GET_INT64_VAL(val);
......@@ -3429,7 +3460,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken
pToken->z = tmpTokenBuf;
if (pToken->type == TK_ID) {
tscRmEscapeAndTrimToken(pToken);
pToken->n = stringProcess(pToken->z, pToken->n);
}
for (int16_t i = 0; i < numOfCols; ++i) {
......@@ -3590,11 +3621,11 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// show table/stable like 'xxxx', set the like pattern for show tables
SStrToken* pPattern = &pShowInfo->pattern;
if (pPattern->type != 0) {
if (pPattern->type == TK_ID && pPattern->z[0] == TS_ESCAPE_CHAR) {
if (pPattern->type == TK_ID && pPattern->z[0] == TS_BACKQUOTE_CHAR) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
pPattern->n = strdequote(pPattern->z);
pPattern->n = stringProcess(pPattern->z, pPattern->n);
if (pPattern->n <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
......@@ -3612,7 +3643,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
if (pShowInfo->prefix.type == TK_STRING) {
pShowInfo->prefix.n = strdequote(pShowInfo->prefix.z);
pShowInfo->prefix.n = stringProcess(pShowInfo->prefix.z, pShowInfo->prefix.n);
}
}
return TSDB_CODE_SUCCESS;
......@@ -4926,7 +4957,7 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
}
char *v = strndup(pRight->exprToken.z, pRight->exprToken.n);
int32_t len = strRmquote(v, pRight->exprToken.n);
int32_t len = stringProcess(v, pRight->exprToken.n);
if (len > 0) {
uint32_t type = 0;
tGetToken(v, &type);
......@@ -5040,20 +5071,10 @@ static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_
regex_t regex;
char regErrBuf[256] = {0};
//remove the quote at the begin end of original sql string.
uint32_t lenPattern = pRight->exprToken.n - 2;
char* pattern = malloc(lenPattern + 1);
strncpy(pattern, pRight->exprToken.z+1, lenPattern);
pattern[lenPattern] = '\0';
tfree(pRight->value.pz);
pRight->value.pz = pattern;
pRight->value.nLen = lenPattern;
int cflags = REG_EXTENDED;
if ((errCode = regcomp(&regex, pattern, cflags)) != 0) {
if ((errCode = regcomp(&regex, pRight->value.pz, cflags)) != 0) {
regerror(errCode, &regex, regErrBuf, sizeof(regErrBuf));
tscError("Failed to compile regex pattern %s. reason %s", pattern, regErrBuf);
tscError("Failed to compile regex pattern %s. reason %s", pRight->value.pz, regErrBuf);
return invalidOperationMsg(msgBuf, msg3);
}
regfree(&regex);
......@@ -5247,7 +5268,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
}
if (pRight != NULL && (pRight->tokenId == TK_ID || pRight->tokenId == TK_ARROW)) { // join on tag columns for stable query
if (joinQuery && pRight != NULL && (pRight->tokenId == TK_ID || pRight->tokenId == TK_ARROW)) { // join on tag columns for stable query
if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
......@@ -6053,7 +6074,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
int64_t val = 0;
bool parsed = false;
if (pRight->value.nType == TSDB_DATA_TYPE_BINARY) {
pRight->value.nLen = strdequote(pRight->value.pz);
pRight->value.nLen = stringProcess(pRight->value.pz, pRight->value.nLen);
char* seg = strnchr(pRight->value.pz, '-', pRight->value.nLen, false);
if (seg != NULL) {
......@@ -6108,7 +6129,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
// todo error !!!!
int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char rep[] = {'(', ')', '*', ',', '.', '/', '\\', '+', '-', '%', ' '};
const char rep[] = {'(', ')', '*', ',', '.', '/', '\\', '+', '-', '%', ' ', '`'};
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
char* fieldName = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i)->name;
......@@ -7009,7 +7030,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
//handle Escape character backstick
bool inEscape = false;
if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) {
if (name.z[0] == TS_BACKQUOTE_CHAR && name.z[name.n - 1] == TS_BACKQUOTE_CHAR) {
inEscape = true;
name.type = TK_ID;
}
......@@ -7028,7 +7049,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t nameLen = pItem->pVar.nLen;
if (inEscape) {
memmove(name1, name1 + 1, nameLen);
name1[nameLen - TS_ESCAPE_CHAR_SIZE] = '\0';
name1[nameLen - TS_BACKQUOTE_CHAR_SIZE] = '\0';
}
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
......@@ -7049,7 +7070,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
//handle Escape character backstick
bool inEscape = false;
if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) {
if (name.z[0] == TS_BACKQUOTE_CHAR && name.z[name.n - 1] == TS_BACKQUOTE_CHAR) {
inEscape = true;
name.type = TK_ID;
}
......@@ -7090,8 +7111,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (inEscape) {
memmove(name.z, name.z + 1, name.n);
name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0';
name.n -= TS_ESCAPE_CHAR_SIZE;
name.z[name.n - TS_BACKQUOTE_CHAR_SIZE] = '\0';
name.n -= TS_BACKQUOTE_CHAR_SIZE;
}
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
......@@ -7109,10 +7130,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
//handle Escape character backstick
if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) {
if (name.z[0] == TS_BACKQUOTE_CHAR && name.z[name.n - 1] == TS_BACKQUOTE_CHAR) {
memmove(name.z, name.z + 1, name.n);
name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0';
name.n -= TS_ESCAPE_CHAR_SIZE;
name.z[name.n - TS_BACKQUOTE_CHAR_SIZE] = '\0';
name.n -= TS_BACKQUOTE_CHAR_SIZE;
}
if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsg, msg17);
......@@ -7283,6 +7304,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
const int tokenMonitor = 3;
const int tokenDebugFlag = 4;
const int tokenDebugFlagEnd = 20;
const int tokenOfflineInterval = 21;
const SDNodeDynConfOption cfgOptions[] = {
{"resetLog", 8}, {"resetQueryCache", 15}, {"balance", 7}, {"monitor", 7},
{"debugFlag", 9}, {"monDebugFlag", 12}, {"vDebugFlag", 10}, {"mDebugFlag", 10},
......@@ -7290,6 +7312,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
{"uDebugFlag", 10}, {"tsdbDebugFlag", 13}, {"sDebugflag", 10}, {"rpcDebugFlag", 12},
{"dDebugFlag", 10}, {"mqttDebugFlag", 13}, {"wDebugFlag", 10}, {"tmrDebugFlag", 12},
{"cqDebugFlag", 11},
{"offlineInterval", 15},
};
SStrToken* pOptionToken = taosArrayGet(pOptions->a, 1);
......@@ -7307,7 +7330,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
stringProcess(pValToken->z, pValToken->n);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeId, &dnodeId);
if (!parseOk) {
return TSDB_CODE_TSC_INVALID_OPERATION; // options value is invalid
......@@ -7321,6 +7344,14 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
return TSDB_CODE_TSC_INVALID_OPERATION; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenOfflineInterval].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenOfflineInterval].len == pOptionToken->n)) {
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 1 || val > 600) {
return TSDB_CODE_TSC_INVALID_OPERATION; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else {
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
......@@ -7401,7 +7432,7 @@ int32_t validateColumnName(char* name) {
}
if (token.type == TK_STRING) {
strdequote(token.z);
token.n = stringProcess(token.z, token.n);
strntolower(token.z, token.z, token.n);
token.n = (uint32_t)strtrim(token.z);
......@@ -7412,7 +7443,7 @@ int32_t validateColumnName(char* name) {
return validateColumnName(token.z);
} else if (token.type == TK_ID) {
strRmquoteEscape(name, token.n);
stringProcess(name, token.n);
return TSDB_CODE_SUCCESS;
} else {
if (isNumber(&token)) {
......@@ -7563,7 +7594,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo
SStrToken* pToken = &pCreateDbInfo->precision;
if (pToken->n > 0) {
pToken->n = strdequote(pToken->z);
pToken->n = stringProcess(pToken->z, pToken->n);
if (strncmp(pToken->z, TSDB_TIME_PRECISION_MILLI_STR, pToken->n) == 0 &&
strlen(TSDB_TIME_PRECISION_MILLI_STR) == pToken->n) {
......@@ -8618,12 +8649,8 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
strncpy(tmpTokenBuf, sToken->z, sToken->n);
sToken->z = tmpTokenBuf;
if (TK_STRING == sToken->type) {
tscDequoteAndTrimToken(sToken);
}
if (TK_ID == sToken->type) {
tscRmEscapeAndTrimToken(sToken);
if (TK_STRING == sToken->type || TK_ID == sToken->type) {
sToken->n = stringProcess(sToken->z, sToken->n);
}
tVariantListItem* pItem = taosArrayGet(pValList, i);
......@@ -9562,8 +9589,6 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tscDequoteAndTrimToken(oriName);
bool dbIncluded = false;
char buf[TSDB_TABLE_FNAME_LEN];
SStrToken sTblToken;
......@@ -9585,7 +9610,6 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tscDequoteAndTrimToken(aliasName);
if (tscValidateName(aliasName, false, NULL) != TSDB_CODE_SUCCESS || aliasName->n >= TSDB_TABLE_NAME_LEN) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
......
......@@ -363,6 +363,41 @@ void tscSetFqdnErrorMsg(SSqlObj* pSql, SRpcEpSet* pEpSet) {
}
}
bool shouldRewTableMeta(SSqlObj* pSql, SRpcMsg* rpcMsg) {
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
int32_t cmd = pCmd->command;
if ((cmd != TSDB_SQL_SELECT && cmd != TSDB_SQL_UPDATE_TAGS_VAL)) {
return false;
}
if (rpcMsg->code != TSDB_CODE_TDB_INVALID_TABLE_ID &&
rpcMsg->code != TSDB_CODE_VND_INVALID_VGROUP_ID &&
rpcMsg->code != TSDB_CODE_QRY_INVALID_SCHEMA_VERSION &&
rpcMsg->code != TSDB_CODE_RPC_NETWORK_UNAVAIL &&
rpcMsg->code != TSDB_CODE_APP_NOT_READY ) {
return false;
}
if (rpcMsg->code == TSDB_CODE_QRY_INVALID_SCHEMA_VERSION) {
return true;
}
// 1. super table subquery
// 2. nested queries do not update the table meta; retry parsing the SQL after cleaning up the local table meta/vgroup id buffer
if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) ||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)
|| (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY))) {
return false;
}
// a single-table query error needs to renew the table meta.
return true;
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
......@@ -415,20 +450,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
pSql->cmd.insertParam.schemaAttached = 1;
}
// single table query error need to be handled here.
if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
(((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
// 1. super table subquery
// 2. nest queries are all not updated the tablemeta and retry parse the sql after cleanup local tablemeta/vgroup id buffer
if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)
|| (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY))) {
// do nothing in case of super table subquery
} else {
bool renewTableMeta = shouldRewTableMeta(pSql, rpcMsg);
if (renewTableMeta) {
pSql->retry += 1;
tscWarn("0x%" PRIx64 " it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
......@@ -453,7 +476,6 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
}
}
}
pRes->rspLen = 0;
......@@ -511,6 +533,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
bool shouldFree = tscShouldBeFreed(pSql);
if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
if (rpcMsg->code != TSDB_CODE_SUCCESS) {
pRes->code = rpcMsg->code;
......@@ -1148,21 +1171,21 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sqlLen;
/*
//MSG EXTEND DEMO
pQueryMsg->extend = 1;
STLV *tlv = (STLV *)pMsg;
tlv->type = htons(TLV_TYPE_DUMMY);
tlv->len = htonl(sizeof(int16_t));
*(int16_t *)tlv->value = htons(12345);
tlv->type = htons(TLV_TYPE_META_VERSION);
tlv->len = htonl(sizeof(int16_t) * 2);
*(int16_t*)tlv->value = htons(pTableMeta->sversion);
*(int16_t*)(tlv->value+sizeof(int16_t)) = htons(pTableMeta->tversion);
pMsg += sizeof(*tlv) + ntohl(tlv->len);
tlv = (STLV *)pMsg;
tlv->type = htons(TLV_TYPE_END_MARK);
tlv->len = 0;
pMsg += sizeof(*tlv);
*/
int32_t msgLen = (int32_t)(pMsg - pCmd->payload);
......@@ -1859,6 +1882,13 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute query processing", pSql->self, pSql->self);
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE, pSql->self);
if (pQueryInfo->pQInfo == NULL) {
taosHashCleanup(tableGroupInfo.map);
taosArrayDestroy(&group);
tscAsyncResultOnError(pSql);
pRes->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
return pRes->code;
}
}
uint64_t localQueryId = pSql->self;
......@@ -1866,6 +1896,7 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
bool convertJson = true;
if (pQueryInfo->isStddev == true) convertJson = false;
convertQueryResult(pRes, pQueryInfo, pSql->self, true, convertJson);
pRes->code = pQueryInfo->pQInfo->code;
code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) {
......@@ -2690,7 +2721,9 @@ int tscProcessQueryRsp(SSqlObj *pSql) {
pRes->data = NULL;
tscResetForNextRetrieve(pRes);
tscDebug("0x%"PRIx64" query rsp received, qId:0x%"PRIx64, pSql->self, pRes->qId);
return 0;
}
......@@ -2702,7 +2735,7 @@ static void decompressQueryColData(SSqlObj *pSql, SSqlRes *pRes, SQueryInfo* pQu
compSizes = (int32_t *)(pData + compLen);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, numOfCols - 1);
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, numOfCols - 1);
int32_t offset = tscFieldInfoGetOffset(pQueryInfo, numOfCols - 1);
char *outputBuf = tcalloc(pRes->numOfRows, (pField->bytes + offset));
char *p = outputBuf;
......@@ -2808,6 +2841,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
return 0;
}
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool autocreate) {
......
......@@ -137,7 +137,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
char tmp[TSDB_DB_NAME_LEN] = {0};
tstrncpy(tmp, db, sizeof(tmp));
strdequote(tmp);
stringProcess(tmp, (int32_t)strlen(tmp));
strtolower(pObj->db, tmp);
}
......@@ -547,6 +547,28 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
return pRes->numOfRows;
}
TAOS_ROW *taos_result_block(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pCmd == NULL ||
pRes == NULL ||
pRes->qId == 0 ||
pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
return NULL;
}
return &pRes->urow;
}
int taos_select_db(TAOS *taos, const char *db) {
char sql[256] = {0};
......
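taos_result_block() above is a new exported entry point (it is also added to the symbol export list earlier in this diff); it returns a pointer to the result set's current row block (pRes->urow), or NULL when there is no usable result. A rough usage sketch against the standard taos.h client API, with placeholder connection parameters and table names and error handling trimmed:

#include <stdio.h>
#include <taos.h>

int main(void) {
  taos_init();
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn, "SELECT ts, val FROM db.tb LIMIT 10");
  if (res != NULL && taos_errno(res) == 0) {
    TAOS_ROW row = NULL;
    int numOfRows = taos_fetch_block(res, &row);  /* fetch one block of rows */
    TAOS_ROW *block = taos_result_block(res);     /* direct access to that block */
    printf("fetched %d rows, block %p\n", numOfRows, (void *)block);
  }

  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}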
......@@ -211,6 +211,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
pSql->parseRetry = 0;
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_SUCCESS) {
cbParseSql(pStream, pSql, code);
......@@ -220,6 +221,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tscError("0x%"PRIx64" open stream failed, code:%s", pSql->self, tstrerror(code));
taosReleaseRef(tscObjRef, pSql->self);
free(pStream);
return;
}
// tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
......
......@@ -3902,8 +3902,11 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
STsBufInfo bufInfo = {0};
SQueryParam param = {.pOperator = pa};
/*int32_t code = */initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger);
int32_t code = initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger);
taosArrayDestroy(&pa);
if (code != TSDB_CODE_SUCCESS) {
goto _cleanup;
}
return pQInfo;
......
......@@ -1479,6 +1479,18 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
break;
}
}
// set input data order to param[1]
if(pex->base.functionId == TSDB_FUNC_FIRST || pex->base.functionId == TSDB_FUNC_FIRST_DST ||
pex->base.functionId == TSDB_FUNC_LAST || pex->base.functionId == TSDB_FUNC_LAST_DST) {
// set input order
SQueryInfo* pInputQI = pSqlObjList[0]->cmd.pQueryInfo;
if(pInputQI) {
pex->base.numOfParams = 3;
pex->base.param[2].nType = TSDB_DATA_TYPE_INT;
pex->base.param[2].i64 = pInputQI->order.order;
}
}
}
tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute the main query while all nest queries are ready", pSql->self, pSql->self);
......@@ -2357,7 +2369,7 @@ TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) {
return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, index))->field;
}
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
int32_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index);
assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL);
......@@ -2906,7 +2918,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
*
*/
static int32_t validateQuoteToken(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) {
tscDequoteAndTrimToken(pToken);
if(pToken->z[0] != TS_BACKQUOTE_CHAR) pToken->n = stringProcess(pToken->z, pToken->n);
int32_t k = tGetToken(pToken->z, &pToken->type);
......@@ -2920,94 +2932,6 @@ static int32_t validateQuoteToken(SStrToken* pToken, bool escapeEnabled, bool *d
return TSDB_CODE_SUCCESS;
}
void tscDequoteAndTrimToken(SStrToken* pToken) {
uint32_t first = 0, last = pToken->n;
// trim leading spaces
while (first < last) {
char c = pToken->z[first];
if (c != ' ' && c != '\t') {
break;
}
first++;
}
// trim ending spaces
while (first < last) {
char c = pToken->z[last - 1];
if (c != ' ' && c != '\t') {
break;
}
last--;
}
// there are still at least two characters
if (first < last - 1) {
char c = pToken->z[first];
// dequote
if ((c == '\'' || c == '"') && c == pToken->z[last - 1]) {
first++;
last--;
}
}
// left shift the string and pad spaces
for (uint32_t i = 0; i + first < last; i++) {
pToken->z[i] = pToken->z[first + i];
}
for (uint32_t i = last - first; i < pToken->n; i++) {
pToken->z[i] = ' ';
}
// adjust token length
pToken->n = last - first;
}
void tscRmEscapeAndTrimToken(SStrToken* pToken) {
uint32_t first = 0, last = pToken->n;
// trim leading spaces
while (first < last) {
char c = pToken->z[first];
if (c != ' ' && c != '\t') {
break;
}
first++;
}
// trim ending spaces
while (first < last) {
char c = pToken->z[last - 1];
if (c != ' ' && c != '\t') {
break;
}
last--;
}
// there are still at least two characters
if (first < last - 1) {
char c = pToken->z[first];
// dequote
if ((c == '`') && c == pToken->z[last - 1]) {
first++;
last--;
}
}
// left shift the string and pad spaces
for (uint32_t i = 0; i + first < last; i++) {
pToken->z[i] = pToken->z[first + i];
}
for (uint32_t i = last - first; i < pToken->n; i++) {
pToken->z[i] = ' ';
}
// adjust token length
pToken->n = last - first;
}
int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) {
if (pToken == NULL || pToken->z == NULL
|| (pToken->type != TK_STRING && pToken->type != TK_ID)) {
......@@ -3015,7 +2939,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
}
if ((!escapeEnabled) && pToken->type == TK_ID) {
if (pToken->z[0] == TS_ESCAPE_CHAR) {
if (pToken->z[0] == TS_BACKQUOTE_CHAR) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
......@@ -3033,7 +2957,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
if (pToken->type == TK_STRING) {
tscDequoteAndTrimToken(pToken);
if(pToken->z[0] != TS_BACKQUOTE_CHAR) pToken->n = stringProcess(pToken->z, pToken->n);
// tscStrToLower(pToken->z, pToken->n);
strntolower(pToken->z, pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
......@@ -3053,7 +2977,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
return tscValidateName(pToken, escapeEnabled, NULL);
}
} else if (pToken->type == TK_ID) {
tscRmEscapeAndTrimToken(pToken);
if(pToken->z[0] == TS_BACKQUOTE_CHAR) pToken->n = stringProcess(pToken->z, pToken->n);
if (pToken->n == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
......@@ -3114,7 +3038,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
}
if (escapeEnabled && pToken->type == TK_ID) {
tscRmEscapeAndTrimToken(pToken);
if(pToken->z[0] == TS_BACKQUOTE_CHAR) pToken->n = stringProcess(pToken->z, pToken->n);
}
// re-build the whole name string
......@@ -4303,6 +4227,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
// create sub query to handle the sub query.
SQueryInfo* pq = tscGetQueryInfo(&psub->cmd);
STableMetaInfo* pSubMeta = tscGetMetaInfo(pq, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pSubMeta) &&
pq->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
psub->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
}
executeQuery(psub, pq);
}
......
......@@ -119,6 +119,7 @@ extern int32_t tsdbWalFlushSize;
extern int8_t tsEnableBalance;
extern int8_t tsAlternativeRole;
extern int32_t tsBalanceInterval;
extern int32_t tsOfflineInterval;
extern int32_t tsOfflineThreshold;
extern int32_t tsMnodeEqualVnodeNum;
extern int8_t tsEnableFlowCtrl;
......
......@@ -164,6 +164,7 @@ int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineInterval = 3; // seconds
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
......@@ -288,7 +289,7 @@ char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRES
#endif
// long query death-lock
int8_t tsDeadLockKillQuery = 0;
int8_t tsDeadLockKillQuery = 1;
// default JSON string type
char tsDefaultJSONStrType[7] = "nchar";
......@@ -653,6 +654,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "offlineInterval";
cfg.ptr = &tsOfflineInterval;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 1;
cfg.maxValue = 600;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// 0-any; 1-mnode; 2-vnode
cfg.option = "role";
cfg.ptr = &tsAlternativeRole;
......
......@@ -50,7 +50,7 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
} else {
size_t tlen = MIN(sizeof(s.name), exprStr->n + 1);
tstrncpy(s.name, exprStr->z, tlen);
strdequote(s.name);
stringProcess(s.name, (int32_t)strlen(s.name));
}
return s;
......@@ -163,7 +163,7 @@ char *tableNameGetPosition(SStrToken* pToken, char target) {
return pToken->z + i;
}
if (*(pToken->z + i) == TS_ESCAPE_CHAR) {
if (*(pToken->z + i) == TS_BACKQUOTE_CHAR) {
if (!inQuote) {
inEscape = !inEscape;
}
......@@ -223,7 +223,7 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
char* r = tableNameGetPosition(pToken, sep);
if (r != NULL) { // record the table name token
if (pToken->z[0] == TS_ESCAPE_CHAR && *(r - 1) == TS_ESCAPE_CHAR) {
if (pToken->z[0] == TS_BACKQUOTE_CHAR && *(r - 1) == TS_BACKQUOTE_CHAR) {
pTable->n = (uint32_t)(r - pToken->z - 2);
pTable->z = pToken->z + 1;
} else {
......
......@@ -87,7 +87,7 @@ void tVariantCreateExt(tVariant *pVar, SStrToken *token, int32_t optrType, bool
case TSDB_DATA_TYPE_BINARY: {
pVar->pz = strndup(token->z, token->n);
pVar->nLen = needRmquoteEscape ? strRmquoteEscape(pVar->pz, token->n) : token->n;
pVar->nLen = needRmquoteEscape ? stringProcess(pVar->pz, token->n) : token->n;
break;
}
case TSDB_DATA_TYPE_TIMESTAMP: {
......
src/TDengineDriver/bin/
src/TDengineDriver/obj/
src/test/Cases/bin/
src/test/Cases/obj/
src/test/FunctionTest/bin/
src/test/FunctionTest/obj/
src/test/XUnitTest/bin/
src/test/XUnitTest/obj/
src/test/doc/
NugetPackTest/
\ No newline at end of file
......@@ -11,7 +11,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{CB8E6458-3
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "XUnitTest", "src\test\XUnitTest\XUnitTest.csproj", "{64C0A478-2591-4459-9F8F-A70F37976A41}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Cases", "src\test\Cases\Cases.csproj", "{19A69D26-66BF-4227-97BE-9B087BC76B2F}"
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FunctionTest", "src\test\FunctionTest\FunctionTest.csproj", "{E66B034B-4677-4BFB-8B87-84715D281E21}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
......@@ -50,23 +50,23 @@ Global
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x64.Build.0 = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x86.ActiveCfg = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x86.Build.0 = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x64.ActiveCfg = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x64.Build.0 = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x86.ActiveCfg = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x86.Build.0 = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|Any CPU.Build.0 = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x64.ActiveCfg = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x64.Build.0 = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x86.ActiveCfg = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x86.Build.0 = Release|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Debug|x64.ActiveCfg = Debug|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Debug|x64.Build.0 = Debug|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Debug|x86.ActiveCfg = Debug|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Debug|x86.Build.0 = Debug|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Release|Any CPU.Build.0 = Release|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Release|x64.ActiveCfg = Release|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Release|x64.Build.0 = Release|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Release|x86.ActiveCfg = Release|Any CPU
{E66B034B-4677-4BFB-8B87-84715D281E21}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{5BED7402-0A65-4ED9-A491-C56BFB518045} = {A1FB5B66-E32F-4789-9BE9-042E5BD21087}
{CB8E6458-31E1-4351-B704-1B918E998654} = {A1FB5B66-E32F-4789-9BE9-042E5BD21087}
{64C0A478-2591-4459-9F8F-A70F37976A41} = {CB8E6458-31E1-4351-B704-1B918E998654}
{19A69D26-66BF-4227-97BE-9B087BC76B2F} = {CB8E6458-31E1-4351-B704-1B918E998654}
{E66B034B-4677-4BFB-8B87-84715D281E21} = {CB8E6458-31E1-4351-B704-1B918E998654}
EndGlobalSection
EndGlobal
......@@ -87,7 +87,7 @@ namespace TDengineDriver
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
return "BINARY";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
......
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net5.0</TargetFramework>
<TargetFrameworks>net5;netstandard2.0;net45</TargetFrameworks>
<PackageId>TDengine.Connector</PackageId>
<PackageIcon>logo.jpg</PackageIcon>
<Version>1.0.4</Version>
<Authors>taosdata</Authors>
<Company>www.taosdata.com</Company>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<PackageTags>Taos;Data;Microsoft.NET.Sdk;IOT;bigdata;TDengine;taosdata</PackageTags>
<Description>
This is the C# connector's classlib that lets you connect to TDengine.
This C# connector supports: Linux 64/Windows x64/Windows x86.
For more information, please visit: https://www.taosdata.com
</Description>
<RepositoryUrl>https://github.com/taosdata/TDengine/tree/develop/src/connector/C%2523/src/TDengineDriver</RepositoryUrl>
<NoWarn>CS1591</NoWarn>
</PropertyGroup>
<ItemGroup>
<None Include="resource\logo.jpg" Pack="true" PackagePath="\" />
</ItemGroup>
</Project>
......@@ -436,49 +436,46 @@ namespace TDengineDriver
{
TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND();
int elementCount = arr.Length;
//TypeSize represents the max element length of the incoming arr
//The size of the buffer is typeSize * elementCount
//This buffer is used to store TAOS_MULTI_BIND.buffer
int typeSize = MaxElementLength(arr);
//This intSize is used to calculate the buffer size of the struct TAOS_MULTI_BIND's
//length. The buffer is intSize * elementCount, which is used to store TAOS_MULTI_BIND.length
int intSize = sizeof(int);
//This byteSize is used to calculate the buffer size of the struct TAOS_MULTI_BIND.is_null
//This buffer size is byteSize * elementCount
int byteSize = sizeof(byte);
StringBuilder arrStrBuilder = new StringBuilder(); ;
//TAOS_MULTI_BIND.length
IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount);
//TAOS_MULTI_BIND.is_null
IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount);
//TAOS_MULTI_BIND.buffer
IntPtr uNcharBuff = Marshal.AllocHGlobal(typeSize * elementCount);
for (int i = 0; i < elementCount; i++)
{
int itemLength = 0;
byte[] decodeByte = GetStringEncodeByte(arr[i]);
itemLength = decodeByte.Length;
// if the element is not null and its length is less than typeSize,
// pad the memory with the default char, since the array element memory needs to be aligned.
if (!String.IsNullOrEmpty(arr[i]) && typeSize == itemLength)
if (!String.IsNullOrEmpty(arr[i]))
{
arrStrBuilder.Append(arr[i]);
}
else if (!String.IsNullOrEmpty(arr[i]) && typeSize > itemLength)
for (int j = 0; j < itemLength; j++)
{
arrStrBuilder.Append(arr[i]);
arrStrBuilder.Append(AlignCharArr(typeSize - itemLength));
//Copy the decoded bytes one by one
Marshal.WriteByte(uNcharBuff, i * typeSize + j, decodeByte[j]);
}
else
{
// if it is a null value, fill the memory with default values.
arrStrBuilder.Append(AlignCharArr(typeSize));
}
//set TAOS_MULTI_BIND.length
Marshal.WriteInt32(lengthArr, intSize * i, typeSize);
//set TAOS_MULTI_BIND.is_null
//Set TAOS_MULTI_BIND.length
Marshal.WriteInt32(lengthArr, intSize * i, itemLength);
//Set TAOS_MULTI_BIND.is_null
Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(String.IsNullOrEmpty(arr[i]) ? 1 : 0));
}
//set TAOS_MULTI_BIND.buffer
IntPtr uBinaryBuff = (IntPtr)Marshal.StringToHGlobalAnsi(arrStrBuilder.ToString());
//config TAOS_MULTI_BIND
//Config TAOS_MULTI_BIND
multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_BINARY;
multiBind.buffer = uBinaryBuff;
multiBind.buffer = uNcharBuff;
multiBind.buffer_length = (ulong)typeSize;
multiBind.length = lengthArr;
multiBind.is_null = nullArr;
......@@ -491,47 +488,43 @@ namespace TDengineDriver
{
TAOS_MULTI_BIND multiBind = new TAOS_MULTI_BIND();
int elementCount = arr.Length;
//TypeSize represents the max element length of the incoming arr
//The size of the buffer is typeSize * elementCount
//This buffer is used to store TAOS_MULTI_BIND.buffer
int typeSize = MaxElementLength(arr);
//This intSize is used to calculate the buffer size of the struct TAOS_MULTI_BIND's
//length. The buffer is intSize * elementCount, which is used to store TAOS_MULTI_BIND.length
int intSize = sizeof(int);
//This byteSize is used to calculate the buffer size of the struct TAOS_MULTI_BIND.is_null
//This buffer size is byteSize * elementCount
int byteSize = sizeof(byte);
StringBuilder arrStrBuilder = new StringBuilder(); ;
//TAOS_MULTI_BIND.length
IntPtr lengthArr = Marshal.AllocHGlobal(intSize * elementCount);
//TAOS_MULTI_BIND.is_null
IntPtr nullArr = Marshal.AllocHGlobal(byteSize * elementCount);
//TAOS_MULTI_BIND.buffer
IntPtr uNcharBuff = Marshal.AllocHGlobal(typeSize * elementCount);
for (int i = 0; i < elementCount; i++)
{
int itemLength = 0;
byte[] decodeByte = GetStringEncodeByte(arr[i]);
itemLength = decodeByte.Length;
// if the element is not null and its length is less than typeSize,
// pad the memory with the default char, since the array element memory needs to be aligned.
if (!String.IsNullOrEmpty(arr[i]) && typeSize == itemLength)
if (!String.IsNullOrEmpty(arr[i]))
{
arrStrBuilder.Append(arr[i]);
}
else if (!String.IsNullOrEmpty(arr[i]) && typeSize > itemLength)
for (int j = 0; j < itemLength; j++)
{
arrStrBuilder.Append(arr[i]);
arrStrBuilder.Append(AlignCharArr(typeSize - itemLength));
//Copy the decoded bytes one by one
Marshal.WriteByte(uNcharBuff, i * typeSize + j, decodeByte[j]);
}
else
{
// if it is a null value, fill the memory with default values.
arrStrBuilder.Append(AlignCharArr(typeSize));
}
//set TAOS_MULTI_BIND.length
Marshal.WriteInt32(lengthArr, intSize * i, typeSize);
//set TAOS_MULTI_BIND.is_null
//Set TAOS_MULTI_BIND.length
Marshal.WriteInt32(lengthArr, intSize * i, itemLength);
//Set TAOS_MULTI_BIND.is_null
Marshal.WriteByte(nullArr, byteSize * i, Convert.ToByte(String.IsNullOrEmpty(arr[i]) ? 1 : 0));
}
//set TAOS_MULTI_BIND.buffer
IntPtr uNcharBuff = (IntPtr)Marshal.StringToHGlobalAnsi(arrStrBuilder.ToString());
//config TAOS_MULTI_BIND
//Config TAOS_MULTI_BIND
multiBind.buffer_type = (int)TDengineDataType.TSDB_DATA_TYPE_NCHAR;
multiBind.buffer = uNcharBuff;
multiBind.buffer_length = (ulong)typeSize;
......@@ -614,7 +607,7 @@ namespace TDengineDriver
private static Byte[] GetStringEncodeByte(string str)
{
Byte[] strToBytes = null;
if(String.IsNullOrEmpty(str))
if (String.IsNullOrEmpty(str))
{
strToBytes = System.Text.Encoding.Default.GetBytes(String.Empty);
}
......
using System;
using Test.UtilsTools;
using System.Collections.Generic;
namespace Cases
{
public class FetchLengthCase
{
/// <author>xiaolei</author>
/// <Name>TestRetrieveBinary</Name>
/// <describe>TD-12103 C# connector fetch_row with binary data retrieving error</describe>
/// <filename>FetchLength.cs</filename>
/// <result>pass or failed </result>
public void TestRetrieveBinary(IntPtr conn)
{
string sql1 = "create stable stb1 (ts timestamp, name binary(10)) tags(n int);";
string sql2 = "insert into tb1 using stb1 tags(1) values(now, 'log');";
string sql3 = "insert into tb2 using stb1 tags(2) values(now, 'test');";
string sql4 = "insert into tb3 using stb1 tags(3) values(now, 'db02');";
string sql5 = "insert into tb4 using stb1 tags(4) values(now, 'db3');";
string sql6 = "select distinct(name) from stb1;";//
UtilsTools.ExecuteQuery(conn, sql1);
UtilsTools.ExecuteQuery(conn, sql2);
UtilsTools.ExecuteQuery(conn, sql3);
UtilsTools.ExecuteQuery(conn, sql4);
UtilsTools.ExecuteQuery(conn, sql5);
IntPtr resPtr = IntPtr.Zero;
resPtr = UtilsTools.ExecuteQuery(conn, sql6);
List<List<string>> result = UtilsTools.GetResultSet(resPtr);
List<string> colname = result[0];
List<string> data = result[1];
UtilsTools.AssertEqual("db3", data[0]);
UtilsTools.AssertEqual("log", data[1]);
UtilsTools.AssertEqual("db02", data[2]);
UtilsTools.AssertEqual("test", data[3]);
}
}
}
using System;
using Test.UtilsTools;
using Cases;
namespace Cases.EntryPoint
{
class Program
{
static void Main(string[] args)
{
IntPtr conn = IntPtr.Zero;
IntPtr stmt = IntPtr.Zero;
IntPtr res = IntPtr.Zero;
conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0);
UtilsTools.ExecuteUpdate(conn, "drop database if exists csharp");
UtilsTools.ExecuteUpdate(conn, "create database if not exists csharp keep 3650");
UtilsTools.ExecuteUpdate(conn, "use csharp");
Console.WriteLine("====================StableColumnByColumn===================");
StableColumnByColumn columnByColumn = new StableColumnByColumn();
columnByColumn.Test(conn, "stablecolumnbycolumn");
Console.WriteLine("====================StmtStableQuery===================");
StmtStableQuery stmtStableQuery = new StmtStableQuery();
stmtStableQuery.Test(conn, "stablecolumnbycolumn");
Console.WriteLine("====================StableMutipleLine===================");
StableMutipleLine mutipleLine = new StableMutipleLine();
mutipleLine.Test(conn, "stablemutipleline");
//================================================================================
Console.WriteLine("====================NtableSingleLine===================");
NtableSingleLine ntableSingleLine = new NtableSingleLine();
ntableSingleLine.Test(conn, "stablesingleline");
IntPtr resPtr = UtilsTools.ExecuteQuery(conn, "select * from stablesingleline ");
UtilsTools.DisplayRes(resPtr);
Console.WriteLine("====================NtableMutipleLine===================");
NtableMutipleLine ntableMutipleLine = new NtableMutipleLine();
ntableMutipleLine.Test(conn, "ntablemutipleline");
Console.WriteLine("====================StmtNtableQuery===================");
StmtNtableQuery stmtNtableQuery = new StmtNtableQuery();
stmtNtableQuery.Test(conn, "ntablemutipleline");
Console.WriteLine("====================NtableColumnByColumn===================");
NtableColumnByColumn ntableColumnByColumn = new NtableColumnByColumn();
ntableColumnByColumn.Test(conn, "ntablecolumnbycolumn");
Console.WriteLine("====================fetchfeilds===================");
FetchFields fetchFields = new FetchFields();
fetchFields.Test(conn, "fetchfeilds");
StableStmtCases stableStmtCases = new StableStmtCases();
Console.WriteLine("====================stableStmtCases.TestBindSingleLineCn===================");
stableStmtCases.TestBindSingleLineCn(conn, "stablestmtcasestestbindsinglelinecn");
Console.WriteLine("====================stableStmtCases.TestBindColumnCn===================");
stableStmtCases.TestBindColumnCn(conn, " stablestmtcasestestbindcolumncn");
Console.WriteLine("====================stableStmtCases.TestBindMultiLineCn===================");
stableStmtCases.TestBindMultiLineCn(conn, "stablestmtcasestestbindmultilinecn");
NormalTableStmtCases normalTableStmtCases = new NormalTableStmtCases();
Console.WriteLine("====================normalTableStmtCases.TestBindSingleLineCn===================");
normalTableStmtCases.TestBindSingleLineCn(conn, "normaltablestmtcasestestbindsinglelinecn");
Console.WriteLine("====================normalTableStmtCases.TestBindColumnCn===================");
normalTableStmtCases.TestBindColumnCn(conn, "normaltablestmtcasestestbindcolumncn");
Console.WriteLine("====================normalTableStmtCases.TestBindMultiLineCn===================");
normalTableStmtCases.TestBindMultiLineCn(conn, "normaltablestmtcasestestbindmultilinecn");
Console.WriteLine("===================JsonTagTest====================");
JsonTagTest jsonTagTest = new JsonTagTest();
jsonTagTest.Test(conn);
Console.WriteLine("====================fetchLengthCase===================");
FetchLengthCase fetchLengthCase = new FetchLengthCase();
fetchLengthCase.TestRetrieveBinary(conn);
UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
UtilsTools.CloseConnection(conn);
UtilsTools.ExitProgram();
}
}
}
using System;
using Test.UtilsTools;
using TDengineDriver;
using Test.UtilsTools.DataSource;
namespace Cases
{
public class StableMutipleLine
{
TAOS_BIND[] tags = DataSource.getTags();
TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindArr();
public void Test(IntPtr conn, string tableName)
{
String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));";
String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
UtilsTools.ExecuteUpdate(conn, createTb);
IntPtr stmt = StmtUtilTools.StmtInit(conn);
StmtUtilTools.StmtPrepare(stmt, insertSql);
StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags);
StmtUtilTools.BindParamBatch(stmt, mbind);
StmtUtilTools.AddBatch(stmt);
StmtUtilTools.StmtExecute(stmt);
StmtUtilTools.StmtClose(stmt);
DataSource.FreeTaosBind(tags);
DataSource.FreeTaosMBind(mbind);
}
}
public class StableColumnByColumn
{
DataSource data = new DataSource();
TAOS_BIND[] tags = DataSource.getTags();
TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindArr();
public void Test(IntPtr conn, string tableName)
{
String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));";
String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
UtilsTools.ExecuteUpdate(conn, createTb);
IntPtr stmt = StmtUtilTools.StmtInit(conn);
StmtUtilTools.StmtPrepare(stmt, insertSql);
StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[0], 0);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[1], 1);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[2], 2);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[3], 3);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[4], 4);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[5], 5);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[6], 6);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[7], 7);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[8], 8);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[9], 9);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[10], 10);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[11], 11);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[12], 12);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[13], 13);
StmtUtilTools.AddBatch(stmt);
StmtUtilTools.StmtExecute(stmt);
StmtUtilTools.StmtClose(stmt);
DataSource.FreeTaosBind(tags);
DataSource.FreeTaosMBind(mbind);
}
}
public class StableStmtCases
{
/// <author>xiaolei</author>
/// <Name>StableStmtCases.TestBindSingleLineCn</Name>
/// <describe>Test stmt insert of a single line containing Chinese characters into a stable</describe>
/// <filename>StmtSTable.cs</filename>
/// <result>pass or failed </result>
public void TestBindSingleLineCn(IntPtr conn, string tableName)
{
TAOS_BIND[] tags = DataSource.getCNTags();
TAOS_BIND[] binds = DataSource.getNtableCNRow();
String createTb = "create stable " + tableName + " (ts timestamp,v1 tinyint,v2 smallint,v4 int,v8 bigint,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,f4 float,f8 double,bin binary(200),blob nchar(200),b bool,nilcol int)tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));";
String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
UtilsTools.ExecuteUpdate(conn, createTb);
IntPtr stmt = StmtUtilTools.StmtInit(conn);
StmtUtilTools.StmtPrepare(stmt, insertSql);
StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags);
StmtUtilTools.BindParam(stmt, binds);
StmtUtilTools.AddBatch(stmt);
StmtUtilTools.StmtExecute(stmt);
StmtUtilTools.StmtClose(stmt);
DataSource.FreeTaosBind(tags);
DataSource.FreeTaosBind(binds);
string querySql = "select * from " + tableName;
IntPtr res = UtilsTools.ExecuteQuery(conn, querySql);
UtilsTools.DisplayRes(res);
}
/// <author>xiaolei</author>
/// <Name>StableStmtCases.TestBindColumnCn</Name>
/// <describe>Test stmt insert of Chinese characters into a stable, bound column by column</describe>
/// <filename>StmtSTable.cs</filename>
/// <result>pass or failed </result>
public void TestBindColumnCn(IntPtr conn, string tableName)
{
DataSource data = new DataSource();
TAOS_BIND[] tags = DataSource.getCNTags();
TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindCNArr();
String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));";
String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
UtilsTools.ExecuteUpdate(conn, createTb);
IntPtr stmt = StmtUtilTools.StmtInit(conn);
StmtUtilTools.StmtPrepare(stmt, insertSql);
StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[0], 0);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[1], 1);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[2], 2);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[3], 3);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[4], 4);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[5], 5);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[6], 6);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[7], 7);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[8], 8);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[9], 9);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[10], 10);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[11], 11);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[12], 12);
StmtUtilTools.BindSingleParamBatch(stmt, mbind[13], 13);
StmtUtilTools.AddBatch(stmt);
StmtUtilTools.StmtExecute(stmt);
StmtUtilTools.StmtClose(stmt);
DataSource.FreeTaosBind(tags);
DataSource.FreeTaosMBind(mbind);
string querySql = "select * from " + tableName;
IntPtr res = UtilsTools.ExecuteQuery(conn, querySql);
UtilsTools.DisplayRes(res);
}
/// <author>xiaolei</author>
/// <Name>StableStmtCases.TestBindMultiLineCn</Name>
/// <describe>Test stmt insert of multiple lines containing Chinese characters into a stable</describe>
/// <filename>StmtSTable.cs</filename>
/// <result>pass or failed </result>
public void TestBindMultiLineCn(IntPtr conn, string tableName)
{
TAOS_BIND[] tags = DataSource.getCNTags();
TAOS_MULTI_BIND[] mbind = DataSource.GetMultiBindCNArr();
String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(bo bool,tt tinyint,si smallint,ii int,bi bigint,tu tinyint unsigned,su smallint unsigned,iu int unsigned,bu bigint unsigned,ff float ,dd double ,bb binary(200),nc nchar(200));";
String insertSql = "insert into ? using " + tableName + " tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
UtilsTools.ExecuteUpdate(conn, createTb);
IntPtr stmt = StmtUtilTools.StmtInit(conn);
StmtUtilTools.StmtPrepare(stmt, insertSql);
StmtUtilTools.SetTableNameTags(stmt, tableName + "_t1", tags);
StmtUtilTools.BindParamBatch(stmt, mbind);
StmtUtilTools.AddBatch(stmt);
StmtUtilTools.StmtExecute(stmt);
StmtUtilTools.StmtClose(stmt);
DataSource.FreeTaosBind(tags);
DataSource.FreeTaosMBind(mbind);
string querySql = "select * from " + tableName;
IntPtr res = UtilsTools.ExecuteQuery(conn, querySql);
UtilsTools.DisplayRes(res);
}
}
}
\ No newline at end of file
using System;
using Test.UtilsTools;
using TDengineDriver;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Cases
{
public class FetchFields
{
public void Test(IntPtr conn, string tableName)
{
IntPtr res = IntPtr.Zero;
String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(jsontag json);";
String insertSql = "insert into " + tableName + "_t1 using " + tableName + " tags('{\"k1\": \"v1\"}') values(1637064040000,true,1,2,3,4,5,6,7,8,9,10,'XI','XII')";
String selectSql = "select * from " + tableName;
String dropSql = "drop table " + tableName;
UtilsTools.ExecuteQuery(conn, createTb);
UtilsTools.ExecuteQuery(conn, insertSql);
res = UtilsTools.ExecuteQuery(conn, selectSql);
UtilsTools.ExecuteQuery(conn, dropSql);
List<TDengineMeta> metas = new List<TDengineMeta>();
metas = TDengine.FetchFields(res);
if (metas.Capacity == 0)
{
Console.WriteLine("empty result");
}
else
{
foreach(TDengineMeta meta in metas){
Console.WriteLine("col_name:{0},col_type_code:{1},col_type:{2}({3})",meta.name,meta.type,meta.TypeName(),meta.size);
}
}
}
}
}
using System;
using Test.UtilsTools;
using TDengineDriver;
using System.Collections.Generic;
namespace Test.UtilsTools.DataSource
{
public class DataSource
......@@ -23,7 +23,10 @@ namespace Test.UtilsTools.DataSource
public static string[] binaryArrCn = new string[5] { "涛思数据", String.Empty, null, "taosdata涛思数据", "涛思数据TDengine" };
public static string[] NcharArrCn = new string[5] { "涛思数据", null, "taosdata涛思数据", "涛思数据TDengine", String.Empty };
public static TAOS_BIND[] getTags()
// Construct a TAOS_BIND array which contains normal characters.
// For stmt bind tags, this will be used as tag info
public static TAOS_BIND[] GetTags()
{
TAOS_BIND[] binds = new TAOS_BIND[13];
binds[0] = TaosBind.BindBool(true);
......@@ -41,8 +44,59 @@ namespace Test.UtilsTools.DataSource
binds[12] = TaosBind.BindNchar("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}");
return binds;
}
// Get the tag data,
// which will be retrieved as a string list
private static List<String> GetTagData()
{
List<String> tagData = new List<String>();
tagData.Add(true.ToString());
tagData.Add((-2).ToString());
tagData.Add((short.MaxValue).ToString());
tagData.Add((int.MaxValue).ToString());
tagData.Add((Int64.MaxValue).ToString());
tagData.Add((byte.MaxValue - 1).ToString());
tagData.Add((UInt16.MaxValue - 1).ToString());
tagData.Add((uint.MinValue + 1).ToString());
tagData.Add((UInt64.MinValue + 1).ToString());
tagData.Add((11.11F).ToString());
tagData.Add((22.22D).ToString());
tagData.Add("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}");
tagData.Add("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}");
return tagData;
}
public static List<string> GetMultiBindStableRowData()
{
List<string> rowData = new List<String>();
List<string> tagData = GetTagData();
for (int i = 0; i < tsArr.Length; i++)
{
rowData.Add(tsArr[i].ToString());
rowData.Add(boolArr[i].Equals(null) ? "NULL" : boolArr[i].ToString());
rowData.Add(tinyIntArr[i].Equals(null) ? "NULL" : tinyIntArr[i].ToString());
rowData.Add(shortArr[i].Equals(null) ? "NULL" : shortArr[i].ToString());
rowData.Add(intArr[i].Equals(null) ? "NULL" : intArr[i].ToString());
rowData.Add(longArr[i].Equals(null) ? "NULL" : longArr[i].ToString());
rowData.Add(floatArr[i].Equals(null) ? "NULL" : floatArr[i].ToString());
rowData.Add(doubleArr[i].Equals(null) ? "NULL" : doubleArr[i].ToString());
rowData.Add(uTinyIntArr[i].Equals(null) ? "NULL" : uTinyIntArr[i].ToString());
rowData.Add(uShortArr[i].Equals(null) ? "NULL" : uShortArr[i].ToString());
rowData.Add(uIntArr[i].Equals(null) ? "NULL" : uIntArr[i].ToString());
rowData.Add(uLongArr[i].Equals(null) ? "NULL" : uLongArr[i].ToString());
rowData.Add(String.IsNullOrEmpty(binaryArr[i]) ? "NULL" : binaryArr[i]);
rowData.Add(String.IsNullOrEmpty(ncharArr[i]) ? "NULL" : ncharArr[i]);
rowData.AddRange(tagData);
// Console.WriteLine("binaryArrCn[{0}]:{1},ncharArr[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? "NULL" : binaryArrCn[i],String.IsNullOrEmpty(ncharArr[i]) ? "NULL" : NcharArrCn[i]);
// Console.WriteLine("binaryArrCn[{0}]:{1},ncharArr[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? 0 :binaryArrCn[i].Length, String.IsNullOrEmpty(ncharArr[i]) ? 0 : NcharArrCn[i].Length);
// Console.WriteLine("========");
}
return rowData;
public static TAOS_BIND[] getCNTags()
}
// Construct a TAOS_BIND array which contains Chinese characters.
// For stmt bind tags, this will be used as tag info
public static TAOS_BIND[] GetCNTags()
{
TAOS_BIND[] binds = new TAOS_BIND[13];
binds[0] = TaosBind.BindBool(true);
......@@ -57,11 +111,32 @@ namespace Test.UtilsTools.DataSource
binds[9] = TaosBind.BindFloat(11.11F);
binds[10] = TaosBind.BindDouble(22.22D);
binds[11] = TaosBind.BindBinary("TDengine涛思数据");
binds[12] = TaosBind.BindNchar("涛思");
binds[12] = TaosBind.BindNchar("涛思数据taos");
return binds;
}
public static TAOS_BIND[] getNtableCNRow()
// Get the tag data,
// which will be retrieved as a string list
private static List<String> GetTagCnData()
{
List<String> tagData = new List<String>();
tagData.Add(true.ToString());
tagData.Add((-2).ToString());
tagData.Add((short.MaxValue - 1).ToString());
tagData.Add((int.MaxValue - 1).ToString());
tagData.Add((Int64.MaxValue - 1).ToString());
tagData.Add((byte.MaxValue - 1).ToString());
tagData.Add((UInt16.MaxValue - 1).ToString());
tagData.Add((uint.MinValue + 1).ToString());
tagData.Add((UInt64.MinValue + 1).ToString());
tagData.Add((11.11F).ToString());
tagData.Add((22.22D).ToString());
tagData.Add("TDengine涛思数据");
tagData.Add("涛思数据taos");
return tagData;
}
// A line of data containing CN characters,
// constructed as a TAOS_BIND array
public static TAOS_BIND[] GetNtableCNRow()
{
TAOS_BIND[] binds = new TAOS_BIND[15];
binds[0] = TaosBind.BindTimestamp(1637064040000);
......@@ -81,8 +156,40 @@ namespace Test.UtilsTools.DataSource
binds[14] = TaosBind.BindNil();
return binds;
}
//Get a list of the data that will be inserted into the table
public static List<String> GetNtableCNRowData()
{
var data = new List<string>{
"1637064040000",
"-2",
short.MaxValue.ToString(),
int.MaxValue.ToString(),
Int64.MaxValue.ToString(),
(byte.MaxValue - 1).ToString(),
(UInt16.MaxValue - 1).ToString(),
(uint.MinValue + 1).ToString(),
(UInt64.MinValue + 1).ToString(),
(11.11F).ToString(),
(22.22D).ToString(),
"TDengine数据",
"taosdata涛思数据",
"True",
"NULL"
};
return data;
}
// Get the data values and tag values which contain Chinese characters,
// retrieved as a string list. This is a single line.
public static List<String> GetStableCNRowData()
{
List<String> columnData = GetNtableCNRowData();
List<String> tagData = GetTagCnData();
columnData.AddRange(tagData);
return columnData;
}
public static TAOS_BIND[] getNtableRow()
// A line of data without CN characters
public static TAOS_BIND[] GetNtableRow()
{
TAOS_BIND[] binds = new TAOS_BIND[15];
binds[0] = TaosBind.BindTimestamp(1637064040000);
......@@ -102,6 +209,31 @@ namespace Test.UtilsTools.DataSource
binds[14] = TaosBind.BindNil();
return binds;
}
// A list of data used as expectResData. The values are equal to GetNtableRow()
public static List<String> GetNtableRowData()
{
var data = new List<string>{
"1637064040000",
"-2",
short.MaxValue.ToString(),
int.MaxValue.ToString(),
(Int64.MaxValue).ToString(),
(byte.MaxValue - 1).ToString(),
(UInt16.MaxValue - 1).ToString(),
(uint.MinValue + 1).ToString(),
(UInt64.MinValue + 1).ToString(),
(11.11F).ToString(),
(22.22D).ToString(),
"qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}",
"qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKZXCVBNM`1234567890-=+_)(*&^%$#@!~[];,./<>?:{}",
true.ToString(),
"NULL"
};
return data;
}
// Five lines of data, constructed as a taos_multi_bind array.
// There aren't any CN characters
public static TAOS_MULTI_BIND[] GetMultiBindArr()
{
TAOS_MULTI_BIND[] mBinds = new TAOS_MULTI_BIND[14];
......@@ -121,6 +253,35 @@ namespace Test.UtilsTools.DataSource
mBinds[13] = TaosMultiBind.MultiBindNchar(ncharArr);
return mBinds;
}
// A list of data used as expectResData. The values are equal to GetMultiBindArr()
public static List<string> GetMultiBindResData()
{
var rowData = new List<string>();
for (int i = 0; i < tsArr.Length; i++)
{
rowData.Add(tsArr[i].ToString());
rowData.Add(boolArr[i].Equals(null) ? "NULL" : boolArr[i].ToString());
rowData.Add(tinyIntArr[i].Equals(null) ? "NULL" : tinyIntArr[i].ToString());
rowData.Add(shortArr[i].Equals(null) ? "NULL" : shortArr[i].ToString());
rowData.Add(intArr[i].Equals(null) ? "NULL" : intArr[i].ToString());
rowData.Add(longArr[i].Equals(null) ? "NULL" : longArr[i].ToString());
rowData.Add(floatArr[i].Equals(null) ? "NULL" : floatArr[i].ToString());
rowData.Add(doubleArr[i].Equals(null) ? "NULL" : doubleArr[i].ToString());
rowData.Add(uTinyIntArr[i].Equals(null) ? "NULL" : uTinyIntArr[i].ToString());
rowData.Add(uShortArr[i].Equals(null) ? "NULL" : uShortArr[i].ToString());
rowData.Add(uIntArr[i].Equals(null) ? "NULL" : uIntArr[i].ToString());
rowData.Add(uLongArr[i].Equals(null) ? "NULL" : uLongArr[i].ToString());
rowData.Add(String.IsNullOrEmpty(binaryArr[i]) ? "NULL" : binaryArr[i]);
rowData.Add(String.IsNullOrEmpty(ncharArr[i]) ? "NULL" : ncharArr[i]);
// Console.WriteLine("binaryArrCn[{0}]:{1},NcharArrCn[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? "NULL" : binaryArrCn[i],String.IsNullOrEmpty(NcharArrCn[i]) ? "NULL" : NcharArrCn[i]);
// Console.WriteLine("binaryArrCn[{0}]:{1},NcharArrCn[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? 0 :binaryArrCn[i].Length, String.IsNullOrEmpty(NcharArrCn[i]) ? 0 : NcharArrCn[i].Length);
// Console.WriteLine("========");
}
return rowData;
}
// Five lines of data, constructed as a taos_multi_bind array.
// There are some CN characters and letters.
public static TAOS_MULTI_BIND[] GetMultiBindCNArr()
{
TAOS_MULTI_BIND[] mBinds = new TAOS_MULTI_BIND[14];
......@@ -140,6 +301,62 @@ namespace Test.UtilsTools.DataSource
mBinds[13] = TaosMultiBind.MultiBindNchar(NcharArrCn);
return mBinds;
}
// A list of data used as expectResData. The values are equal to GetMultiBindCNArr()
public static List<string> GetMultiBindCNRowData()
{
var rowData = new List<string>();
for (int i = 0; i < tsArr.Length; i++)
{
rowData.Add(tsArr[i].ToString());
rowData.Add(boolArr[i].Equals(null) ? "NULL" : boolArr[i].ToString());
rowData.Add(tinyIntArr[i].Equals(null) ? "NULL" : tinyIntArr[i].ToString());
rowData.Add(shortArr[i].Equals(null) ? "NULL" : shortArr[i].ToString());
rowData.Add(intArr[i].Equals(null) ? "NULL" : intArr[i].ToString());
rowData.Add(longArr[i].Equals(null) ? "NULL" : longArr[i].ToString());
rowData.Add(floatArr[i].Equals(null) ? "NULL" : floatArr[i].ToString());
rowData.Add(doubleArr[i].Equals(null) ? "NULL" : doubleArr[i].ToString());
rowData.Add(uTinyIntArr[i].Equals(null) ? "NULL" : uTinyIntArr[i].ToString());
rowData.Add(uShortArr[i].Equals(null) ? "NULL" : uShortArr[i].ToString());
rowData.Add(uIntArr[i].Equals(null) ? "NULL" : uIntArr[i].ToString());
rowData.Add(uLongArr[i].Equals(null) ? "NULL" : uLongArr[i].ToString());
rowData.Add(String.IsNullOrEmpty(binaryArrCn[i]) ? "NULL" : binaryArrCn[i]);
rowData.Add(String.IsNullOrEmpty(NcharArrCn[i]) ? "NULL" : NcharArrCn[i]);
// Console.WriteLine("binaryArrCn[{0}]:{1},NcharArrCn[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? "NULL" : binaryArrCn[i],String.IsNullOrEmpty(NcharArrCn[i]) ? "NULL" : NcharArrCn[i]);
// Console.WriteLine("binaryArrCn[{0}]:{1},NcharArrCn[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? 0 :binaryArrCn[i].Length, String.IsNullOrEmpty(NcharArrCn[i]) ? 0 : NcharArrCn[i].Length);
// Console.WriteLine("========");
}
return rowData;
}
public static List<String> GetMultiBindStableCNRowData()
{
List<String> columnData = new List<string>();
List<String> tagData = GetTagCnData();
for (int i = 0; i < tsArr.Length; i++)
{
columnData.Add(tsArr[i].ToString());
columnData.Add(boolArr[i].Equals(null) ? "NULL" : boolArr[i].ToString());
columnData.Add(tinyIntArr[i].Equals(null) ? "NULL" : tinyIntArr[i].ToString());
columnData.Add(shortArr[i].Equals(null) ? "NULL" : shortArr[i].ToString());
columnData.Add(intArr[i].Equals(null) ? "NULL" : intArr[i].ToString());
columnData.Add(longArr[i].Equals(null) ? "NULL" : longArr[i].ToString());
columnData.Add(floatArr[i].Equals(null) ? "NULL" : floatArr[i].ToString());
columnData.Add(doubleArr[i].Equals(null) ? "NULL" : doubleArr[i].ToString());
columnData.Add(uTinyIntArr[i].Equals(null) ? "NULL" : uTinyIntArr[i].ToString());
columnData.Add(uShortArr[i].Equals(null) ? "NULL" : uShortArr[i].ToString());
columnData.Add(uIntArr[i].Equals(null) ? "NULL" : uIntArr[i].ToString());
columnData.Add(uLongArr[i].Equals(null) ? "NULL" : uLongArr[i].ToString());
columnData.Add(String.IsNullOrEmpty(binaryArrCn[i]) ? "NULL" : binaryArrCn[i]);
columnData.Add(String.IsNullOrEmpty(NcharArrCn[i]) ? "NULL" : NcharArrCn[i]);
columnData.AddRange(tagData);
// Console.WriteLine("binaryArrCn[{0}]:{1},NcharArrCn[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? "NULL" : binaryArrCn[i],String.IsNullOrEmpty(NcharArrCn[i]) ? "NULL" : NcharArrCn[i]);
// Console.WriteLine("binaryArrCn[{0}]:{1},NcharArrCn[{0}]:{2}",i,String.IsNullOrEmpty(binaryArrCn[i]) ? 0 :binaryArrCn[i].Length, String.IsNullOrEmpty(NcharArrCn[i]) ? 0 : NcharArrCn[i].Length);
// Console.WriteLine("========");
}
return columnData;
}
public static TAOS_BIND[] GetQueryCondition()
{
......@@ -158,7 +375,47 @@ namespace Test.UtilsTools.DataSource
{
TaosMultiBind.FreeTaosBind(mbinds);
}
//Get the TDengineMeta list from the DDL of either a normal table or a stable
public static List<TDengineMeta> GetMetaFromDLL(string dllStr)
{
var expectResMeta = new List<TDengineMeta>();
//"CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);";
int bracetInd = dllStr.IndexOf("(");
//(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
string subDllStr = dllStr.Substring(bracetInd);
String[] stableSeparators = new String[] { "tags", "TAGS" };
//(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
//(location BINARY(30), groupId INT)
String[] dllStrElements = subDllStr.Split(stableSeparators, StringSplitOptions.RemoveEmptyEntries);
//(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
dllStrElements[0] = dllStrElements[0].Substring(1, dllStrElements[0].Length - 2);
String[] finalStr1 = dllStrElements[0].Split(',', StringSplitOptions.RemoveEmptyEntries);
foreach (string item in finalStr1)
{
//ts TIMESTAMP
string[] itemArr = item.Split(' ', 2, StringSplitOptions.RemoveEmptyEntries);
// Console.WriteLine("GetMetaFromDLL():{0},{1}",itemArr[0],itemArr[1]);
expectResMeta.Add(UtilsTools.ConstructTDengineMeta(itemArr[0], itemArr[1]));
}
if (dllStr.Contains("TAGS") || dllStr.Contains("tags"))
{
//location BINARY(30), groupId INT
dllStrElements[1] = dllStrElements[1].Substring(1, dllStrElements[1].Length - 2);
//location BINARY(30) groupId INT
String[] finalStr2 = dllStrElements[1].Split(',', StringSplitOptions.RemoveEmptyEntries);
Console.WriteLine("========");
foreach (string item in finalStr2)
{
//location BINARY(30)
string[] itemArr = item.Split(' ', 2, StringSplitOptions.RemoveEmptyEntries);
// Console.WriteLine("GetMetaFromDLL():{0},{1}",itemArr[0],itemArr[1]);
expectResMeta.Add(UtilsTools.ConstructTDengineMeta(itemArr[0], itemArr[1]));
}
}
return expectResMeta;
}
}
}
\ No newline at end of file
using System;
using Test.UtilsTools;
using System.Collections.Generic;
using Xunit;
using TDengineDriver;
using Test.UtilsTools.ResultSet;
namespace Cases
{
public class FetchLengthCase
{
/// <author>xiaolei</author>
/// <Name>TestRetrieveBinary</Name>
/// <describe>TD-12103 C# connector fetch_row with binary data retrieving error</describe>
/// <filename>FetchLength.cs</filename>
/// <result>pass or failed </result>
[Fact(DisplayName = "Skip FetchLengthCase.TestRetrieveBinary()")]
public void TestRetrieveBinary()
{
IntPtr conn = UtilsTools.TDConnection();
var expectData = new List<string> { "log", "test", "db02", "db3" };
var expectMeta = new List<TDengineMeta>{
UtilsTools.ConstructTDengineMeta("ts","timestamp"),
UtilsTools.ConstructTDengineMeta("name","binary(10)"),
UtilsTools.ConstructTDengineMeta("n","int")
};
string sql0 = "drop table if exists stb1;";
string sql1 = "create stable if not exists stb1 (ts timestamp, name binary(10)) tags(n int);";
string sql2 = $"insert into tb1 using stb1 tags(1) values(now, '{expectData[0]}');";
string sql3 = $"insert into tb2 using stb1 tags(2) values(now, '{expectData[1]}');";
string sql4 = $"insert into tb3 using stb1 tags(3) values(now, '{expectData[2]}');";
string sql5 = $"insert into tb4 using stb1 tags(4) values(now, '{expectData[3]}');";
string sql6 = "select distinct(name) from stb1;";
UtilsTools.ExecuteQuery(conn, sql0);
UtilsTools.ExecuteQuery(conn, sql1);
UtilsTools.ExecuteQuery(conn, sql2);
UtilsTools.ExecuteQuery(conn, sql3);
UtilsTools.ExecuteQuery(conn, sql4);
UtilsTools.ExecuteQuery(conn, sql5);
IntPtr resPtr = IntPtr.Zero;
resPtr = UtilsTools.ExecuteQuery(conn, sql6);
ResultSet actualResult = new ResultSet(resPtr);
List<string> actualData = actualResult.GetResultData();
List<TDengineMeta> actualMeta = actualResult.GetResultMeta();
expectData.Reverse();
Assert.Equal(expectData[0], actualData[0]);
Assert.Equal(expectMeta[1].name, actualMeta[0].name);
Assert.Equal(expectMeta[1].size, actualMeta[0].size);
Assert.Equal(expectMeta[1].type, actualMeta[0].type);
}
}
}
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net5.0</TargetFramework>
<IsPackable>false</IsPackable>
<NoWarn>CS1591;CS0168</NoWarn>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<DocumentationFile>..\doc\FunctionTest.XML</DocumentationFile>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="16.9.4" />
<PackageReference Include="xunit" Version="2.4.1" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.4.3">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="coverlet.collector" Version="3.0.2">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\TDengineDriver\TDengineDriver.csproj" />
</ItemGroup>
</Project>
using System;
using TDengineDriver;
using System.Runtime.InteropServices;
using System.Text;
using System.Collections.Generic;
namespace Test.UtilsTools.ResultSet
{
public class ResultSet
{
private List<TDengineMeta> resultMeta;
private List<String> resultData;
// private bool isValidResult = false;
public ResultSet(IntPtr res)
{
resultMeta = UtilsTools.GetResField(res);
resultData = UtilsTools.GetResData(res);
}
public ResultSet(List<TDengineMeta> metas, List<String> datas)
{
resultMeta = metas;
resultData = datas;
}
public List<String> GetResultData()
{
return resultData;
}
public List<TDengineMeta> GetResultMeta()
{
return resultMeta;
}
}
}
\ No newline at end of file
using System;
using Test.UtilsTools;
using TDengineDriver;
using System.Collections.Generic;
using Xunit;
using Test.UtilsTools.ResultSet;
namespace Cases
{
public class FetchFieldCases
{
/// <author>xiaolei</author>
/// <Name>FetchFieldCases.TestFetchFieldJsonTag</Name>
/// <describe>test taos_fetch_fields(), check the meta data</describe>
/// <filename>TaosFeild.cs</filename>
/// <result>pass or failed </result>
[Fact(DisplayName = "FetchFieldCases.TestFetchFieldJsonTag()")]
public void TestFetchFieldJsonTag()
{
IntPtr conn = UtilsTools.TDConnection();
IntPtr _res = IntPtr.Zero;
string tableName = "fetchfeilds";
var expectResMeta = new List<TDengineMeta> {
UtilsTools.ConstructTDengineMeta("ts", "timestamp"),
UtilsTools.ConstructTDengineMeta("b", "bool"),
UtilsTools.ConstructTDengineMeta("v1", "tinyint"),
UtilsTools.ConstructTDengineMeta("v2", "smallint"),
UtilsTools.ConstructTDengineMeta("v4", "int"),
UtilsTools.ConstructTDengineMeta("v8", "bigint"),
UtilsTools.ConstructTDengineMeta("f4", "float"),
UtilsTools.ConstructTDengineMeta("f8", "double"),
UtilsTools.ConstructTDengineMeta("u1", "tinyint unsigned"),
UtilsTools.ConstructTDengineMeta("u2", "smallint unsigned"),
UtilsTools.ConstructTDengineMeta("u4", "int unsigned"),
UtilsTools.ConstructTDengineMeta("u8", "bigint unsigned"),
UtilsTools.ConstructTDengineMeta("bin", "binary(200)"),
UtilsTools.ConstructTDengineMeta("blob", "nchar(200)"),
UtilsTools.ConstructTDengineMeta("jsontag", "json"),
};
var expectResData = new List<String> { "1637064040000", "true", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "XI", "XII", "{\"k1\": \"v1\"}" };
String dropTb = "drop table if exists " + tableName;
String createTb = "create stable " + tableName
+ " (ts timestamp" +
",b bool" +
",v1 tinyint" +
",v2 smallint" +
",v4 int" +
",v8 bigint" +
",f4 float" +
",f8 double" +
",u1 tinyint unsigned" +
",u2 smallint unsigned" +
",u4 int unsigned" +
",u8 bigint unsigned" +
",bin binary(200)" +
",blob nchar(200)" +
")" +
"tags" +
"(jsontag json);";
String insertSql = "insert into " + tableName + "_t1 using " + tableName +
" tags('{\"k1\": \"v1\"}') " +
"values(1637064040000,true,1,2,3,4,5,6,7,8,9,10,'XI','XII')";
String selectSql = "select * from " + tableName;
String dropSql = "drop table " + tableName;
UtilsTools.ExecuteUpdate(conn, dropTb);
UtilsTools.ExecuteUpdate(conn, createTb);
UtilsTools.ExecuteUpdate(conn, insertSql);
_res = UtilsTools.ExecuteQuery(conn, selectSql);
ResultSet actualResult = new ResultSet(_res);
List<TDengineMeta> actualMeta = actualResult.GetResultMeta();
for (int i = 0; i < actualMeta.Count; i++)
{
Assert.Equal(expectResMeta[i].name, actualMeta[i].name);
Assert.Equal(expectResMeta[i].type, actualMeta[i].type);
Assert.Equal(expectResMeta[i].size, actualMeta[i].size);
}
}
}
}
......@@ -6,6 +6,11 @@ namespace TDengineDriver.Test
{
public class TestTDengineMeta
{
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameBool</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's bool meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameBool()
{
......@@ -17,7 +22,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameTINYINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's TinyInt meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameTINYINT()
{
......@@ -29,6 +38,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameSMALLINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's SMALLINT meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameSMALLINT()
{
......@@ -40,6 +54,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's INT meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameINT()
{
......@@ -51,6 +70,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameBIGINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's BIGINT meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameBIGINT()
{
......@@ -62,6 +86,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameUTINYINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's TINYINT UNSIGNED meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameUTINYINT()
{
......@@ -73,6 +102,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameUSMALLINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's SMALLINT UNSIGNED meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameUSMALLINT()
{
......@@ -84,6 +118,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameUINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's INT UNSIGNED meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameUINT()
{
......@@ -95,6 +134,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameUBIGINT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's BIGINT UNSIGNED meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameUBIGINT()
{
......@@ -106,7 +150,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameFLOAT</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's FLOAT meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameFLOAT()
{
......@@ -118,6 +166,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameDOUBLE</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's DOUBLE meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameDOUBLE()
{
......@@ -129,10 +182,15 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameSTRING</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's BINARY meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameSTRING()
{
string typeName = "STRING";
string typeName = "BINARY";
TDengineDriver.TDengineMeta meta = new TDengineDriver.TDengineMeta();
meta.type = 8;
string metaTypeName = meta.TypeName();
......@@ -140,6 +198,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameTIMESTAMP</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's TIMESTAMP meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameTIMESTAMP()
{
......@@ -151,6 +214,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameNCHAR</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's NCHAR meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameNCHAR()
{
......@@ -162,6 +230,11 @@ namespace TDengineDriver.Test
Assert.Equal(metaTypeName, typeName);
}
/// <author>xiaolei</author>
/// <Name>TestTDengineMeta.TestTypeNameUndefined</Name>
/// <describe>Unit test for object TDengineDriver.TDengineMeta's undefined type meta info</describe>
/// <filename>TestTDengineMeta.cs</filename>
/// <result>pass or failed </result>
[Fact]
public void TestTypeNameUndefined()
{
......
......@@ -3,13 +3,12 @@
<PropertyGroup>
<TargetFramework>net5.0</TargetFramework>
<IsPackable>false</IsPackable>
</PropertyGroup>
<PropertyGroup>
<NoWarn>CS1591</NoWarn>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<DocumentationFile>..\doc\UnitTest.XML</DocumentationFile>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.msbuild" Version="3.1.0">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
......
......@@ -54,7 +54,7 @@ public abstract class TSDBConstants {
public static final int TSDB_DATA_TYPE_USMALLINT = 12; //unsigned smallint
public static final int TSDB_DATA_TYPE_UINT = 13; //unsigned int
public static final int TSDB_DATA_TYPE_UBIGINT = 14; //unsigned bigint
public static final int TSDB_DATA_TYPE_JSON = 15; //json
// nchar column max length
public static final int maxFieldSize = 16 * 1024;
......@@ -129,6 +129,8 @@ public abstract class TSDBConstants {
return Types.TIMESTAMP;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR;
case TSDBConstants.TSDB_DATA_TYPE_JSON:
return Types.OTHER;
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
}
......@@ -160,6 +162,8 @@ public abstract class TSDBConstants {
return "TIMESTAMP";
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
case TSDBConstants.TSDB_DATA_TYPE_JSON:
return "JSON";
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
}
......
......@@ -615,6 +615,18 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
public void setTagJson(int index, String value) {
ensureTagCapacity(index);
this.tableTags.set(index, new TableTagInfo(value, TSDBConstants.TSDB_DATA_TYPE_JSON));
String charset = TaosGlobalConfig.getCharset();
try {
this.tagValueLength += value.getBytes(charset).length;
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e.getMessage());
}
}
public <T> void setValueImpl(int columnIndex, ArrayList<T> list, int type, int bytes) throws SQLException {
if (this.colData.size() == 0) {
this.colData.addAll(Collections.nCopies(this.parameters.length - 1 - this.tableTags.size(), null));
......@@ -774,6 +786,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
String charset = TaosGlobalConfig.getCharset();
String val = (String) tag.value;
......
......@@ -151,6 +151,7 @@ public class TSDBResultSetBlockData {
this.colData.set(col, lb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
......@@ -199,6 +200,7 @@ public class TSDBResultSetBlockData {
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Integer.parseInt((String) obj);
}
......@@ -232,6 +234,7 @@ public class TSDBResultSetBlockData {
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
if ("TRUE".compareToIgnoreCase((String) obj) == 0) {
return Boolean.TRUE;
......@@ -271,6 +274,7 @@ public class TSDBResultSetBlockData {
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Long.parseLong((String) obj);
}
......@@ -308,6 +312,7 @@ public class TSDBResultSetBlockData {
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Double.parseDouble((String) obj);
}
......@@ -406,6 +411,7 @@ public class TSDBResultSetBlockData {
return new String(dest);
}
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
......
......@@ -78,6 +78,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return ((Long) obj) == 1L ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
return obj.toString().contains("1");
}
......@@ -147,6 +148,7 @@ public class TSDBResultSetRowData {
return ((Long) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
return Integer.parseInt((String) obj);
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:
return parseUnsignedTinyIntToInt(obj);
......@@ -228,6 +230,7 @@ public class TSDBResultSetRowData {
return (Long) obj;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
return Long.parseLong((String) obj);
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: {
byte value = (byte) obj;
......@@ -418,6 +421,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return new String((byte[]) obj);
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_JSON:
return (String) obj;
default:
return String.valueOf(obj);
......
package com.taosdata.jdbc.rs;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
......@@ -184,6 +186,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return row.getString(colIndex) == null ? null : row.getString(colIndex);
case TSDBConstants.TSDB_DATA_TYPE_JSON:
// all json tag or just a json tag value
return row.get(colIndex) != null && (row.get(colIndex) instanceof String || row.get(colIndex) instanceof JSONObject)
? JSON.toJSONString(row.get(colIndex), SerializerFeature.WriteMapNullValue)
: row.get(colIndex);
default:
return row.get(colIndex);
}
......
node_modules
\ No newline at end of file
This repository creates a custom Node-Red node for configuring a TDEngine server connection and executing the SQL taken from the previous node's msg.payload.
## Design
Use the TDengine RESTful API to submit SQL; the API call looks like
```
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
```
Input options:
* DB Server: Set up a server connection or select an existing server
* DB Name: The database in which to execute the SQL
[axios](https://axios-http.com/) is used to issue the HTTP request, as sketched below.
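A minimal sketch of how the node might issue that request with axios; the `server` fields (host, port, db, token) are hypothetical names standing in for the node's configuration, and the response shape is only indicative:

```
const axios = require('axios');

// POST the SQL text to http://<ip>:<PORT>/rest/sql/<db_name> with Basic auth,
// mirroring the curl call above.
async function runSql(server, sql) {
    const url = `http://${server.host}:${server.port}/rest/sql/${server.db}`;
    const res = await axios.post(url, sql, {
        headers: { Authorization: `Basic ${server.token}` }
    });
    return res.data; // typically { status, head, data, rows, ... }
}

// Example: run the SQL carried by the previous node's msg.payload
// runSql({ host: '127.0.0.1', port: 6041, db: 'test', token: '<TOKEN>' }, msg.payload)
//     .then(result => console.log(result.data));
```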
## Usage
1. Start Node-Red
2. Install the TDEngine node
3. Add the "taos query" node to the workspace from the palette
4. Set up a TDEngine server and database name
5. Add a Function or other node to create the SQL and put it into msg.payload (see the example after this list)
6. Link it to the "taos query" node
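For example, a Function node wired before "taos query" could build the SQL and put it into msg.payload like this (the SQL text is only an illustration):

```
// Function node: build the SQL statement and pass it downstream in msg.payload
msg.payload = "select * from meters limit 10";
return msg;
```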
### Demo
1. Start Node-Red with Docker
```
docker run -it -p 1880:1880 -v node_red_data:/data --name mynodered nodered/node-red
```
2. Import sample flow "demo/flow.json"
![import-flow](demo/ImportFlow.png)
3. Install the TDEngine node by the name "node-red-contrib-tdengine" (current version 0.0.2)
![alt](demo/InstallTDEngineNode.png)
4. Modify your TDEngine server config
![alt](demo/ModifyServerConfig.png)
5. Edit test SQL
![alt](demo/EditTestSQL.png)
6. Start the flow by clicking the Inject node
......@@ -240,7 +240,7 @@ static void dnodeCheckDataDirOpenned(char *dir) {
char filepath[256] = {0};
sprintf(filepath, "%s/.running", dir);
int fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
int fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRWXU | S_IRWXG | S_IRWXO);
if (fd < 0) {
dError("failed to open lock file:%s, reason: %s, quit", filepath, strerror(errno));
exit(0);
......
Subproject commit 27751ba9ca17407425fb50a52cd68295794dedc3
Subproject commit da842b77f438e5b4c496918e51f8ea02ba0f2c99
(Diffs for the remaining files are collapsed.)