diff --git a/Jenkinsfile b/Jenkinsfile
index ebac32cb241af1a35556262690544f84ca94d9fc..73bb832d8ea4c16d65f6fff88cb8844415c19f9c 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -41,13 +41,10 @@ def pre_test(){
cd ${WKC}
git checkout develop
- git reset --hard HEAD~10
- git pull
- git fetch
- git checkout ${CHANGE_BRANCH}
- git reset --hard HEAD~10
+ git reset --hard HEAD~10 >/dev/null
git pull
- git merge develop
+ git fetch origin +refs/pull/${CHANGE_ID}/merge
+ git checkout -qf FETCH_HEAD
cd ${WK}
git reset --hard HEAD~10
git checkout develop
@@ -87,11 +84,14 @@ pipeline {
steps {
pre_test()
- sh '''
- cd ${WKC}/tests
- find pytest -name '*'sql|xargs rm -rf
- ./test-all.sh p1
- date'''
+ timeout(time: 90, unit: 'MINUTES'){
+ sh '''
+ cd ${WKC}/tests
+ find pytest -name '*'sql|xargs rm -rf
+ ./test-all.sh p1
+ date'''
+ }
+
}
}
stage('python_2') {
@@ -112,12 +112,14 @@ pipeline {
}
stage('test_b1') {
agent{label 'b1'}
- steps {
- pre_test()
- sh '''
- cd ${WKC}/tests
- ./test-all.sh b1fq
- date'''
+ steps {
+ timeout(time: 90, unit: 'MINUTES'){
+ pre_test()
+ sh '''
+ cd ${WKC}/tests
+ ./test-all.sh b1fq
+ date'''
+ }
}
}
@@ -137,12 +139,14 @@ pipeline {
./handle_crash_gen_val_log.sh
'''
}
- sh '''
- date
- cd ${WKC}/tests
- ./test-all.sh b2fq
- date
- '''
+ timeout(time: 90, unit: 'MINUTES'){
+ sh '''
+ date
+ cd ${WKC}/tests
+ ./test-all.sh b2fq
+ date
+ '''
+ }
}
}
@@ -157,12 +161,14 @@ pipeline {
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
- }
- sh '''
- date
- cd ${WKC}/tests
- ./test-all.sh b3fq
- date'''
+ }
+ timeout(time: 90, unit: 'MINUTES'){
+ sh '''
+ date
+ cd ${WKC}/tests
+ ./test-all.sh b3fq
+ date'''
+ }
}
}
diff --git a/cmake/install.inc b/cmake/install.inc
index b0e5c71022c7c30fefd79ed62830db6afeb953f2..a5b01f43cb7e8a302a9cf2d6d7ef48ddb8ed944e 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.16-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.17-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/version.inc b/cmake/version.inc
old mode 100644
new mode 100755
index 0509a5ce1b2d44f4986c734060853b3aa32addac..f9927bf1c61c62dd27123e1c6aec158c4d1c09cf
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -16,13 +16,25 @@ ENDIF ()
IF (DEFINED GITINFO)
SET(TD_VER_GIT ${GITINFO})
ELSE ()
- SET(TD_VER_GIT "community")
+ execute_process(
+ COMMAND git log -1 --format=%H
+ WORKING_DIRECTORY ${TD_COMMUNITY_DIR}
+ OUTPUT_VARIABLE GIT_COMMITID
+ )
+ string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
+ SET(TD_VER_GIT ${GIT_COMMITID})
ENDIF ()
IF (DEFINED GITINFOI)
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
ELSE ()
- SET(TD_VER_GIT_INTERNAL "internal")
+ execute_process(
+ COMMAND git log -1 --format=%H
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+ OUTPUT_VARIABLE GIT_COMMITID
+ )
+ string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
+ SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
ENDIF ()
IF (DEFINED VERDATE)
diff --git a/documentation20/webdocs/assets/tdengine-jdbc-connector.png b/documentation20/webdocs/assets/tdengine-jdbc-connector.png
new file mode 100644
index 0000000000000000000000000000000000000000..fdf1dd3fcc5ee222c4a8753efa2c95c5257314bf
Binary files /dev/null and b/documentation20/webdocs/assets/tdengine-jdbc-connector.png differ
diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/webdocs/markdowndocs/Getting Started-ch.md
index b53c014ba64fb203cb8e6943297fe6117034385c..b8b298b9501a43396e134eb1efa9ba6e6a029b79 100644
--- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md
+++ b/documentation20/webdocs/markdowndocs/Getting Started-ch.md
@@ -179,7 +179,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
-## **支持平台列表**
+## 支持平台列表
### TDengine服务器支持的平台列表
diff --git a/documentation20/webdocs/markdowndocs/Queries-ch.md b/documentation20/webdocs/markdowndocs/Queries-ch.md
index 960bb39e6323fe590905de62d2a1021adb19d176..839809ccba1914a9d5cfa9005be9f32e94f19924 100644
--- a/documentation20/webdocs/markdowndocs/Queries-ch.md
+++ b/documentation20/webdocs/markdowndocs/Queries-ch.md
@@ -9,7 +9,7 @@
TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能:
- 单列、多列数据查询
-- 标签和数值的多种过滤条件:\>, \<, =, \<>, like 等
+- 标签和数值的多种过滤条件:>, <, =, <>, like 等
- 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset)
- 数值列及聚合结果的四则运算
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
index 946eec53ad9cc14b91870708ca9d9594dcec8671..3333bbc450d2c8883a45c4879bcbfe951003967b 100644
--- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
+++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
@@ -58,26 +58,26 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
- **创建数据库**
- ```mysql
- CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
- ```
- 说明:
-
- 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
-
- 2) UPDATE 标志数据库支持更新相同时间戳数据;
-
- 3) 数据库名最大长度为33;
-
- 4) 一条SQL 语句的最大长度为65480个字符;
-
- 5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
+ ```mysql
+ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
+ ```
+ 说明:
+
+ 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
+
+ 2) UPDATE 标志数据库支持更新相同时间戳数据;
+
+ 3) 数据库名最大长度为33;
+
+ 4) 一条SQL 语句的最大长度为65480个字符;
+
+ 5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
- **显示系统当前参数**
- ```mysql
- SHOW VARIABLES;
- ```
+ ```mysql
+ SHOW VARIABLES;
+ ```
- **使用数据库**
@@ -698,13 +698,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
- 功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
+ 功能说明:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
返回结果数据类型:双精度浮点数Double。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
- 适用于:表、超级表。
+ 适用于:表。
- **SUM**
```mysql
@@ -743,7 +743,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
- 适用于:表。
+ 适用于:表。(从 2.0.15 版本开始,本函数也支持超级表)
示例:
```mysql
@@ -1125,11 +1125,8 @@ SELECT function_list FROM stb_name
- WHERE语句可以指定查询的起止时间和其他过滤条件
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
1. 不进行填充:NONE(默认填充模式)。
-
2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。
-
3. NULL填充:使用NULL填充数据。例如:fill(null)。
-
4. PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。
说明:
@@ -1157,7 +1154,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
- 数据库名最大长度为32
- 表名最大长度为192,每行数据最大长度16k个字符
- 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳
-- 标签最多允许128个,可以0个,标签总长度不超过16k个字符
+- 标签最多允许128个,可以1个,标签总长度不超过16k个字符
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md
index 9eee5b2b69cad573f62a1fa4890ff73c43026bd1..a343f7e97079566208b707b36e46b219f26772e2 100644
--- a/documentation20/webdocs/markdowndocs/administrator-ch.md
+++ b/documentation20/webdocs/markdowndocs/administrator-ch.md
@@ -124,10 +124,10 @@ taosd -C
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
```
- create database demo days 10 cache 32 blocks 8 replica 3;
+ create database demo days 10 cache 32 blocks 8 replica 3 update 1;
```
-该SQL创建了一个库demo, 每个数据文件存储10天数据,内存块为32兆字节,每个VNODE占用8个内存块,副本数为3,而其他参数与系统配置完全一致。
+该SQL创建了一个库demo, 每个数据文件存储10天数据,内存块为32兆字节,每个VNODE占用8个内存块,副本数为3,允许更新,而其他参数与系统配置完全一致。
TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下:
diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/webdocs/markdowndocs/advanced features-ch.md
index cdd9ee81048f0968fb02f036b20e003c66835a4c..0ca8428ecee2c5ef162810737f77cb9cf4b9412b 100644
--- a/documentation20/webdocs/markdowndocs/advanced features-ch.md
+++ b/documentation20/webdocs/markdowndocs/advanced features-ch.md
@@ -197,7 +197,7 @@ select * from meters where ts > now - 1d and current > 10;
且`restart`是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。
`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。
-在同步模式下,如过前后两次调用`taos_consume`的时间间隔小于此时间,
+在同步模式下,如果前后两次调用`taos_consume`的时间间隔小于此时间,
`taos_consume`会阻塞,直到间隔超过此时间。
异步模式下,这个时间是两次调用回调函数的最小时间间隔。
@@ -414,7 +414,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接
TDengine分配固定大小的内存空间作为缓存空间,缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间,TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点(virtual node)创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池,不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。
-TDengine将内存池按块划分进行管理,数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好的,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的,块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定,每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: `cache * ablocks * tables`。内存块参数cache不宜过小,一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2,保证每张表平均至少能分配两个内存块。
+TDengine将内存池按块划分进行管理,数据在内存块里是以行(row)的形式存储。一个vnode的内存池是在vnode创建时按块分配好,而且每个内存块按照先进先出的原则进行管理。在创建内存池时,块的大小由系统配置参数cache决定;每个vnode中内存块的数目则由配置参数blocks决定。因此对于一个vnode,总的内存大小为:`cache * blocks`。一个cache block需要保证每张表能存储至少几十条以上记录,才会有效率。
你可以通过函数last_row快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如:
diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md
index a666f755156906adb9c4d3e8c18e3e4f8679e255..773d8196f26ff414a9546b474713f46bfb0dd5ac 100644
--- a/documentation20/webdocs/markdowndocs/architecture-ch.md
+++ b/documentation20/webdocs/markdowndocs/architecture-ch.md
@@ -248,7 +248,7 @@ Master Vnode遵循下面的写入流程:
1. Master vnode收到应用的数据插入请求,验证OK,进入下一步;
2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version);
-4. 写入内存,并加记录加入到skip list;
+4. 写入内存,并将记录加入到skip list;
5. Master vnode返回确认信息给应用,表示写入成功。
6. 如果第2,3,4步中任何一步失败,将直接返回错误给应用。
diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md
index f3f6f2b3005e1dc101fce817520a7e96bdfc0e31..f6019b1a5aabdaea095eafc1919c46432ec88c77 100644
--- a/documentation20/webdocs/markdowndocs/cluster-ch.md
+++ b/documentation20/webdocs/markdowndocs/cluster-ch.md
@@ -225,7 +225,7 @@ SHOW MNODES;
## Arbitrator的使用
-如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
+如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了Arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含Arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到Arbitrator,那么节点B就能正常工作。
-TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的arbitrator。如果副本数为奇数,即使配置了arbitrator, 系统也不会去建立连接。
+TDengine提供一个执行程序,名为 tarbitrator,找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的Arbitrator。如果副本数为奇数,即使配置了Arbitrator,系统也不会去建立连接。
diff --git a/documentation20/webdocs/markdowndocs/cluster.md b/documentation20/webdocs/markdowndocs/cluster.md
deleted file mode 100644
index 8cf7065f72eb7da75a949fd644b00346523c15ed..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/cluster.md
+++ /dev/null
@@ -1,146 +0,0 @@
-#集群安装、管理
-
-多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。
-
-集群的每个节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令“hostname"获取。端口是这个节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。
-
-TDengine的集群管理极其简单,除添加和删除节点需要人工干预之外,其他全部是自动完成,最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
-
-##安装、创建第一个节点
-
-集群是由一个一个dnode组成的,是从一个dnode的创建开始的。创建第一个节点很简单,就按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装、启动即可。
-
-启动后,请执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示:
- ```
-Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
-Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
-
-taos> show dnodes;
- id | end_point | vnodes | cores | status | role | create_time |
-=====================================================================================
- 1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
-Query OK, 1 row(s) in set (0.006385s)
-
-taos>
- ```
-上述命令里,可以看到这个刚启动的这个节点的End Point是:h1.taos.com:6030
-
-## 安装、创建后续节点
-
-将新的节点添加到现有集群,具体有以下几步:
-
-1. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装,但不要启动taosd
-
-2. 如果是使用涛思数据的官方安装包进行安装,在安装结束时,会询问集群的End Port, 输入第一个节点的End Point即可。如果是源码安装,请编辑配置文件taos.cfg(缺省是在/etc/taos/目录),增加一行:
-
- ```
- firstEp h1.taos.com:6030
- ```
-
- 请注意将示例的“h1.taos.com:6030" 替换为你自己第一个节点的End Point
-
-3. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法启动taosd
-
-4. 在Linux shell里执行命令"hostname"找出本机的FQDN, 假设为h2.taos.com。如果无法找到,可以查看taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),fqdn以及port都会打印出来。
-
-5. 在第一个节点,使用CLI程序taos, 登录进TDengine系统, 使用命令:
-
- ```
- CREATE DNODE "h2.taos.com:6030";
- ```
-
- 将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。请注意将示例的“h2.taos.com:6030" 替换为你自己第一个节点的End Point
-
-6. 使用命令
-
- ```
- SHOW DNODES;
- ```
-
- 查看新节点是否被成功加入。
-
-按照上述步骤可以源源不断的将新的节点加入到集群。
-
-**提示:**
-
-- firstEp, secondEp这两个参数仅仅在该节点第一次加入集群时有作用,加入集群后,该节点会保存最新的mnode的End Point列表,不再依赖这两个参数。
-- 两个没有配置first, second参数的dnode启动后,会独立运行起来。这个时候,无法将其中一个节点加入到另外一个节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
-
-##节点管理
-
-###添加节点
-执行CLI程序taos, 使用root账号登录进系统, 执行:
-```
-CREATE DNODE "fqdn:port";
-```
-将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**,否则出错。一个节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置,缺省是自动获取。
-
-###删除节点
-执行CLI程序taos, 使用root账号登录进TDengine系统,执行:
-```
-DROP DNODE "fqdn:port";
-```
-其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号
-
-###查看节点
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
-```
-SHOW DNODES;
-```
-它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个节点后,可以使用该命令查看。
-
-如果集群配置了Arbitrator,那么它也会在这个节点列表中显示出来,其role列的值会是“arb”。
-
-###查看虚拟节点组
-
-为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
-
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
-```
-SHOW VGROUPS;
-```
-##高可用性
-TDengine通过多副本的机制来提供系统的高可用性。副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo:
-```
-CREATE DATABASE demo replica 3;
-```
-一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数,同一个vnode group里各vnode的数据是完全一致的。为保证高可用性,vnode group里的vnode一定要分布在不同的dnode里(实际部署时,需要在不同的物理机上),只要一个vgroup里超过半数的vnode处于工作状态,这个vgroup就能正常的对外服务。
-
-一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的d读写操作。
-
-因为vnode的引入,无法简单的给出结论:“集群中过半dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个节点不工作,那整个集群就无法正常工作了。
-
-##Mnode的高可用
-TDengine集群是由mnode (taosd的一个模块,逻辑节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。
-
-一个集群有多个dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令:
-```
-SHOW MNODES;
-```
-来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。
-当集群中第一个节点启动时,该节点一定会运行一个mnode实例,否则该dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。
-
-为保证mnode服务的高可用性,numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的,如果numOfMnodes大于2,复制参数quorum自动设为2,也就是说,至少要保证有两个副本写入数据成功,才通知客户端应用写入成功。
-
-##负载均衡
-
-有三种情况,将触发负载均衡,而且都无需人工干预。
-
-- 当一个新节点添加进集群时,系统将自动触发负载均衡,一些节点上的数据将被自动转移到新节点上,无需任何人工干预。
-- 当一个节点从集群中移除时,系统将自动把该节点上的数据转移到其他节点,无需任何人工干预。
-- 如果一个节点过热(数据量过大),系统将自动进行负载均衡,将该节点的一些vnode自动挪到其他节点。
-
-当上述三种情况发生时,系统将启动一各个节点的负载计算,从而决定如何挪动。
-
-##节点离线处理
-如果一个节点离线,TDengine集群将自动检测到。有如下两种情况:
-- 改节点离线超过一定时间(taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的节点重现上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
-- 离线后,在offlineThreshold的时长内重新上线,系统将自动启动数据恢复流程,等数据完全恢复后,该节点将开始正常工作。
-
-##Arbitrator的使用
-
-如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator, 那么节点B就能正常工作。
-
-TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6030。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的Arbitrator。
-
-在配置了Arbitrator的情况下,它也会显示在“show dnodes;”指令给出的节点列表中。
diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md
index da9c2e5a119e56243e517e9de0e248184d3c3c93..bcaabe3c0a48837cc50d1d2f5f212a57c8e5c912 100644
--- a/documentation20/webdocs/markdowndocs/connector-ch.md
+++ b/documentation20/webdocs/markdowndocs/connector-ch.md
@@ -288,13 +288,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
* res:`taos_query_a`回调时返回的结果集
* fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的函数)。 在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。
-- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
-
- 异步获取一条记录。其中:
-
- * res:`taos_query_a`回调时返回的结果集
- * fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。
-
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
### 参数绑定API
diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/webdocs/markdowndocs/connector-java-ch.md
index 7ba573d2e44bb3848d13a2f6b7cb81038843a291..b8390e7af50569b62f031c5a3af3020d43b6f98a 100644
--- a/documentation20/webdocs/markdowndocs/connector-java-ch.md
+++ b/documentation20/webdocs/markdowndocs/connector-java-ch.md
@@ -1,62 +1,62 @@
# Java Connector
-Java连接器支持的系统有:
-| **CPU类型** | x64(64bit) | | | ARM64 | ARM32 |
-| ------------ | ------------ | -------- | -------- | -------- | -------- |
-| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
-| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
+TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository][1] 搜索下载。
-Java连接器的使用请参见视频教程。
+`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.17 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
-TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
+
-由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
+上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式:
-* libtaos.so
- 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
-
-* taos.dll
- 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
+* JDBC-JNI:Java 应用在物理节点1(pnode1)上使用 JDBC-JNI 的 API ,直接调用客户端 API(libtaos.so 或 taos.dll)将写入和查询请求发送到位于物理节点2(pnode2)上的 taosd 实例。
+* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。
+* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。
-> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
+TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
-TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
-
-* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
-* 由于不支持删除和修改,所以也不支持事务操作。
+* TDengine 目前不支持针对单条数据记录的删除操作。
+* 目前不支持事务操作。
* 目前不支持表间的 union 操作。
-* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
-
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
-
-| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
-| --- | --- | --- |
-| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x |
-| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
-| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
-
-## TDengine DataType 和 Java DataType
-
-TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
-
-| TDengine DataType | Java DataType |
-| --- | --- |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT, TINYINT |java.lang.Short |
-| BOOL | java.lang.Boolean |
-| BINARY, NCHAR | java.lang.String |
-
-## 如何获取 TAOS-JDBCDriver
+* 目前不支持嵌套查询(nested query)。
+* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
+
+
+## JDBC-JNI和JDBC-RESTful的对比
+
+
+对比项 | JDBC-JNI | JDBC-RESTful |
+
+ 支持的操作系统 |
+ linux、windows |
+ 全平台 |
+
+
+ 是否需要安装 client |
+ 需要 |
+ 不需要 |
+
+
+ server 升级后是否需要升级 client |
+ 需要 |
+ 不需要 |
+
+
+ 写入性能 |
+ JDBC-RESTful 是 JDBC-JNI 的 50%~90% |
+
+
+ 查询性能 |
+ JDBC-RESTful 与 JDBC-JNI 没有差别 |
+
+
+
+
+## 如何获取 taos-jdbcdriver
### maven 仓库
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
+
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@@ -67,30 +67,63 @@ maven 项目中使用如下 pom.xml 配置即可:
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.4
+ 2.0.17
```
### 源码编译打包
-下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
+下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
+
-## 使用说明
+
+## JDBC的使用说明
### 获取连接
-#### 通过JdbcUrl获取连接
+#### 指定URL获取连接
+
+通过指定URL获取连接,如下所示:
+
+```java
+Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+
+以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
+
+使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
+
+1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
+2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
+3. 使用 6041 作为连接端口。
+
+如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示:
-通过指定的jdbcUrl获取连接,如下所示:
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
-以上示例,建立了到hostname为taosdemo.com,端口为6030(TDengine的默认端口),数据库名为test的连接。这个url中指定用户名(user)为root,密码(password)为taosdata。
+
+以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
+
+**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
+
+* libtaos.so
+ 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
+
+* taos.dll
+ 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
+
+> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
+
+JDBC-JNI 的使用请参见视频教程。
TDengine 的 JDBC URL 规范格式为:
-`jdbc:TAOS://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+
url中的配置参数如下:
* user:登录 TDengine 用户名,默认值 root。
* password:用户登录密码,默认值 taosdata。
@@ -99,13 +132,17 @@ url中的配置参数如下:
* locale:客户端语言环境,默认值系统当前 locale。
* timezone:客户端使用的时区,默认值为系统当前时区。
-#### 使用JdbcUrl和Properties获取连接
-除了通过指定的jdbcUrl获取连接,还可以使用Properties指定建立连接时的参数,如下所示:
+
+#### 指定URL和Properties获取连接
+
+除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
+ // Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
+ // String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@@ -114,9 +151,10 @@ public Connection getConn() throws Exception{
return conn;
}
```
-以上示例,建立一个到hostname为taosdemo.com,端口为6030,数据库名为test的连接。这个连接在url中指定了用户名(user)为root,密码(password)为taosdata,并在connProps中指定了使用的字符集、语言环境、时区等信息。
-properties中的配置参数如下:
+以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030,数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
+
+properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 root。
* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 taosdata。
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
@@ -124,10 +162,14 @@ properties中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。
+
+
#### 使用客户端配置文件建立连接
-当使用JDBC连接TDengine集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的firstEp、secondEp参数。
+
+当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。
如下所示:
-1. 在java中不指定hostname和port
+
+1. 在 Java 应用中不指定 hostname 和 port
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
@@ -140,7 +182,7 @@ public Connection getConn() throws Exception{
return conn;
}
```
-2. 在配置文件中指定firstEp和secondEp
+2. 在配置文件中指定 firstEp 和 secondEp
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
@@ -155,17 +197,19 @@ secondEp cluster_node2:6030
# locale en_US.UTF-8
```
-以上示例,jdbc会使用客户端的配置文件,建立到hostname为cluster_node1,端口为6030,数据库名为test的连接。当集群中firstEp节点失效时,JDBC会尝试使用secondEp连接集群。
-TDengine中,只要保证firstEp和secondEp中一个节点有效,就可以正常建立到集群的连接。
+以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。
+TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
-> 注意:这里的配置文件指的是调用JDBC Connector的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
+> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
#### 配置参数的优先级
-通过以上3种方式获取连接,如果配置参数在url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下:
+
+通过以上 3 种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下:
1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。
2. Properties connProps
3. 客户端配置文件 taos.cfg
-例如:在url中指定了password为taosdata,在Properties中指定了password为taosdemo,那么,JDBC会使用url中的password建立连接。
+
+例如:在 url 中指定了 password 为 taosdata,在 Properties 中指定了 password 为 taosdemo,那么,JDBC 会使用 url 中的 password 建立连接。
> 更多详细配置请参考[客户端配置][13]
@@ -183,6 +227,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
+
> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
### 插入数据
@@ -193,6 +238,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
+
> now 为系统内部函数,默认为服务器当前时间。
> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。
@@ -214,6 +260,7 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
+
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
### 订阅
@@ -248,7 +295,7 @@ while(true) {
}
```
-`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
+`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
#### 关闭订阅
@@ -265,8 +312,11 @@ resultSet.close();
stmt.close();
conn.close();
```
+
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
+
+
## 与连接池使用
**HikariCP**
@@ -306,6 +356,7 @@ conn.close();
connection.close(); // put back to conneciton pool
}
```
+
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明][5]
@@ -356,6 +407,7 @@ public static void main(String[] args) throws Exception {
connection.close(); // put back to conneciton pool
}
```
+
> 更多 druid 使用问题请查看[官方说明][6]
**注意事项**
@@ -370,10 +422,43 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
+
+
## 与框架使用
* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
-* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
+* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
+
+
+
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+
+| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
+| -------------------- | ----------------- | -------- |
+| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x |
+| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
+| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
+
+
+
+## TDengine DataType 和 Java DataType
+
+TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
+
+| TDengine DataType | Java DataType |
+| ----------------- | ------------------ |
+| TIMESTAMP | java.sql.Timestamp |
+| INT | java.lang.Integer |
+| BIGINT | java.lang.Long |
+| FLOAT | java.lang.Float |
+| DOUBLE | java.lang.Double |
+| SMALLINT, TINYINT | java.lang.Short |
+| BOOL | java.lang.Boolean |
+| BINARY, NCHAR | java.lang.String |
+
+
## 常见问题
@@ -381,7 +466,7 @@ Query OK, 1 row(s) in set (0.000141s)
**原因**:程序没有找到依赖的本地函数库 taos。
- **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
+ **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@@ -406,3 +491,4 @@ Query OK, 1 row(s) in set (0.000141s)
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
+
diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md
index 79139078c13bda797299b470b417061c61169a10..cd6f0ae08caf19340b6cef9a9428abcb66c97dc6 100644
--- a/documentation20/webdocs/markdowndocs/faq-ch.md
+++ b/documentation20/webdocs/markdowndocs/faq-ch.md
@@ -85,7 +85,9 @@ TDengine还没有一组专用的validation queries。然而建议你使用系统
## 9. 我可以删除或更新一条记录吗?
-不能。因为TDengine是为联网设备采集的数据设计的,不容许修改。但TDengine提供数据保留策略,只要数据记录超过保留时长,就会被自动删除。
+TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
+
+从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
## 10. 我怎么创建超过1024列的表?
@@ -132,8 +134,3 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
-## 17. TDengine 是否支持删除或更新已经写入的数据?
-
-TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
-
-从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/webdocs/markdowndocs/insert-ch.md
index 3fa48c1f508bedf7b4000ffe9f7ef8c96e42d606..7d380ac952dce5f57ff259159c33dd9e9b53edf3 100644
--- a/documentation20/webdocs/markdowndocs/insert-ch.md
+++ b/documentation20/webdocs/markdowndocs/insert-ch.md
@@ -222,7 +222,7 @@ MQTT是一流行的物联网数据传输协议,TDengine 可以很方便的接
## EMQ Broker 直接写入
-EMQ是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考EMQ 官方文档。
+EMQ是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考EMQ 官方文档。
## HiveMQ Broker 直接写入
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 2908a8a21e503acd96c4d9d3976c9ee67bb86561..73004fe7b722d2fd2c15930f69a38d78aca4220d 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -265,9 +265,6 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
-# enable/disable telemetry reporting
-# telemetryReporting 1
-
# enable/disable stream (continuous query)
# stream 1
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
index 44ccafd04460c8beef0a4b2f5ae715033328f807..e13cad41204c3f28ed678a664bf7b9b7f5f8715d 100644
--- a/packaging/docker/Dockerfile
+++ b/packaging/docker/Dockerfile
@@ -1,14 +1,16 @@
-FROM ubuntu:20.04
+FROM ubuntu:18.04
WORKDIR /root
-ARG version
-RUN echo $version
-COPY tdengine.tar.gz /root/
-RUN tar -zxf tdengine.tar.gz
-WORKDIR /root/TDengine-server-$version/
-RUN /bin/bash install.sh -e no
+ARG pkgFile
+ARG dirName
+RUN echo ${pkgFile}
+RUN echo ${dirName}
+COPY ${pkgFile} /root/
+RUN tar -zxf ${pkgFile}
+WORKDIR /root/${dirName}/
+RUN /bin/bash install.sh -e no
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
ENV LANG=en_US.UTF-8
diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ca2c3c66c9875a697b372f73448aa53deb887f68
--- /dev/null
+++ b/packaging/docker/dockerManifest.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -e
+#set -x
+
+# dockerbuild.sh
+# -n [version number]
+# -p [xxxx]
+
+# set parameters by default value
+verNumber=""
+passWord=""
+
+while getopts "hn:p:" arg
+do
+ case $arg in
+ n)
+ #echo "verNumber=$OPTARG"
+ verNumber=$(echo $OPTARG)
+ ;;
+ p)
+ #echo "passWord=$OPTARG"
+ passWord=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -n [version number] "
+ echo " -p [password for docker hub] "
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
+echo "verNumber=${verNumber}"
+
+docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber}
+
+docker login -u tdengine -p ${passWord} #replace the docker registry username and password
+
+docker manifest push tdengine/tdengine:${verNumber}
+
+# TODO: decide how the "latest" multi-arch manifest tag should be created and pushed
diff --git a/packaging/docker/dockerbuild-aarch64.sh b/packaging/docker/dockerbuild-aarch64.sh
deleted file mode 100755
index 795eaed5491834cf5601e567440a314d6e1f9629..0000000000000000000000000000000000000000
--- a/packaging/docker/dockerbuild-aarch64.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-set -x
-docker build --rm -f "Dockerfile" -t tdengine/tdengine-aarch64:$1 "." --build-arg version=$1
-docker login -u tdengine -p $2 #replace the docker registry username and password
-docker push tdengine/tdengine-aarch64:$1
diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh
index 48e2f7ead63356e8210d03beff2add6796558f5b..b7991465b0940c86ac6d620d498901ab5e1c9ac2 100755
--- a/packaging/docker/dockerbuild.sh
+++ b/packaging/docker/dockerbuild.sh
@@ -1,5 +1,63 @@
#!/bin/bash
-set -x
-docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "." --build-arg version=$1
-docker login -u tdengine -p $2 #replace the docker registry username and password
-docker push tdengine/tdengine:$1
+set -e
+#set -x
+
+# dockerbuild.sh
+# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
+# -f [pkg file]
+# -n [version number]
+# -p [password for docker hub]
+
+# set parameters by default value
+cpuType=amd64
+verNumber=""
+passWord=""
+pkgFile=""
+
+while getopts "hc:n:p:f:" arg
+do
+ case $arg in
+ c)
+ #echo "cpuType=$OPTARG"
+ cpuType=$(echo $OPTARG)
+ ;;
+ n)
+ #echo "verNumber=$OPTARG"
+ verNumber=$(echo $OPTARG)
+ ;;
+ p)
+ #echo "passWord=$OPTARG"
+ passWord=$(echo $OPTARG)
+ ;;
+ f)
+ #echo "pkgFile=$OPTARG"
+ pkgFile=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
+ echo " -f [pkg file] "
+ echo " -n [version number] "
+ echo " -p [password for docker hub] "
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
+echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
+echo "$(pwd)"
+echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
+
+dirName=${pkgFile%-Linux*}
+#echo "dirName=${dirName}"
+
+docker build --rm -f "Dockerfile" -t tdengine/tdengine-${cpuType}:${verNumber} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
+docker login -u tdengine -p ${passWord} #replace the docker registry username and password
+docker push tdengine/tdengine-${cpuType}:${verNumber}
+
+# set this version to latest version
+docker tag tdengine/tdengine-${cpuType}:${verNumber} tdengine/tdengine-${cpuType}:latest
+docker push tdengine/tdengine-${cpuType}:latest
diff --git a/packaging/docker/dockerbuildi.sh b/packaging/docker/dockerbuildi.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a0a954e30fe9c3637abe4d219001235d793466e0
--- /dev/null
+++ b/packaging/docker/dockerbuildi.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+
+set -e
+#set -x
+
+# dockerbuild.sh
+# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
+# -n [version number]
+# -p [password for docker hub]
+
+# set parameters by default value
+cpuType=aarch64
+verNumber=""
+passWord=""
+
+while getopts "hc:n:p:f:" arg
+do
+ case $arg in
+ c)
+ #echo "cpuType=$OPTARG"
+ cpuType=$(echo $OPTARG)
+ ;;
+ n)
+ #echo "verNumber=$OPTARG"
+ verNumber=$(echo $OPTARG)
+ ;;
+ p)
+ #echo "passWord=$OPTARG"
+ passWord=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
+ echo " -n [version number] "
+ echo " -p [password for docker hub] "
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
+pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
+
+echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
+
+scriptDir=`pwd`
+pkgDir=$scriptDir/../../release/
+
+cp -f ${pkgDir}/${pkgFile} .
+
+./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
+
+rm -f ${pkgFile}
diff --git a/packaging/tools/taosd-dump-cfg.gdb b/packaging/tools/taosd-dump-cfg.gdb
new file mode 100644
index 0000000000000000000000000000000000000000..9774ccf82283817edea5f49b59e0c6cb6f529577
--- /dev/null
+++ b/packaging/tools/taosd-dump-cfg.gdb
@@ -0,0 +1,144 @@
+# Usage:
+# sudo gdb -x ./taosd-dump-cfg.gdb
+
+define attach_pidof
+ if $argc != 1
+ help attach_pidof
+ else
+ shell echo -e "\
+set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
+if \$PID > 0\n\
+ attach "$(pidof -s $arg0)"\n\
+else\n\
+ print \"Process '"$arg0"' not found\"\n\
+end" > /tmp/gdb.pidof
+ source /tmp/gdb.pidof
+ end
+end
+
+document attach_pidof
+Attach to process by name
+Usage: attach_pidof PROG_NAME
+end
+
+set $TAOS_CFG_VTYPE_INT8 = 0
+set $TAOS_CFG_VTYPE_INT16 = 1
+set $TAOS_CFG_VTYPE_INT32 = 2
+set $TAOS_CFG_VTYPE_FLOAT = 3
+set $TAOS_CFG_VTYPE_STRING = 4
+set $TAOS_CFG_VTYPE_IPSTR = 5
+set $TAOS_CFG_VTYPE_DIRECTORY = 6
+
+set $TSDB_CFG_CTYPE_B_CONFIG = 1U
+set $TSDB_CFG_CTYPE_B_SHOW = 2U
+set $TSDB_CFG_CTYPE_B_LOG = 4U
+set $TSDB_CFG_CTYPE_B_CLIENT = 8U
+set $TSDB_CFG_CTYPE_B_OPTION = 16U
+set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U
+
+set $TSDB_CFG_PRINT_LEN = 53
+
+define print_blank
+ if $argc == 1
+ set $blank_len = $arg0
+ while $blank_len > 0
+ printf "%s", " "
+ set $blank_len = $blank_len - 1
+ end
+ end
+end
+
+define dump_cfg
+ if $argc != 1
+ help dump_cfg
+ else
+ set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
+ if $blen < 0
+ $blen = 0
+ end
+ #printf "%s: %d\n", "******blen: ", $blen
+ printf "%s: ", $arg0.option
+ print_blank $blen
+
+ if $arg0.valType == $TAOS_CFG_VTYPE_INT8
+ printf "%d\n", *((int8_t *) $arg0.ptr)
+ else
+ if $arg0.valType == $TAOS_CFG_VTYPE_INT16
+ printf "%d\n", *((int16_t *) $arg0.ptr)
+ else
+ if $arg0.valType == $TAOS_CFG_VTYPE_INT32
+ printf "%d\n", *((int32_t *) $arg0.ptr)
+ else
+ if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
+ printf "%f\n", *((float *) $arg0.ptr)
+ else
+ printf "%s\n", $arg0.ptr
+ end
+ end
+ end
+ end
+ end
+end
+
+document dump_cfg
+Dump a cfg entry
+Usage: dump_cfg cfg
+end
+
+set pagination off
+
+attach_pidof taosd
+
+set $idx=0
+#print tsGlobalConfigNum
+#set $end=$1
+set $end=tsGlobalConfigNum
+
+p "*=*=*=*=*=*=*=*=*= taos global config:"
+#while ($idx .lt. $end)
+while ($idx < $end)
+ # print tsGlobalConfig[$idx].option
+ set $cfg = tsGlobalConfig[$idx]
+ set $tsce = tscEmbedded
+# p "1"
+ if ($tsce == 0)
+ if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
+ end
+ else
+ if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
+ else
+ if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
+ else
+ dump_cfg $cfg
+ end
+ end
+ end
+
+ set $idx=$idx+1
+end
+
+set $idx=0
+
+p "*=*=*=*=*=*=*=*=*= taos local config:"
+while ($idx < $end)
+ set $cfg = tsGlobalConfig[$idx]
+ set $tsce = tscEmbedded
+ if ($tsce == 0)
+ if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
+ end
+ else
+ if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
+ else
+ if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
+ else
+ dump_cfg $cfg
+ end
+ end
+ end
+
+ set $idx=$idx+1
+end
+
+detach
+
+quit
diff --git a/src/balance/src/bnThread.c b/src/balance/src/bnThread.c
index 84f8694fca7248abb27529f5e8268dc0e08bf815..3931acd0536dab48e11b63a7cd045c3ede3b3937 100644
--- a/src/balance/src/bnThread.c
+++ b/src/balance/src/bnThread.c
@@ -56,7 +56,7 @@ int32_t bnInitThread() {
pthread_attr_destroy(&thattr);
if (ret != 0) {
- mError("failed to create balance thread since %s", strerror(errno));
+ mError("failed to create balance thread since %s", strerror(ret));
return -1;
}
diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h
index 06176451881b30ebe4c24bfcd478ceeb221a86de..581cd37cbd53cb87847fc5a13c88b03eb797d93a 100644
--- a/src/client/inc/tscLocalMerge.h
+++ b/src/client/inc/tscLocalMerge.h
@@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
tFilePage filePage;
} SLocalDataSource;
-typedef struct SLocalReducer {
+typedef struct SLocalMerger {
SLocalDataSource ** pLocalDataSrc;
int32_t numOfBuffer;
int32_t numOfCompleted;
@@ -62,7 +62,7 @@ typedef struct SLocalReducer {
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
-} SLocalReducer;
+} SLocalMerger;
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
@@ -89,10 +89,10 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
/*
* create local reducer to launch the second-stage reduce process at client site
*/
-void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
+void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
-void tscDestroyLocalReducer(SSqlObj *pSql);
+void tscDestroyLocalMerger(SSqlObj *pSql);
int32_t tscDoLocalMerge(SSqlObj *pSql);
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index 7529891635f47fb4f2b68ede5b48ed5640c8f00a..e1370513ef28b4eb86d4caf8c7b34481f334c512 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -47,7 +47,6 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
-
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 633512e324671821f43e8d07dc3f195f2800eca9..ce623cdc03b6871c989186b52fc221c8609a4811 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -98,20 +98,19 @@ static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t sub
return pCmd->pQueryInfo[subClauseIndex];
}
-int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
- STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
-void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
+int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
+void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
-void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable);
+void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
-int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
+int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
/**
@@ -142,10 +141,6 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
-/* use for keep current db info temporarily, for handle table with db prefix */
-// todo remove it
-void tscGetDBInfoFromTableFullName(char* tableId, char* db);
-
int tscAllocPayload(SSqlCmd* pCmd, int size);
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes);
@@ -215,8 +210,8 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
-STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
- SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
+STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
+ SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
@@ -225,7 +220,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
-SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
+SArray* tscVgroupTableInfoDup(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
@@ -292,7 +287,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
-STableMeta* tscTableMetaClone(STableMeta* pTableMeta);
+STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
void* malloc_throw(size_t size);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 652e5bdd47b5bfa3adfdb8b8f58e94399c4de810..c1b6b0c2b9a0f1c23b8afecbce61451c9ff68488 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -22,15 +22,15 @@ extern "C" {
#include "os.h"
+#include "qAggMain.h"
#include "taos.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
-#include "tglobal.h"
-#include "tsqlfunction.h"
-#include "tutil.h"
#include "tcache.h"
+#include "tglobal.h"
#include "tref.h"
+#include "tutil.h"
#include "qExecutor.h"
#include "qSqlparser.h"
@@ -39,7 +39,7 @@ extern "C" {
// forward declaration
struct SSqlInfo;
-struct SLocalReducer;
+struct SLocalMerger;
// data source from sql string or from file
enum {
@@ -67,7 +67,7 @@ typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
- char sTableName[TSDB_TABLE_FNAME_LEN];
+ char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name
} CChildTableMeta;
typedef struct STableMeta {
@@ -91,7 +91,7 @@ typedef struct STableMetaInfo {
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
- char name[TSDB_TABLE_FNAME_LEN]; // (super) table name
+ SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray, involved tag columns
} STableMetaInfo;
@@ -142,7 +142,7 @@ typedef struct SCond {
} SCond;
typedef struct SJoinNode {
- char tableId[TSDB_TABLE_FNAME_LEN];
+ char tableName[TSDB_TABLE_FNAME_LEN];
uint64_t uid;
int16_t tagColId;
} SJoinNode;
@@ -176,7 +176,7 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct STableDataBlocks {
- char tableName[TSDB_TABLE_FNAME_LEN];
+ SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgId; // virtual group id
@@ -254,7 +254,7 @@ typedef struct {
int8_t submitSchema; // submit block is built with table schema
STagData tagData; // NOTE: pTagData->data is used as a variant length array
- char **pTableNameList; // all involved tableMeta list of current insert sql statement.
+ SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
int32_t numOfTables;
SHashObj *pTableBlockHashList; // data block for each table
@@ -292,7 +292,7 @@ typedef struct {
SColumnIndex* pColumnIndex;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
- struct SLocalReducer *pLocalReducer;
+ struct SLocalMerger *pLocalMerger;
} SSqlRes;
typedef struct STscObj {
@@ -317,7 +317,8 @@ typedef struct STscObj {
} STscObj;
typedef struct SSubqueryState {
- int32_t numOfRemain; // the number of remain unfinished subquery
+ pthread_mutex_t mutex;
+ int8_t *states;
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
} SSubqueryState;
@@ -410,7 +411,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
-void tscResetSqlCmdObj(SSqlCmd *pCmd);
+void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
/**
* free query result of the sql object
@@ -435,7 +436,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
-void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql);
+void tscImportDataFromFile(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 4eae2b7a877bee69f1192a9558c4db8d6fb1d596..8e5f621b37d77d41e44ff2cb329e6d6d03d62340 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -351,7 +351,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
pCmd->parseFinished = false;
- tscResetSqlCmdObj(pCmd);
+ tscResetSqlCmd(pCmd, true);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 599fa8646096da224fc2d77b55f164e0581400ef..4b1ab477304d68d5d52ed02b9b2d2c0121c4ffa6 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -569,10 +569,12 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
}
char fullName[TSDB_TABLE_FNAME_LEN * 2] = {0};
- extractDBName(pTableMetaInfo->name, fullName);
+ tNameGetDbName(&pTableMetaInfo->name, fullName);
+
extractTableName(pMeta->sTableName, param->sTableName);
snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName);
- extractTableName(pTableMetaInfo->name, param->buf);
+
+ strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
@@ -602,6 +604,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
+
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -675,8 +678,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pTableMetaInfo->pTableMeta != NULL);
- char tableName[TSDB_TABLE_NAME_LEN] = {0};
- extractTableName(pTableMetaInfo->name, tableName);
+ const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@@ -712,7 +714,9 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- extractTableName(pTableMetaInfo->name, param->buf);
+
+ strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
+
param->pParentSql = pSql;
param->pInterSql = pInterSql;
param->fp = tscRebuildCreateDBStatement;
diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c
index 6280cc520a9adf7b2d8686455b1524a8b356af83..d1ccc1fbb060e8d697f19d6f18d96ffa445ad2ee 100644
--- a/src/client/src/tscLocalMerge.c
+++ b/src/client/src/tscLocalMerge.c
@@ -16,7 +16,7 @@
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h"
-#include "qAst.h"
+#include "texpr.h"
#include "tlosertree.h"
#include "tscLog.h"
#include "tscUtil.h"
@@ -56,7 +56,7 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
}
}
-static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
+static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescriptor *pDesc) {
/*
* the fields and offset attributes in pCmd and pModel may be different due to
* merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
@@ -166,7 +166,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
return pFillCol;
}
-void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
+void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@@ -212,9 +212,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
return;
}
- size_t size = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush;
+ size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;
- SLocalReducer *pReducer = (SLocalReducer *) calloc(1, size);
+ SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
@@ -372,7 +372,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->offset = (int32_t)pQueryInfo->limit.offset;
- pRes->pLocalReducer = pReducer;
+ pRes->pLocalMerger = pReducer;
pRes->numOfGroups = 0;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
@@ -477,13 +477,13 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
return 0;
}
-void tscDestroyLocalReducer(SSqlObj *pSql) {
+void tscDestroyLocalMerger(SSqlObj *pSql) {
if (pSql == NULL) {
return;
}
SSqlRes *pRes = &(pSql->res);
- if (pRes->pLocalReducer == NULL) {
+ if (pRes->pLocalMerger == NULL) {
return;
}
@@ -491,14 +491,14 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// there is no more result, so we release all allocated resource
- SLocalReducer *pLocalReducer = (SLocalReducer *)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
- if (pLocalReducer != NULL) {
- pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
+ SLocalMerger *pLocalMerge = (SLocalMerger *)atomic_exchange_ptr(&pRes->pLocalMerger, NULL);
+ if (pLocalMerge != NULL) {
+ pLocalMerge->pFillInfo = taosDestroyFillInfo(pLocalMerge->pFillInfo);
- if (pLocalReducer->pCtx != NULL) {
+ if (pLocalMerge->pCtx != NULL) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
- SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
+ SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[i];
tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
@@ -508,31 +508,31 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
}
}
- tfree(pLocalReducer->pCtx);
+ tfree(pLocalMerge->pCtx);
}
- tfree(pLocalReducer->prevRowOfInput);
+ tfree(pLocalMerge->prevRowOfInput);
- tfree(pLocalReducer->pTempBuffer);
- tfree(pLocalReducer->pResultBuf);
+ tfree(pLocalMerge->pTempBuffer);
+ tfree(pLocalMerge->pResultBuf);
- if (pLocalReducer->pLoserTree) {
- tfree(pLocalReducer->pLoserTree->param);
- tfree(pLocalReducer->pLoserTree);
+ if (pLocalMerge->pLoserTree) {
+ tfree(pLocalMerge->pLoserTree->param);
+ tfree(pLocalMerge->pLoserTree);
}
- tfree(pLocalReducer->pFinalRes);
- tfree(pLocalReducer->discardData);
+ tfree(pLocalMerge->pFinalRes);
+ tfree(pLocalMerge->discardData);
- tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
- pLocalReducer->numOfVnode);
- for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
- tfree(pLocalReducer->pLocalDataSrc[i]);
+ tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, pLocalMerge->finalModel,
+ pLocalMerge->numOfVnode);
+ for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) {
+ tfree(pLocalMerge->pLocalDataSrc[i]);
}
- pLocalReducer->numOfBuffer = 0;
- pLocalReducer->numOfCompleted = 0;
- free(pLocalReducer);
+ pLocalMerge->numOfBuffer = 0;
+ pLocalMerge->numOfCompleted = 0;
+ free(pLocalMerge);
} else {
tscDebug("%p already freed or another free function is invoked", pSql);
}
@@ -604,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
}
-bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
+bool isSameGroup(SSqlCmd *pCmd, SLocalMerger *pReducer, char *pPrev, tFilePage *tmpBuffer) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// disable merge procedure for column projection query
@@ -795,12 +795,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
/**
*
- * @param pLocalReducer
+ * @param pLocalMerge
* @param pOneInterDataSrc
* @param treeList
* @return the number of remain input source. if ret == 0, all data has been handled
*/
-int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
+int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
bool *needAdjustLoserTree) {
pOneInterDataSrc->rowIdx = 0;
pOneInterDataSrc->pageId += 1;
@@ -817,17 +817,17 @@ int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *p
#endif
*needAdjustLoserTree = true;
} else {
- pLocalReducer->numOfCompleted += 1;
+ pLocalMerge->numOfCompleted += 1;
pOneInterDataSrc->rowIdx = -1;
pOneInterDataSrc->pageId = -1;
*needAdjustLoserTree = true;
}
- return pLocalReducer->numOfBuffer;
+ return pLocalMerge->numOfBuffer;
}
-void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
+void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
SLoserTreeInfo *pTree) {
/*
* load a new data page into memory for intermediate dataset source,
@@ -835,7 +835,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
*/
bool needToAdjust = true;
if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
- loadNewDataFromDiskFor(pLocalReducer, pOneInterDataSrc, &needToAdjust);
+ loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
}
/*
@@ -843,7 +843,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
* if the loser tree is rebuild completed, we do not need to adjust
*/
if (needToAdjust) {
- int32_t leafNodeIdx = pTree->pNode[0].index + pLocalReducer->numOfBuffer;
+ int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
#ifdef _DEBUG_VIEW
printf("before adjust:\t");
@@ -860,7 +860,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
}
}
-void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
+void savePrevRecordAndSetupFillInfo(SLocalMerger *pLocalMerge, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
// discard following dataset in the same group and reset the interpolation information
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -873,28 +873,28 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
taosResetFillInfo(pFillInfo, revisedSTime);
}
- pLocalReducer->discard = true;
- pLocalReducer->discardData->num = 0;
+ pLocalMerge->discard = true;
+ pLocalMerge->discardData->num = 0;
- SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
- tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1);
+ SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
+ tColModelAppend(pModel, pLocalMerge->discardData, pLocalMerge->prevRowOfInput, 0, 1, 1);
}
-static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
+static void genFinalResWithoutFill(SSqlRes* pRes, SLocalMerger *pLocalMerge, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
- tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
+ tFilePage * pBeforeFillData = pLocalMerge->pResultBuf;
- pRes->data = pLocalReducer->pFinalRes;
+ pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) pBeforeFillData->num;
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = (int32_t) pBeforeFillData->num;
- tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
+ tColModelErase(pLocalMerge->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
- tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
+ tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
@@ -907,7 +907,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
pRes->numOfRows = 0;
pBeforeFillData->num = 0;
- pLocalReducer->discard = true;
+ pLocalMerge->discard = true;
return;
}
@@ -923,29 +923,29 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
pRes->numOfRows -= overflow;
pBeforeFillData->num -= overflow;
- tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
+ tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
// set remain data to be discarded, and reset the interpolation information
- savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
+ savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pLocalMerge->pFillInfo);
}
- memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalModel->rowSize));
+ memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalMerge->finalModel->rowSize));
pRes->numOfClauseTotal += pRes->numOfRows;
pBeforeFillData->num = 0;
}
/*
- * Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
+ * Note: pRes->pLocalMerge may be null, due to the fact that "tscDestroyLocalMerger" is called
* by "interuptHandler" function in shell
*/
-static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
+static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutput) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
- tFilePage *pBeforeFillData = pLocalReducer->pResultBuf;
+ tFilePage *pBeforeFillData = pLocalMerge->pResultBuf;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
+ SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
// todo extract function
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@@ -953,11 +953,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
- pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity);
+ pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalMerge->resColModel->capacity);
}
while (1) {
- int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
+ int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalMerge->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
@@ -970,7 +970,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
}
- pRes->data = pLocalReducer->pFinalRes;
+ pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) newRows;
pQueryInfo->limit.offset = 0;
@@ -985,7 +985,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
// all output in current group are completed
- int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
+ int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalMerge->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@@ -1003,7 +1003,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
assert(pRes->numOfRows >= 0);
/* set remain data to be discarded, and reset the interpolation information */
- savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
+ savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pFillInfo);
}
int32_t offset = 0;
@@ -1025,8 +1025,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tfree(pResPages);
}
-static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
- SColumnModel *pColumnModel = pLocalReducer->pDesc->pColumnModel;
+static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
+ SColumnModel *pColumnModel = pLocalMerge->pDesc->pColumnModel;
assert(pColumnModel->capacity == 1 && tmpBuffer->num == 1);
// copy to previous temp buffer
@@ -1034,20 +1034,20 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
SSchema *pSchema = getColumnModelSchema(pColumnModel, i);
int16_t offset = getColumnModelOffset(pColumnModel, i);
- memcpy(pLocalReducer->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
+ memcpy(pLocalMerge->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
}
tmpBuffer->num = 0;
- pLocalReducer->hasPrevRow = true;
+ pLocalMerge->hasPrevRow = true;
}
-static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
+static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
- SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
+ SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[j];
// tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
int32_t functionId = pCtx->functionId;
@@ -1074,20 +1074,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
}
for (int32_t j = 0; j < size; ++j) {
- int32_t functionId = pLocalReducer->pCtx[j].functionId;
+ int32_t functionId = pLocalMerge->pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
- aAggs[functionId].mergeFunc(&pLocalReducer->pCtx[j]);
+ aAggs[functionId].mergeFunc(&pLocalMerge->pCtx[j]);
}
}
-static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
- if (pLocalReducer->hasUnprocessedRow) {
- pLocalReducer->hasUnprocessedRow = false;
- doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
- savePreviousRow(pLocalReducer, tmpBuffer);
+static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
+ if (pLocalMerge->hasUnprocessedRow) {
+ pLocalMerge->hasUnprocessedRow = false;
+ doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
+ savePreviousRow(pLocalMerge, tmpBuffer);
}
}
@@ -1120,7 +1120,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* filled with the same result, which is the tags, specified in group by clause
*
*/
-static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) {
+static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalMerger *pLocalMerge) {
int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@@ -1135,7 +1135,7 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
char *buf = malloc((size_t)maxBufSize);
for (int32_t k = 0; k < size; ++k) {
- SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
+ SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
if (pCtx->functionId != TSDB_FUNC_TAG) {
continue;
}
@@ -1153,20 +1153,20 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
free(buf);
}
-int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
+int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
- SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
+ SQLFunctionCtx* pCtx = &pLocalMerge->pCtx[k];
aAggs[pCtx->functionId].xFinalize(pCtx);
}
- pLocalReducer->hasPrevRow = false;
+ pLocalMerge->hasPrevRow = false;
- int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx);
- pLocalReducer->pResultBuf->num += numOfRes;
+ int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalMerge->pCtx);
+ pLocalMerge->pResultBuf->num += numOfRes;
- fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer);
+ fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalMerge);
return numOfRes;
}
@@ -1177,22 +1177,22 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
* results generated by simple aggregation function, we merge them all into one points
* *Exception*: column projection query, required no merge procedure
*/
-bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
+bool needToMerge(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
- int16_t functionId = pLocalReducer->pCtx[0].functionId;
+ int16_t functionId = pLocalMerge->pCtx[0].functionId;
// todo opt performance
if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0))) { // column projection query
ret = 1; // disable merge procedure
} else {
- tOrderDescriptor *pDesc = pLocalReducer->pDesc;
+ tOrderDescriptor *pDesc = pLocalMerge->pDesc;
if (pDesc->orderInfo.numOfCols > 0) {
if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
// todo refactor comparator
- ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
+ ret = compare_a(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
} else { // desc
- ret = compare_d(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
+ ret = compare_d(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
}
}
}
@@ -1230,17 +1230,17 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
/**
*
* @param pSql
- * @param pLocalReducer
+ * @param pLocalMerge
* @param noMoreCurrentGroupRes
* @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups
*/
-bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) {
+bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurrentGroupRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- tFilePage * pResBuf = pLocalReducer->pResultBuf;
- SColumnModel *pModel = pLocalReducer->resColModel;
+ tFilePage * pResBuf = pLocalMerge->pResultBuf;
+ SColumnModel *pModel = pLocalMerge->resColModel;
pRes->code = TSDB_CODE_SUCCESS;
@@ -1251,11 +1251,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
if (pQueryInfo->slimit.offset > 0) {
pRes->numOfRows = 0;
pQueryInfo->slimit.offset -= 1;
- pLocalReducer->discard = !noMoreCurrentGroupRes;
+ pLocalMerge->discard = !noMoreCurrentGroupRes;
- if (pLocalReducer->discard) {
- SColumnModel *pInternModel = pLocalReducer->pDesc->pColumnModel;
- tColModelAppend(pInternModel, pLocalReducer->discardData, pLocalReducer->pTempBuffer->data, 0, 1, 1);
+ if (pLocalMerge->discard) {
+ SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
+ tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return false;
@@ -1264,19 +1264,14 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
- doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalModel->rowSize);
+ doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
-#ifdef _DEBUG_VIEW
- printf("final result before interpo:\n");
-// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
-#endif
-
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
- genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
+ genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);
} else {
- SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
+ SFillInfo* pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL) {
TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@@ -1284,34 +1279,34 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
- doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
+ doFillResult(pSql, pLocalMerge, noMoreCurrentGroupRes);
}
return true;
}
-void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
+void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset output buffer to the beginning
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
+ pLocalMerge->pCtx[i].aOutputBuf = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
- pLocalReducer->pCtx[i].ptsOutputBuf = pLocalReducer->pCtx[0].aOutputBuf;
+ pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].aOutputBuf;
}
}
- memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
+ memset(pLocalMerge->pResultBuf, 0, pLocalMerge->nResultBufSize + sizeof(tFilePage));
}
-static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
+static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalMerger *pLocalMerge) {
// In handling data in other groups, we need to reset the interpolation information for a new group data
pRes->numOfRows = 0;
pRes->numOfRowsGroup = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- pQueryInfo->limit.offset = pLocalReducer->offset;
+ pQueryInfo->limit.offset = pLocalMerge->offset;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@@ -1320,12 +1315,12 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
- taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
+ taosResetFillInfo(pLocalMerge->pFillInfo, newTime);
}
}
-static bool isAllSourcesCompleted(SLocalReducer *pLocalReducer) {
- return (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted);
+static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
+ return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
}
static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
@@ -1333,19 +1328,19 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- SLocalReducer *pLocalReducer = pRes->pLocalReducer;
- SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
+ SLocalMerger *pLocalMerge = pRes->pLocalMerger;
+ SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
assert(pQueryInfo->fillType != TSDB_FILL_NONE);
- tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
+ tFilePage *pFinalDataBuf = pLocalMerge->pResultBuf;
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column
- int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
+ int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) { // do fill gap
- doFillResult(pSql, pLocalReducer, false);
+ doFillResult(pSql, pLocalMerge, false);
}
return true;
@@ -1358,23 +1353,23 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
- SLocalReducer *pLocalReducer = pRes->pLocalReducer;
- SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
+ SLocalMerger *pLocalMerge = pRes->pLocalMerger;
+ SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
- bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
+ bool prevGroupCompleted = (!pLocalMerge->discard) && pLocalMerge->hasUnprocessedRow;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL ||
+ if ((isAllSourcesCompleted(pLocalMerge) && !pLocalMerge->hasPrevRow) || pLocalMerge->pLocalDataSrc[0] == NULL ||
prevGroupCompleted) {
// if fillType == TSDB_FILL_NONE, return directly
if (pQueryInfo->fillType != TSDB_FILL_NONE &&
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
- int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
+ int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) {
- doFillResult(pSql, pLocalReducer, true);
+ doFillResult(pSql, pLocalMerge, true);
}
}
@@ -1384,7 +1379,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
*
* No results will be generated and query completed.
*/
- if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalReducer) && (!pLocalReducer->hasUnprocessedRow))) {
+ if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalMerge) && (!pLocalMerge->hasUnprocessedRow))) {
return true;
}
@@ -1393,7 +1388,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
return true;
}
- resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
+ resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
return false;
@@ -1403,12 +1398,12 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
- SLocalReducer *pLocalReducer = pRes->pLocalReducer;
+ SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
- SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
+ SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
@@ -1417,7 +1412,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
}
}
- doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
+ doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
}
int32_t tscDoLocalMerge(SSqlObj *pSql) {
@@ -1426,14 +1421,18 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tscResetForNextRetrieve(pRes);
- if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
+ if (pSql->signature != pSql || pRes == NULL || pRes->pLocalMerger == NULL) { // all data has been processed
+ if (pRes->code == TSDB_CODE_SUCCESS) {
+ return pRes->code;
+ }
+
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
return pRes->code;
}
- SLocalReducer *pLocalReducer = pRes->pLocalReducer;
+ SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
+ tFilePage *tmpBuffer = pLocalMerge->pTempBuffer;
if (doHandleLastRemainData(pSql)) {
return TSDB_CODE_SUCCESS;
@@ -1443,24 +1442,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
- SLoserTreeInfo *pTree = pLocalReducer->pLoserTree;
+ SLoserTreeInfo *pTree = pLocalMerge->pLoserTree;
// clear buffer
- handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
- SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
+ handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
+ SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
while (1) {
- if (isAllSourcesCompleted(pLocalReducer)) {
+ if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
#ifdef _DEBUG_VIEW
printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
#endif
- assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
+ assert((pTree->pNode[0].index < pLocalMerge->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
// chosen from loser tree
- SLocalDataSource *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index];
+ SLocalDataSource *pOneDataSrc = pLocalMerge->pLocalDataSrc[pTree->pNode[0].index];
tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1,
pOneDataSrc->pMemBuffer->pColumnModel->capacity);
@@ -1473,76 +1472,76 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo);
#endif
- if (pLocalReducer->discard) {
- assert(pLocalReducer->hasUnprocessedRow == false);
+ if (pLocalMerge->discard) {
+ assert(pLocalMerge->hasUnprocessedRow == false);
/* current record belongs to the same group of previous record, need to discard it */
- if (isSameGroup(pCmd, pLocalReducer, pLocalReducer->discardData->data, tmpBuffer)) {
+ if (isSameGroup(pCmd, pLocalMerge, pLocalMerge->discardData->data, tmpBuffer)) {
tmpBuffer->num = 0;
pOneDataSrc->rowIdx += 1;
- adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
+ adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
// all inputs are exhausted, abort current process
- if (isAllSourcesCompleted(pLocalReducer)) {
+ if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
// data belongs to the same group needs to be discarded
continue;
} else {
- pLocalReducer->discard = false;
- pLocalReducer->discardData->num = 0;
+ pLocalMerge->discard = false;
+ pLocalMerge->discardData->num = 0;
if (saveGroupResultInfo(pSql)) {
return TSDB_CODE_SUCCESS;
}
- resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
+ resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
}
- if (pLocalReducer->hasPrevRow) {
- if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) {
+ if (pLocalMerge->hasPrevRow) {
+ if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) {
// belong to the group of the previous row, continue process it
- doExecuteSecondaryMerge(pCmd, pLocalReducer, false);
+ doExecuteSecondaryMerge(pCmd, pLocalMerge, false);
// copy to buffer
- savePreviousRow(pLocalReducer, tmpBuffer);
+ savePreviousRow(pLocalMerge, tmpBuffer);
} else {
/*
* current row does not belong to the group of previous row.
* so the processing of previous group is completed.
*/
- int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
- bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
+ int32_t numOfRes = finalizeRes(pQueryInfo, pLocalMerge);
+ bool sameGroup = isSameGroup(pCmd, pLocalMerge, pLocalMerge->prevRowOfInput, tmpBuffer);
- tFilePage *pResBuf = pLocalReducer->pResultBuf;
+ tFilePage *pResBuf = pLocalMerge->pResultBuf;
/*
* if the previous group does NOT generate any result (pResBuf->num == 0),
* continue to process results instead of return results.
*/
- if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalReducer->resColModel->capacity)) {
+ if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalMerge->resColModel->capacity)) {
// does not belong to the same group
- bool notSkipped = genFinalResults(pSql, pLocalReducer, !sameGroup);
+ bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);
// this row needs to discard, since it belongs to the group of previous
- if (pLocalReducer->discard && sameGroup) {
- pLocalReducer->hasUnprocessedRow = false;
+ if (pLocalMerge->discard && sameGroup) {
+ pLocalMerge->hasUnprocessedRow = false;
tmpBuffer->num = 0;
} else { // current row does not belongs to the previous group, so it is not be handled yet.
- pLocalReducer->hasUnprocessedRow = true;
+ pLocalMerge->hasUnprocessedRow = true;
}
- resetOutputBuf(pQueryInfo, pLocalReducer);
+ resetOutputBuf(pQueryInfo, pLocalMerge);
pOneDataSrc->rowIdx += 1;
// here we do not check the return value
- adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
+ adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
if (pRes->numOfRows == 0) {
- handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
+ handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
if (!sameGroup) {
/*
@@ -1553,7 +1552,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
- resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
+ resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
} else {
/*
@@ -1561,7 +1560,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* We start the process in a new round.
*/
if (sameGroup) {
- handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
+ handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
}
}
@@ -1573,24 +1572,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
}
} else { // result buffer is not full
doProcessResultInNextWindow(pSql, numOfRes);
- savePreviousRow(pLocalReducer, tmpBuffer);
+ savePreviousRow(pLocalMerge, tmpBuffer);
}
}
} else {
- doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
- savePreviousRow(pLocalReducer, tmpBuffer); // copy the processed row to buffer
+ doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
+ savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer
}
pOneDataSrc->rowIdx += 1;
- adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
+ adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
}
- if (pLocalReducer->hasPrevRow) {
- finalizeRes(pQueryInfo, pLocalReducer);
+ if (pLocalMerge->hasPrevRow) {
+ finalizeRes(pQueryInfo, pLocalMerge);
}
- if (pLocalReducer->pResultBuf->num) {
- genFinalResults(pSql, pLocalReducer, true);
+ if (pLocalMerge->pResultBuf->num) {
+ genFinalResults(pSql, pLocalMerge, true);
}
return TSDB_CODE_SUCCESS;
@@ -1598,8 +1597,8 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) {
SSqlRes *pRes = &pObj->res;
- if (pRes->pLocalReducer != NULL) {
- tscDestroyLocalReducer(pObj);
+ if (pRes->pLocalMerger != NULL) {
+ tscDestroyLocalMerger(pObj);
}
pRes->qhandle = 1; // hack to pass the safety check in fetch_row function
@@ -1607,17 +1606,17 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->row = 0;
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
- pRes->pLocalReducer = (SLocalReducer *)calloc(1, sizeof(SLocalReducer));
+ pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
/*
* we need one additional byte space
* the sprintf function needs one additional space to put '\0' at the end of string
*/
size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1;
- pRes->pLocalReducer->pResultBuf = (tFilePage *)calloc(1, allocSize);
+ pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize);
- pRes->pLocalReducer->pResultBuf->num = numOfRes;
- pRes->data = pRes->pLocalReducer->pResultBuf->data;
+ pRes->pLocalMerger->pResultBuf->num = numOfRes;
+ pRes->data = pRes->pLocalMerger->pResultBuf->data;
}
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 0c7af5d4e3d8b61445b25c94cf5eda021ceba3e9..c0a8762180d2198847f6704da6bb28160659b858 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -703,7 +703,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
- sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
+ sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -813,26 +813,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
tscAddEmptyMetaInfo(pQueryInfo);
}
- STableMetaInfo *pSTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
- code = tscSetTableFullName(pSTableMeterMetaInfo, &sToken, pSql);
+ STableMetaInfo *pSTableMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
+ code = tscSetTableFullName(pSTableMetaInfo, &sToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- tstrncpy(pCmd->tagData.name, pSTableMeterMetaInfo->name, sizeof(pCmd->tagData.name));
+ tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name);
pCmd->tagData.dataLen = 0;
- code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
+ code = tscGetTableMeta(pSql, pSTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMeterMetaInfo)) {
+ if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
- SSchema *pTagSchema = tscGetTableTagSchema(pSTableMeterMetaInfo->pTableMeta);
- STableComInfo tinfo = tscGetTableInfo(pSTableMeterMetaInfo->pTableMeta);
+ SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
+ STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
@@ -840,7 +840,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
SParsedDataColInfo spd = {0};
- uint8_t numOfTags = tscGetNumOfTags(pSTableMeterMetaInfo->pTableMeta);
+ uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
spd.numOfCols = numOfTags;
// if specify some tags column
@@ -1036,11 +1036,7 @@ static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
}
/**
- * usage: insert into table1 values() () table2 values()()
- *
- * @param str
- * @param acct
- * @param db
+ * parse insert sql
* @param pSql
* @return
*/
@@ -1343,10 +1339,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
// make a backup as tsParseInsertSql may modify the string
char* sqlstr = strdup(pSql->sqlstr);
ret = tsParseInsertSql(pSql);
- if (sqlstr == NULL || pSql->parseRetry >= 1 || ret != TSDB_CODE_TSC_INVALID_SQL) {
+ if ((sqlstr == NULL) || (pSql->parseRetry >= 1) ||
+ (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) {
free(sqlstr);
} else {
- tscResetSqlCmdObj(pCmd);
+ tscResetSqlCmd(pCmd, true);
free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
pSql->parseRetry++;
@@ -1358,7 +1355,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
- tscResetSqlCmdObj(pCmd);
+ tscResetSqlCmd(pCmd, true);
pSql->parseRetry++;
ret = tscToSQLCmd(pSql, &SQLInfo);
}
@@ -1406,39 +1403,38 @@ typedef struct SImportFileSupport {
FILE *fp;
} SImportFileSupport;
-static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
+static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRows) {
assert(param != NULL && tres != NULL);
+ char * tokenBuf = NULL;
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
+ int32_t count = 0;
+ int32_t maxRows = 0;
+ FILE * fp = NULL;
+
SSqlObj *pSql = tres;
SSqlCmd *pCmd = &pSql->cmd;
- SImportFileSupport *pSupporter = (SImportFileSupport *) param;
+ SImportFileSupport *pSupporter = (SImportFileSupport *)param;
SSqlObj *pParentSql = pSupporter->pSql;
- FILE *fp = pSupporter->fp;
-
- if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // handle error
- assert(taos_errno(pSql) == code);
-
- do {
- if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
- assert(pSql->res.numOfRows == 0);
- int32_t errc = fseek(fp, 0, SEEK_SET);
- if (errc < 0) {
- tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
- } else {
- break;
- }
- }
-
- taos_free_result(pSql);
- tfree(pSupporter);
- fclose(fp);
-
- pParentSql->res.code = code;
- tscAsyncResultOnError(pParentSql);
- return;
- } while (0);
+ fp = pSupporter->fp;
+
+ int32_t code = pSql->res.code;
+
+ // retry parse data from file and import data from the begining again
+ if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
+ assert(pSql->res.numOfRows == 0);
+ int32_t ret = fseek(fp, 0, SEEK_SET);
+ if (ret < 0) {
+ tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _error;
+ }
+ } else if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
}
// accumulate the total submit records
@@ -1452,28 +1448,32 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
- size_t n = 0;
- ssize_t readLen = 0;
- char * line = NULL;
- int32_t count = 0;
- int32_t maxRows = 0;
-
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ if (pCmd->pTableBlockHashList == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
}
STableDataBlocks *pTableDataBlock = NULL;
- int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE,
- sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) {
-// return ret;
+ pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
}
tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
- char *tokenBuf = calloc(1, 4096);
+ tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
+ if (tokenBuf == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
@@ -1501,30 +1501,42 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
}
tfree(tokenBuf);
- free(line);
+ tfree(line);
+
+ pParentSql->res.code = code;
+ if (code == TSDB_CODE_SUCCESS) {
+ if (count > 0) {
+ code = doPackSendDataBlock(pSql, count, pTableDataBlock);
+ if (code == TSDB_CODE_SUCCESS) {
+ return;
+ } else {
+ goto _error;
+ }
+ } else {
+ taos_free_result(pSql);
+ tfree(pSupporter);
+ fclose(fp);
- if (count > 0) {
- code = doPackSendDataBlock(pSql, count, pTableDataBlock);
- if (code != TSDB_CODE_SUCCESS) {
- pParentSql->res.code = code;
- tscAsyncResultOnError(pParentSql);
+ pParentSql->fp = pParentSql->fetchFp;
+
+ // all data has been sent to vnode, call user function
+ int32_t v = (code != TSDB_CODE_SUCCESS) ? code : (int32_t)pParentSql->res.numOfRows;
+ (*pParentSql->fp)(pParentSql->param, pParentSql, v);
return;
}
+ }
- } else {
- taos_free_result(pSql);
- tfree(pSupporter);
- fclose(fp);
-
- pParentSql->fp = pParentSql->fetchFp;
+_error:
+ tfree(tokenBuf);
+ tfree(line);
+ taos_free_result(pSql);
+ tfree(pSupporter);
+ fclose(fp);
- // all data has been sent to vnode, call user function
- int32_t v = (pParentSql->res.code != TSDB_CODE_SUCCESS) ? pParentSql->res.code : (int32_t)pParentSql->res.numOfRows;
- (*pParentSql->fp)(pParentSql->param, pParentSql, v);
- }
+ tscAsyncResultOnError(pParentSql);
}
-void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
+void tscImportDataFromFile(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
if (pCmd->command != TSDB_SQL_INSERT) {
return;
@@ -1543,12 +1555,11 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
tfree(pSupporter);
tscAsyncResultOnError(pSql);
-
return;
}
pSupporter->pSql = pSql;
- pSupporter->fp = fp;
+ pSupporter->fp = fp;
- parseFileSendDataBlock(pSupporter, pNew, 0);
+ parseFileSendDataBlock(pSupporter, pNew, TSDB_CODE_SUCCESS);
}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index 7c69c16b049e45a763118ccd5a43e3ab3337ed46..c5f06a52f342b5726321925c1864d58c41afbeb4 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -707,7 +707,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
if (ret != 0) {
// todo handle error
}
@@ -790,7 +790,7 @@ static int insertStmtExecute(STscStmt* stmt) {
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
assert(ret == 0);
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 2886c22f5d39d22ca73eb31956eb217351a86584..226de59f460e7a94e8a5f02f23f63225b12cd8ea 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -20,7 +20,7 @@
#include "os.h"
#include "ttype.h"
-#include "qAst.h"
+#include "texpr.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
@@ -60,7 +60,7 @@ static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
-static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken);
+static char* getCurrentDBName(SSqlObj* pSql);
static bool hasSpecifyDB(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@@ -100,8 +100,8 @@ static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryI
static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
static int32_t validateEp(char* ep);
-static int32_t validateDNodeConfig(tDCLSQL* pOptions);
-static int32_t validateLocalConfig(tDCLSQL* pOptions);
+static int32_t validateDNodeConfig(SMiscInfo* pOptions);
+static int32_t validateLocalConfig(SMiscInfo* pOptions);
static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
@@ -110,7 +110,7 @@ static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
-static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
+static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql);
static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t optrToString(tSQLExpr* pExpr, char** exprString);
@@ -239,7 +239,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
if (!pInfo->valid || terrno == TSDB_CODE_TSC_SQL_SYNTAX_ERROR) {
terrno = TSDB_CODE_SUCCESS; // clear the error number
- return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->pzErrMsg);
+ return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->msg);
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
@@ -264,36 +264,48 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DROP_DB: {
const char* msg2 = "invalid name";
const char* msg3 = "param name too long";
+ const char* msg4 = "table is not super table";
- SStrToken* pzName = &pInfo->pDCLInfo->a[0];
+ SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pInfo->type == TSDB_SQL_DROP_DB) {
- assert(pInfo->pDCLInfo->nTokens == 1);
-
- code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL);
+ assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
+ code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
- assert(pInfo->pDCLInfo->nTokens == 1);
+ assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
if(code != TSDB_CODE_SUCCESS) {
return code;
}
+
+ if (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE) {
+ code = tscGetTableMeta(pSql, pTableMetaInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+ }
+
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
pzName->n = strdequote(pzName->z);
- strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
- } else { // drop user
+ strncpy(pCmd->payload, pzName->z, pzName->n);
+ } else { // drop user/account
if (pzName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
+ strncpy(pCmd->payload, pzName->z, pzName->n);
}
break;
@@ -301,13 +313,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_USE_DB: {
const char* msg = "invalid db name";
- SStrToken* pToken = &pInfo->pDCLInfo->a[0];
+ SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pToken, NULL, NULL);
+ int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
@@ -332,12 +344,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg1 = "invalid db name";
const char* msg2 = "name too long";
- SCreateDBInfo* pCreateDB = &(pInfo->pDCLInfo->dbOpt);
+ SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL);
+ int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname));
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -349,15 +361,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
break;
}
- case TSDB_SQL_CREATE_DNODE: { // todo hostname
+ case TSDB_SQL_CREATE_DNODE: {
const char* msg = "invalid host name (ip address)";
- if (pInfo->pDCLInfo->nTokens > 1) {
+ if (taosArrayGetSize(pInfo->pMiscInfo->a) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- SStrToken* pIpAddr = &pInfo->pDCLInfo->a[0];
- pIpAddr->n = strdequote(pIpAddr->z);
+ SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
+ id->n = strdequote(id->z);
break;
}
@@ -367,8 +379,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg2 = "invalid user/account name";
const char* msg3 = "name too long";
- SStrToken* pName = &pInfo->pDCLInfo->user.user;
- SStrToken* pPwd = &pInfo->pDCLInfo->user.passwd;
+ SStrToken* pName = &pInfo->pMiscInfo->user.user;
+ SStrToken* pPwd = &pInfo->pMiscInfo->user.passwd;
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@@ -382,7 +394,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt;
+ SCreateAcctInfo* pAcctOpt = &pInfo->pMiscInfo->acctOpt;
if (pAcctOpt->stat.n > 0) {
if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) {
} else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) {
@@ -397,10 +409,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_DESCRIBE_TABLE: {
- SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
+ SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -418,10 +430,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
case TSDB_SQL_SHOW_CREATE_TABLE: {
- SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
+ SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -439,11 +451,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SHOW_CREATE_DATABASE: {
const char* msg1 = "invalid database name";
- SStrToken* pToken = &pInfo->pDCLInfo->a[0];
+ SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
+
if (pToken->n > TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -455,29 +468,34 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg3 = "invalid dnode ep";
/* validate the ip address */
- tDCLSQL* pDCL = pInfo->pDCLInfo;
+ SMiscInfo* pMiscInfo = pInfo->pMiscInfo;
/* validate the parameter names and options */
- if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) {
+ if (validateDNodeConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char* pMsg = pCmd->payload;
SCfgDnodeMsg* pCfg = (SCfgDnodeMsg*)pMsg;
- pDCL->a[0].n = strdequote(pDCL->a[0].z);
-
- strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n);
+
+ SStrToken* t0 = taosArrayGet(pMiscInfo->a, 0);
+ SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
+
+ t0->n = strdequote(t0->z);
+ strncpy(pCfg->ep, t0->z, t0->n);
if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n);
+ strncpy(pCfg->config, t1->z, t1->n);
+
+ if (taosArrayGetSize(pMiscInfo->a) == 3) {
+ SStrToken* t2 = taosArrayGet(pMiscInfo->a, 2);
- if (pDCL->nTokens == 3) {
- pCfg->config[pDCL->a[1].n] = ' '; // add sep
- strncpy(&pCfg->config[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n);
+ pCfg->config[t1->n] = ' '; // add sep
+ strncpy(&pCfg->config[t1->n + 1], t2->z, t2->n);
}
break;
@@ -492,7 +510,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pCmd->command = pInfo->type;
- SUserInfo* pUser = &pInfo->pDCLInfo->user;
+ SUserInfo* pUser = &pInfo->pMiscInfo->user;
SStrToken* pName = &pUser->user;
SStrToken* pPwd = &pUser->passwd;
@@ -536,18 +554,22 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_CFG_LOCAL: {
- tDCLSQL* pDCL = pInfo->pDCLInfo;
- const char* msg = "invalid configure options or values";
+ SMiscInfo *pMiscInfo = pInfo->pMiscInfo;
+ const char *msg = "invalid configure options or values";
// validate the parameter names and options
- if (validateLocalConfig(pDCL) != TSDB_CODE_SUCCESS) {
+ if (validateLocalConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- strncpy(pCmd->payload, pDCL->a[0].z, pDCL->a[0].n);
- if (pDCL->nTokens == 2) {
- pCmd->payload[pDCL->a[0].n] = ' '; // add sep
- strncpy(&pCmd->payload[pDCL->a[0].n + 1], pDCL->a[1].z, pDCL->a[1].n);
+ int32_t numOfToken = (int32_t) taosArrayGetSize(pMiscInfo->a);
+ SStrToken* t = taosArrayGet(pMiscInfo->a, 0);
+ SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
+
+ strncpy(pCmd->payload, t->z, t->n);
+ if (numOfToken == 2) {
+ pCmd->payload[t->n] = ' '; // add sep
+ strncpy(&pCmd->payload[t->n + 1], t1->z, t1->n);
}
break;
@@ -878,47 +900,51 @@ int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQu
return TSDB_CODE_SUCCESS;
}
-int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
+int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) {
const char* msg1 = "name too long";
+ const char* msg2 = "acctId too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
- // backup the old name in pTableMetaInfo
- char oldName[TSDB_TABLE_FNAME_LEN] = {0};
- tstrncpy(oldName, pTableMetaInfo->name, tListLen(oldName));
+ if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
+ code = tNameSetAcctId(&pTableMetaInfo->name, getAccountId(pSql));
+ if (code != 0) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ strncpy(name, pTableName->z, pTableName->n);
- if (hasSpecifyDB(pzTableName)) { // db has been specified in sql string so we ignore current db path
- code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL);
+ code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_DB|T_NAME_TABLE);
if (code != 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else { // get current DB name first, and then set it into path
- SStrToken t = {0};
- getCurrentDBName(pSql, &t);
- if (t.n == 0) { // current database not available or not specified
- code = TSDB_CODE_TSC_DB_NOT_SELECTED;
- } else {
- code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
- if (code != 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
+ char* t = getCurrentDBName(pSql);
+ if (strlen(t) == 0) {
+ return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
- }
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = tNameFromString(&pTableMetaInfo->name, t, T_NAME_ACCT | T_NAME_DB);
+ if (code != 0) {
+ return TSDB_CODE_TSC_DB_NOT_SELECTED;
+ }
- /*
- * the old name exists and is not equalled to the new name. Release the table meta
- * that are corresponding to the old name for the new table name.
- */
- if (strlen(oldName) > 0 && strncasecmp(oldName, pTableMetaInfo->name, tListLen(pTableMetaInfo->name)) != 0) {
- tscClearTableMetaInfo(pTableMetaInfo);
+ if (pTableName->n >= TSDB_TABLE_NAME_LEN) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ strncpy(name, pTableName->z, pTableName->n);
+
+ code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_TABLE);
+ if (code != 0) {
+ code = invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
@@ -1218,9 +1244,8 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
-static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken) {
- pDBToken->z = pSql->pTscObj->db;
- pDBToken->n = (uint32_t)strlen(pSql->pTscObj->db);
+static char* getCurrentDBName(SSqlObj* pSql) {
+ return pSql->pTscObj->db;
}
/* length limitation, strstr cannot be applied */
@@ -1333,7 +1358,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo, colList, NULL);
if (ret != TSDB_CODE_SUCCESS) {
taosArrayDestroy(colList);
- tExprTreeDestroy(&pNode, NULL);
+ tExprTreeDestroy(pNode, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -1342,9 +1367,9 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
for(int32_t k = 0; k < numOfNode; ++k) {
SColIndex* pIndex = taosArrayGet(colList, k);
if (TSDB_COL_IS_TAG(pIndex->flag)) {
- tExprTreeDestroy(&pNode, NULL);
+ tExprTreeDestroy(pNode, NULL);
taosArrayDestroy(colList);
- tExprTreeDestroy(&pNode, NULL);
+
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -1371,7 +1396,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
tbufCloseWriter(&bw);
taosArrayDestroy(colList);
- tExprTreeDestroy(&pNode, NULL);
+ tExprTreeDestroy(pNode, NULL);
} else {
columnList.num = 0;
columnList.ids[0] = (SColumnIndex) {0, 0};
@@ -1403,7 +1428,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid);
if (ret != TSDB_CODE_SUCCESS) {
- tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
+ tExprTreeDestroy(pArithExprInfo->pExpr, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
}
@@ -2176,6 +2201,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
+
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
@@ -2608,10 +2634,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg6 = "pattern string is empty";
/*
- * database prefix in pInfo->pDCLInfo->a[0]
- * wildcard in like clause in pInfo->pDCLInfo->a[1]
+ * database prefix in pInfo->pMiscInfo->a[0]
+ * wildcard in like clause in pInfo->pMiscInfo->a[1]
*/
- SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
+ SShowInfo* pShowInfo = &pInfo->pMiscInfo->showOpt;
int16_t showType = pShowInfo->showType;
if (showType == TSDB_MGMT_TABLE_TABLE || showType == TSDB_MGMT_TABLE_METRIC || showType == TSDB_MGMT_TABLE_VGROUP) {
// db prefix in tagCond, show table conds in payload
@@ -2630,7 +2656,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL);
+ int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -2675,7 +2701,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
SSqlCmd* pCmd = &pSql->cmd;
pCmd->command = pInfo->type;
- SStrToken* idStr = &(pInfo->pDCLInfo->ip);
+ SStrToken* idStr = &(pInfo->pMiscInfo->id);
if (idStr->n > TSDB_KILL_MSG_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2910,7 +2936,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
STableMeta* pTableMeta = NULL;
SSchema* pSchema = NULL;
- SSchema s = tscGetTbnameColumnSchema();
+ SSchema s = tGetTbnameColumnSchema();
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
@@ -3433,6 +3459,7 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQ
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg1 = "invalid join query condition";
+ const char* msg2 = "invalid table name in join query";
const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition";
@@ -3458,7 +3485,11 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pLeft->uid = pTableMetaInfo->pTableMeta->id.uid;
pLeft->tagColId = pTagSchema1->colId;
- strcpy(pLeft->tableId, pTableMetaInfo->name);
+
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pLeft->tableName);
+ if (code != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@@ -3470,7 +3501,11 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pRight->uid = pTableMetaInfo->pTableMeta->id.uid;
pRight->tagColId = pTagSchema2->colId;
- strcpy(pRight->tableId, pTableMetaInfo->name);
+
+ code = tNameExtractFullName(&pTableMetaInfo->name, pRight->tableName);
+ if (code != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
if (pTagSchema1->type != pTagSchema2->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -4047,8 +4082,6 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1));
taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
- char db[TSDB_TABLE_FNAME_LEN] = {0};
-
// remove the duplicated input table names
int32_t num = 0;
char* tableNameString = taosStringBuilderGetResult(sb, NULL);
@@ -4064,7 +4097,8 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
}
num = j;
- char* name = extractDBName(pTableMetaInfo->name, db);
+ char name[TSDB_DB_NAME_LEN] = {0};
+ tNameGetDbName(&pTableMetaInfo->name, name);
SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) };
for (int32_t i = 0; i < num; ++i) {
@@ -4286,7 +4320,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
doCompactQueryExpr(pExpr);
tSqlExprDestroy(p1);
- tExprTreeDestroy(&p, NULL);
+ tExprTreeDestroy(p, NULL);
taosArrayDestroy(colList);
if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0 && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@@ -4507,10 +4541,10 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- size_t size = tscNumOfFields(pQueryInfo);
+ size_t numOfFields = tscNumOfFields(pQueryInfo);
if (pQueryInfo->fillVal == NULL) {
- pQueryInfo->fillVal = calloc(size, sizeof(int64_t));
+ pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t));
if (pQueryInfo->fillVal == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -4520,7 +4554,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
pQueryInfo->fillType = TSDB_FILL_NONE;
} else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NULL;
- for (int32_t i = START_INTERPO_COL_IDX; i < size; ++i) {
+ for (int32_t i = START_INTERPO_COL_IDX; i < numOfFields; ++i) {
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
setNull((char*)&pQueryInfo->fillVal[i], pField->type, pField->bytes);
}
@@ -4534,7 +4568,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
pQueryInfo->fillType = TSDB_FILL_SET_VALUE;
size_t num = taosArrayGetSize(pFillToken);
- if (num == 1) {
+ if (num == 1) { // no actual value, return with error code
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -4545,11 +4579,11 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
if (tscIsPointInterpQuery(pQueryInfo)) {
startPos = 0;
- if (numOfFillVal > size) {
- numOfFillVal = (int32_t)size;
+ if (numOfFillVal > numOfFields) {
+ numOfFillVal = (int32_t)numOfFields;
}
} else {
- numOfFillVal = (int16_t)((num > (int32_t)size) ? (int32_t)size : num);
+ numOfFillVal = (int16_t)((num > (int32_t)numOfFields) ? (int32_t)numOfFields : num);
}
int32_t j = 1;
@@ -4569,10 +4603,10 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
}
}
- if ((num < size) || ((num - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) {
+ if ((num < numOfFields) || ((num - 1 < numOfFields) && (tscIsPointInterpQuery(pQueryInfo)))) {
tVariantListItem* lastItem = taosArrayGetLast(pFillToken);
- for (int32_t i = numOfFillVal; i < size; ++i) {
+ for (int32_t i = numOfFillVal; i < numOfFields; ++i) {
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) {
@@ -4806,11 +4840,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg17 = "invalid column name";
const char* msg18 = "primary timestamp column cannot be dropped";
const char* msg19 = "invalid new tag name";
+ const char* msg20 = "table is not super table";
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
- SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo;
+ SAlterTableInfo* pAlterSQL = pInfo->pAlterInfo;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
@@ -4831,6 +4866,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pAlterSQL->tableType == TSDB_SUPER_TABLE && !(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg20);
+ }
+
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
@@ -5145,8 +5184,10 @@ int32_t validateEp(char* ep) {
return TSDB_CODE_SUCCESS;
}
-int32_t validateDNodeConfig(tDCLSQL* pOptions) {
- if (pOptions->nTokens < 2 || pOptions->nTokens > 3) {
+int32_t validateDNodeConfig(SMiscInfo* pOptions) {
+ int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
+
+ if (numOfToken < 2 || numOfToken > 3) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5164,9 +5205,9 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
{"cqDebugFlag", 11},
};
- SStrToken* pOptionToken = &pOptions->a[1];
+ SStrToken* pOptionToken = taosArrayGet(pOptions->a, 1);
- if (pOptions->nTokens == 2) {
+ if (numOfToken == 2) {
// reset log and reset query cache does not need value
for (int32_t i = 0; i < tokenLogEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
@@ -5176,7 +5217,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
}
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
- SStrToken* pValToken = &pOptions->a[2];
+ SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
@@ -5187,14 +5228,14 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenMonitor].len == pOptionToken->n)) {
- SStrToken* pValToken = &pOptions->a[2];
+ SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else {
- SStrToken* pValToken = &pOptions->a[2];
+ SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 0 || val > 256) {
@@ -5205,8 +5246,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
for (int32_t i = tokenDebugFlag; i < tokenDebugFlagEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
+ // options is valid
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
- /* options is valid */
return TSDB_CODE_SUCCESS;
}
}
@@ -5215,17 +5256,18 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}
-int32_t validateLocalConfig(tDCLSQL* pOptions) {
- if (pOptions->nTokens < 1 || pOptions->nTokens > 2) {
+int32_t validateLocalConfig(SMiscInfo* pOptions) {
+ int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
+ if (numOfToken < 1 || numOfToken > 2) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12},
{"cDebugFlag", 10}, {"uDebugFlag", 10}, {"debugFlag", 9}};
- SStrToken* pOptionToken = &pOptions->a[0];
+ SStrToken* pOptionToken = taosArrayGet(pOptions->a, 0);
- if (pOptions->nTokens == 1) {
+ if (numOfToken == 1) {
// reset log does not need value
for (int32_t i = 0; i < 1; ++i) {
SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i];
@@ -5234,7 +5276,7 @@ int32_t validateLocalConfig(tDCLSQL* pOptions) {
}
}
} else {
- SStrToken* pValToken = &pOptions->a[1];
+ SStrToken* pValToken = taosArrayGet(pOptions->a, 1);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 131 || val > 199) {
@@ -5386,7 +5428,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
return TSDB_CODE_SUCCESS;
}
-static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
+static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
const char* msg = "invalid number of options";
pMsg->daysToKeep = htonl(-1);
@@ -5424,7 +5466,7 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* p
return TSDB_CODE_SUCCESS;
}
-static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) {
+static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDbInfo) {
const char* msg = "invalid time precision";
pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default
@@ -5448,7 +5490,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo
return TSDB_CODE_SUCCESS;
}
-static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
+static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
pMsg->maxTables = htonl(-1); // max tables can not be set anymore
pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize);
pMsg->totalBlocks = htonl(pCreateDb->numOfBlocks);
@@ -5466,7 +5508,7 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
pMsg->cacheLastRow = pCreateDb->cachelast;
}
-int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
+int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql) {
SCreateDbMsg* pMsg = (SCreateDbMsg *)(pCmd->payload);
setCreateDBOption(pMsg, pCreateDbSql);
@@ -6227,9 +6269,9 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// get table meta from mnode
- tstrncpy(pCreateTableInfo->tagdata.name, pStableMetaInfo->name, tListLen(pCreateTableInfo->tagdata.name));
- SArray* pList = pCreateTableInfo->pTagVals;
+ code = tNameExtractFullName(&pStableMetaInfo->name, pCreateTableInfo->tagdata.name);
+ SArray* pList = pCreateTableInfo->pTagVals;
code = tscGetTableMeta(pSql, pStableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -6307,7 +6349,11 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return ret;
}
- pCreateTableInfo->fullname = strndup(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN);
+ pCreateTableInfo->fullname = calloc(1, tNameLen(&pTableMetaInfo->name) + 1);
+ ret = tNameExtractFullName(&pTableMetaInfo->name, pCreateTableInfo->fullname);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
}
return TSDB_CODE_SUCCESS;
@@ -6554,7 +6600,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
// has no table alias name
if (memcmp(pTableItem->pz, p1->pVar.pz, p1->pVar.nLen) == 0) {
- extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName);
+ strncpy(pTableMetaInfo1->aliasName, tNameGetTableName(&pTableMetaInfo1->name), tListLen(pTableMetaInfo->aliasName));
} else {
tstrncpy(pTableMetaInfo1->aliasName, p1->pVar.pz, sizeof(pTableMetaInfo1->aliasName));
}
diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c
index 47c3cd97168f997ff366fa0712b7182558688d31..67352ca71cab03bb9cd2f6b920b97abebbe1420a 100644
--- a/src/client/src/tscSchemaUtil.c
+++ b/src/client/src/tscSchemaUtil.c
@@ -106,6 +106,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
+
tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 537e81413fdd74f54d74c1bb887f28498b0b7379..840387656691e0bd0e76908d311b0ac124db5baf 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -309,7 +309,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
return;
}
- if (pEpSet) { // todo update this
+ if (pEpSet) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
@@ -461,16 +461,20 @@ int doProcessSql(SSqlObj *pSql) {
}
int tscProcessSql(SSqlObj *pSql) {
- char *name = NULL;
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+
SSqlCmd *pCmd = &pSql->cmd;
-
+ uint32_t type = 0;
+
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = NULL;
- uint32_t type = 0;
if (pQueryInfo != NULL) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- name = (pTableMetaInfo != NULL)? pTableMetaInfo->name:NULL;
+ if (pTableMetaInfo != NULL) {
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+ }
+
type = pQueryInfo->type;
// while numOfTables equals to 0, it must be Heartbeat
@@ -669,10 +673,11 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
pMsg += sizeof(STableIdInfo);
}
}
-
- tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
- pTableMeta->id.tid, pTableMeta->id.uid);
-
+
+ char n[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, n);
+
+ tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), n, pTableMeta->id.tid, pTableMeta->id.uid);
return pMsg;
}
@@ -755,8 +760,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || !isValidDataType(pColSchema->type)) {
+ char n[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, n);
+
tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
- pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
+ pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -942,9 +950,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) ||
(!isValidDataType(pColSchema->type))) {
+ char n[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, n);
+
tscError("%p tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
- pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, total, numOfTagColumns,
- pCol->colIndex.columnIndex, pColSchema->name);
+ pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, total, numOfTagColumns, pCol->colIndex.columnIndex, pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1021,7 +1031,8 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pCmd->numOfClause == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- tstrncpy(pCreateDbMsg->db, pTableMetaInfo->name, sizeof(pCreateDbMsg->db));
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateDbMsg->db);
+ assert(code == TSDB_CODE_SUCCESS);
return TSDB_CODE_SUCCESS;
}
@@ -1035,7 +1046,9 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SCreateDnodeMsg *pCreate = (SCreateDnodeMsg *)pCmd->payload;
- strncpy(pCreate->ep, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n);
+
+ SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0);
+ strncpy(pCreate->ep, t0->z, t0->n);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE;
@@ -1052,13 +1065,13 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateAcctMsg *pAlterMsg = (SCreateAcctMsg *)pCmd->payload;
- SStrToken *pName = &pInfo->pDCLInfo->user.user;
- SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
+ SStrToken *pName = &pInfo->pMiscInfo->user.user;
+ SStrToken *pPwd = &pInfo->pMiscInfo->user.passwd;
strncpy(pAlterMsg->user, pName->z, pName->n);
strncpy(pAlterMsg->pass, pPwd->z, pPwd->n);
- SCreateAcctSQL *pAcctOpt = &pInfo->pDCLInfo->acctOpt;
+ SCreateAcctInfo *pAcctOpt = &pInfo->pMiscInfo->acctOpt;
pAlterMsg->cfg.maxUsers = htonl(pAcctOpt->maxUsers);
pAlterMsg->cfg.maxDbs = htonl(pAcctOpt->maxDbs);
@@ -1098,7 +1111,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateUserMsg *pAlterMsg = (SCreateUserMsg *)pCmd->payload;
- SUserInfo *pUser = &pInfo->pDCLInfo->user;
+ SUserInfo *pUser = &pInfo->pMiscInfo->user;
strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n);
pAlterMsg->flag = (int8_t)pUser->type;
@@ -1138,8 +1151,11 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- tstrncpy(pDropDbMsg->db, pTableMetaInfo->name, sizeof(pDropDbMsg->db));
- pDropDbMsg->ignoreNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
+
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pDropDbMsg->db);
+ assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->name.type == TSDB_DB_NAME_T);
+
+ pDropDbMsg->ignoreNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DB;
return TSDB_CODE_SUCCESS;
@@ -1156,15 +1172,19 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- strcpy(pDropTableMsg->tableFname, pTableMetaInfo->name);
- pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
+ tNameExtractFullName(&pTableMetaInfo->name, pDropTableMsg->name);
+ pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
+
+ char dnodeEp[TSDB_EP_LEN] = {0};
+ tstrncpy(dnodeEp, pCmd->payload, TSDB_EP_LEN);
+
pCmd->payloadLen = sizeof(SDropDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
@@ -1172,43 +1192,28 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep));
+ tstrncpy(pDrop->ep, dnodeEp, tListLen(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE;
return TSDB_CODE_SUCCESS;
}
-int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo * UNUSED_PARAM(pInfo)) {
+int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
- pCmd->payloadLen = sizeof(SDropUserMsg);
- pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
-
- if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
-
- SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
- return TSDB_CODE_SUCCESS;
-}
+ char user[TSDB_USER_LEN] = {0};
+ tstrncpy(user, pCmd->payload, TSDB_USER_LEN);
-int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SDropUserMsg);
- pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_ACCT;
+ pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
+ SDropUserMsg *pDropMsg = (SDropUserMsg *)pCmd->payload;
+ tstrncpy(pDropMsg->user, user, tListLen(user));
return TSDB_CODE_SUCCESS;
}
@@ -1224,7 +1229,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- strcpy(pUseDbMsg->db, pTableMetaInfo->name);
+ tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB;
return TSDB_CODE_SUCCESS;
@@ -1244,14 +1249,14 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- size_t nameLen = strlen(pTableMetaInfo->name);
- if (nameLen > 0) {
- tstrncpy(pShowMsg->db, pTableMetaInfo->name, sizeof(pShowMsg->db)); // prefix is set here
- } else {
+
+ if (tNameIsEmpty(&pTableMetaInfo->name)) {
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
+ } else {
+ tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
- SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt;
+ SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
@@ -1310,12 +1315,12 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- int msgLen = 0;
- SSchema * pSchema;
- int size = 0;
+ int msgLen = 0;
+ int size = 0;
+ SSchema *pSchema;
SSqlCmd *pCmd = &pSql->cmd;
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// Reallocate the payload size
@@ -1346,11 +1351,10 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(SCreateTableMsg);
SCreatedTableInfo* p = taosArrayGet(list, i);
- strcpy(pCreate->tableFname, p->fullname);
+ strcpy(pCreate->tableName, p->fullname);
pCreate->igExists = (p->igExist)? 1 : 0;
// use dbinfo from table id without modifying current db info
- tscGetDBInfoFromTableFullName(p->fullname, pCreate->db);
pMsg = serializeTagData(&p->tagdata, pMsg);
int32_t len = (int32_t)(pMsg - (char*) pCreate);
@@ -1359,10 +1363,8 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} else { // create (super) table
pCreateTableMsg->numOfTables = htonl(1); // only one table will be created
- strcpy(pCreateMsg->tableFname, pTableMetaInfo->name);
-
- // use dbinfo from table id without modifying current db info
- tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db);
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateMsg->tableName);
+ assert(code == 0);
SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo;
@@ -1420,7 +1422,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;
+ SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
@@ -1428,9 +1430,8 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
- tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
- strcpy(pAlterTableMsg->tableFname, pTableMetaInfo->name);
+ tNameExtractFullName(&pTableMetaInfo->name, pAlterTableMsg->tableFname);
pAlterTableMsg->type = htons(pAlterInfo->type);
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
@@ -1485,7 +1486,7 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db));
+ tNameExtractFullName(&pTableMetaInfo->name, pAlterDbMsg->db);
return TSDB_CODE_SUCCESS;
}
@@ -1623,13 +1624,17 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- SSqlCmd * pCmd = &pSql->cmd;
+ SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
+
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname);
+ if (code != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
- STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
- strcpy(pInfoMsg->tableFname, pTableMetaInfo->name);
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
@@ -1686,33 +1691,6 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return 0;
}
-//static UNUSED_FUNC int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
-//// const int32_t defaultSize =
-//// minMsgSize() + sizeof(SSuperTableMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS;
-//// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
-////
-//// int32_t n = 0;
-//// size_t size = taosArrayGetSize(pQueryInfo->tagCond.pCond);
-//// for (int32_t i = 0; i < size; ++i) {
-//// assert(0);
-////// n += strlen(pQueryInfo->tagCond.cond[i].cond);
-//// }
-////
-//// int32_t tagLen = n * TSDB_NCHAR_SIZE;
-//// if (pQueryInfo->tagCond.tbnameCond.cond != NULL) {
-//// tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE;
-//// }
-////
-//// int32_t joinCondLen = (TSDB_TABLE_FNAME_LEN + sizeof(int16_t)) * 2;
-//// int32_t elemSize = sizeof(SSuperTableMetaElemMsg) * pQueryInfo->numOfTables;
-////
-//// int32_t colSize = pQueryInfo->groupbyExpr.numOfGroupCols*sizeof(SColIndex);
-////
-//// int32_t len = tagLen + joinCondLen + elemSize + colSize + defaultSize;
-////
-//// return MAX(len, TSDB_DEFAULT_PAYLOAD_SIZE);
-//}
-
int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
@@ -1725,9 +1703,10 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
- size_t size = sizeof(pTableMetaInfo->name);
- tstrncpy(pMsg, pTableMetaInfo->name, size);
- pMsg += size;
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pMsg);
+ assert(code == TSDB_CODE_SUCCESS);
+
+ pMsg += TSDB_TABLE_FNAME_LEN;
}
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP;
@@ -1827,7 +1806,6 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(i == 0);
}
- assert(isValidDataType(pSchema->type));
pSchema++;
}
@@ -1835,8 +1813,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta == NULL);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
- if (!isValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
- tscError("%p invalid table meta from mnode, name:%s", pSql, pTableMetaInfo->name);
+ if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
+ tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
return TSDB_CODE_TSC_INVALID_VALUE;
}
@@ -1854,11 +1832,19 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
tfree(pSupTableMeta);
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
- taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), cMeta, sizeof(CChildTableMeta));
+
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+
+ taosHashPut(tscTableMetaInfo, name, strlen(name), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
- taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), pTableMeta, s);
+
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+
+ taosHashPut(tscTableMetaInfo, name, strlen(name), pTableMeta, s);
}
// update the vgroupInfo if needed
@@ -1877,9 +1863,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
}
}
- tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, pTableMetaInfo->name);
+ tscDebug("%p recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid,
+ tNameGetTableName(&pTableMetaInfo->name));
+
free(pTableMeta);
-
return TSDB_CODE_SUCCESS;
}
@@ -2174,9 +2161,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
int tscProcessUseDbRsp(SSqlObj *pSql) {
STscObj * pObj = pSql->pTscObj;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
-
- tstrncpy(pObj->db, pTableMetaInfo->name, sizeof(pObj->db));
- return 0;
+ return tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
}
int tscProcessDropDbRsp(SSqlObj *pSql) {
@@ -2189,18 +2174,22 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
//The cached tableMeta is expired in this case, so clean it in hash table
- taosHashRemove(tscTableMetaInfo, pTableMetaInfo->name, strnlen(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN));
- tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
- (int32_t) taosHashGetSize(tscTableMetaInfo));
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
- assert(pTableMetaInfo->pTableMeta == NULL);
+ taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, name, (int32_t) taosHashGetSize(tscTableMetaInfo));
+
+ pTableMetaInfo->pTableMeta = NULL;
return 0;
}
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
- char* name = pTableMetaInfo->name;
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+
tscDebug("%p remove tableMeta in hashMap after alter-table: %s", pSql, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
@@ -2239,6 +2228,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
+ assert(pRes->rspLen >= sizeof(SRetrieveTableRsp));
+
SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
if (pRetrieve == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -2319,7 +2310,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
STableMetaInfo *pNewMeterMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
- tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name));
+ tNameAssign(&pNewMeterMetaInfo->name, &pTableMetaInfo->name);
if (pSql->cmd.autoCreated) {
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
@@ -2350,7 +2341,8 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
}
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
- assert(strlen(pTableMetaInfo->name) != 0);
+ assert(tIsValidName(&pTableMetaInfo->name));
+
tfree(pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
@@ -2358,15 +2350,18 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
- int32_t len = (int32_t) strlen(pTableMetaInfo->name);
- taosHashGetClone(tscTableMetaInfo, pTableMetaInfo->name, len, NULL, pTableMetaInfo->pTableMeta, -1);
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+
+ size_t len = strlen(name);
+ taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
- int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, pTableMetaInfo->name);
+ int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@@ -2394,16 +2389,24 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
- const char* name = pTableMetaInfo->name;
+
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("%p failed to generate the table full name", pSql);
+ return TSDB_CODE_TSC_INVALID_SQL;
+ }
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMeta) {
tscDebug("%p update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql, name,
- tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
+ tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ size_t len = strlen(name);
+ taosHashRemove(tscTableMetaInfo, name, len);
+
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@@ -2444,8 +2447,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
- STableMeta* pTableMeta = tscTableMetaClone(pMInfo->pTableMeta);
- tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
+ STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta);
+ tscAddTableMetaInfo(pNewQueryInfo, &pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
}
if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
@@ -2485,8 +2488,8 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg;
tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg;
- tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg;
- tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg;
+ tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
+ tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index a4f2976cad16e962b63ec5543243753cd092276a..6b4d9809454528cc0df29a4a7683aa4223e4d7c5 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -15,7 +15,7 @@
#include "hash.h"
#include "os.h"
-#include "qAst.h"
+#include "texpr.h"
#include "tkey.h"
#include "tcache.h"
#include "tnote.h"
@@ -110,6 +110,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
rpcClose(pDnodeConn);
free(pObj->tscCorMgmtEpSet);
free(pObj);
+ return NULL;
}
memcpy(pObj->tscCorMgmtEpSet, &corMgmtEpSet, sizeof(SRpcCorEpSet));
@@ -936,7 +937,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must before clean the sqlcmd object
- tscResetSqlCmdObj(&pSql->cmd);
+ tscResetSqlCmd(&pSql->cmd, false);
SSqlCmd *pCmd = &pSql->cmd;
@@ -995,7 +996,8 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
return code;
}
- if (payloadLen + strlen(pTableMetaInfo->name) + 128 >= pCmd->allocSize) {
+ int32_t xlen = tNameLen(&pTableMetaInfo->name);
+ if (payloadLen + xlen + 128 >= pCmd->allocSize) {
char *pNewMem = realloc(pCmd->payload, pCmd->allocSize + tblListLen);
if (pNewMem == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -1008,7 +1010,9 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
pMsg = pCmd->payload;
}
- payloadLen += sprintf(pMsg + payloadLen, "%s,", pTableMetaInfo->name);
+ char n[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, n);
+ payloadLen += sprintf(pMsg + payloadLen, "%s,", n);
}
*(pMsg + payloadLen) = '\0';
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index a39fbc94202d3c1940b8631c9f95a38381857946..d787f5b51545dd5c1fe43397d4e9086e4a806ef5 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -104,7 +104,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
- tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, pTableMetaInfo->name);
+ tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name));
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
@@ -191,7 +191,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
- char* name = pTableMetaInfo->name;
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
@@ -291,7 +293,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pStream->stime += 1;
}
- tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
+ tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, tNameGetTableName(&pTableMetaInfo->name),
pStream->numOfRes);
tfree(pTableMetaInfo->pTableMeta);
@@ -556,7 +558,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
- pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
+ pStream, tNameGetTableName(&pTableMetaInfo->name), pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) {
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 2622246111894caf39f3c1c37923d1a7207935b6..aacdf9103e0f127137b164f948bca3363c4fa7a1 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -16,7 +16,7 @@
#include "os.h"
-#include "qAst.h"
+#include "texpr.h"
#include "qTsbuf.h"
#include "tcompare.h"
#include "tscLog.h"
@@ -55,6 +55,58 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
}
+static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, int8_t state) {
+ assert(idx < subState->numOfSub);
+ assert(subState->states);
+
+ pthread_mutex_lock(&subState->mutex);
+
+ tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
+
+ subState->states[idx] = state;
+
+ pthread_mutex_unlock(&subState->mutex);
+}
+
+static bool allSubqueryDone(SSqlObj *pParentSql) {
+ bool done = true;
+ SSubqueryState *subState = &pParentSql->subState;
+
+ //lock in caller
+
+ for (int i = 0; i < subState->numOfSub; i++) {
+ if (0 == subState->states[i]) {
+ tscDebug("%p subquery:%p,%d is NOT finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
+ done = false;
+ break;
+ } else {
+ tscDebug("%p subquery:%p,%d is finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
+ }
+ }
+
+ return done;
+}
+
+static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
+ SSubqueryState *subState = &pParentSql->subState;
+
+ assert(idx < subState->numOfSub);
+
+ pthread_mutex_lock(&subState->mutex);
+
+ tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx);
+
+ subState->states[idx] = 1;
+
+ bool done = allSubqueryDone(pParentSql);
+
+ pthread_mutex_unlock(&subState->mutex);
+
+ return done;
+}
+
+
+
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
@@ -367,10 +419,6 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// scan all subquery, if one sub query has only ts, ignore it
tscDebug("%p start to launch secondary subqueries, %d out of %d needs to query", pSql, numOfSub, pSql->subState.numOfSub);
- //the subqueries that do not actually launch the secondary query to virtual node is set as completed.
- SSubqueryState* pState = &pSql->subState;
- pState->numOfRemain = numOfSub;
-
bool success = true;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
@@ -403,6 +451,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
+
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
@@ -480,10 +529,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
}
}
+ subquerySetState(pPrevSub, &pSql->subState, i, 0);
+
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
- numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
+ numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
}
//prepare the subqueries object failed, abort
@@ -517,20 +568,25 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
SJoinSupporter* p = pSub->param;
tscDestroyJoinSupporter(p);
- if (pSub->res.code == TSDB_CODE_SUCCESS) {
- taos_free_result(pSub);
- }
+ taos_free_result(pSub);
+ pSql->pSubs[i] = NULL;
}
+ if (pSql->subState.states) {
+ pthread_mutex_destroy(&pSql->subState.mutex);
+ }
+
+ tfree(pSql->subState.states);
+
+
pSql->subState.numOfSub = 0;
}
-static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
- assert(pSqlObj->subState.numOfRemain > 0);
-
- if (atomic_sub_fetch_32(&pSqlObj->subState.numOfRemain, 1) <= 0) {
- tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
+static void quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
+ if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
+ tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
freeJoinSubqueryObj(pSqlObj);
+ return;
}
//tscDestroyJoinSupporter(pSupporter);
@@ -674,7 +730,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
- tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
+ tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscProcessSql(pSql);
}
@@ -777,6 +833,15 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
+ if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
+ tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
+
+ tscAsyncResultOnError(pParentSql);
+
+ return;
+ }
+
// check for the error code firstly
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed
@@ -785,7 +850,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
- quitAllSubquery(pParentSql, pSupporter);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
return;
@@ -802,7 +867,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p failed to malloc memory", pSql);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
- quitAllSubquery(pParentSql, pSupporter);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
return;
@@ -844,9 +909,10 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// no data exists in next vnode, mark the query completed
// only when there is no subquery exits any more, proceeds to get the intersect of the tuple sets.
- if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
+ if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
+ tscDebug("%p tagRetrieve:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
return;
- }
+ }
SArray *s1 = NULL, *s2 = NULL;
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
@@ -885,14 +951,16 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2);
SSqlObj* psub1 = pParentSql->pSubs[0];
- ((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo1->pVgroupTables);
+ ((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables);
SSqlObj* psub2 = pParentSql->pSubs[1];
- ((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo2->pVgroupTables);
+ ((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables);
pParentSql->subState.numOfSub = 2;
- pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub;
-
+
+ memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
+ tscDebug("%p reset all sub states to 0", pParentSql);
+
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
SSqlObj* sub = pParentSql->pSubs[m];
issueTSCompQuery(sub, sub->param, pParentSql);
@@ -915,6 +983,15 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
+ if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
+ tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
+
+ tscAsyncResultOnError(pParentSql);
+
+ return;
+ }
+
// check for the error code firstly
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed yet
@@ -922,7 +999,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
- quitAllSubquery(pParentSql, pSupporter);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
return;
@@ -937,7 +1014,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
- quitAllSubquery(pParentSql, pSupporter);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
@@ -955,7 +1032,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
- quitAllSubquery(pParentSql, pSupporter);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
@@ -1009,9 +1086,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
return;
}
- if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
+ if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
return;
- }
+ }
tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql);
@@ -1049,6 +1126,17 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
SSqlRes* pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+ if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
+ tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
+
+ tscAsyncResultOnError(pParentSql);
+
+ return;
+ }
+
+
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(numOfRows == taos_errno(pSql));
@@ -1088,9 +1176,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
}
}
- assert(pState->numOfRemain > 0);
- if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
- tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pState->numOfRemain, pState->numOfSub);
+ if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
+ tscDebug("%p sub:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pState->numOfSub);
return;
}
@@ -1205,15 +1292,16 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
}
}
- // get the number of subquery that need to retrieve the next vnode.
+
if (orderedPrjQuery) {
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if (pSub != NULL && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) {
- pSql->subState.numOfRemain++;
+ subquerySetState(pSub, &pSql->subState, i, 0);
}
}
}
+
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
@@ -1270,7 +1358,19 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
// retrieve data from current vnode.
tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch);
SJoinSupporter* pSupporter = NULL;
- pSql->subState.numOfRemain = numOfFetch;
+
+ for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
+ SSqlObj* pSql1 = pSql->pSubs[i];
+ if (pSql1 == NULL) {
+ continue;
+ }
+
+ SSqlRes* pRes1 = &pSql1->res;
+
+ if (pRes1->row >= pRes1->numOfRows) {
+ subquerySetState(pSql1, &pSql->subState, i, 0);
+ }
+ }
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
@@ -1372,7 +1472,8 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// retrieve actual query results from vnode during the second stage join subquery
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
- quitAllSubquery(pParentSql, pSupporter);
+ quitAllSubquery(pSql, pParentSql, pSupporter);
+
tscAsyncResultOnError(pParentSql);
return;
@@ -1384,7 +1485,8 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
pParentSql->res.code = code;
- quitAllSubquery(pParentSql, pSupporter);
+
+ quitAllSubquery(pSql, pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
return;
@@ -1408,9 +1510,9 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// In case of consequence query from other vnode, do not wait for other query response here.
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
- if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
+ if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
return;
- }
+ }
}
tscSetupOutputColumnIndex(pParentSql);
@@ -1422,6 +1524,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data
pSql->cmd.command = TSDB_SQL_FETCH;
+
tscProcessSql(pSql);
} else { // first retrieve from vnode during the secondary stage sub-query
// set the command flag must be after the semaphore been correctly set.
@@ -1457,8 +1560,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- pSql->pSubs[pSql->subState.numOfRemain++] = pNew;
- assert(pSql->subState.numOfRemain <= pSql->subState.numOfSub);
+ pSql->pSubs[tableIndex] = pNew;
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
addGroupInfoForSubquery(pSql, pNew, 0, tableIndex);
@@ -1534,7 +1636,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
- numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name);
+ numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
} else {
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
@@ -1569,7 +1671,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
- numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name);
+ numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
}
} else {
assert(0);
@@ -1590,6 +1692,19 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
int32_t code = TSDB_CODE_SUCCESS;
pSql->subState.numOfSub = pQueryInfo->numOfTables;
+ if (pSql->subState.states == NULL) {
+ pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
+ if (pSql->subState.states == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ pthread_mutex_init(&pSql->subState.mutex, NULL);
+ }
+
+ memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
+ tscDebug("%p reset all sub states to 0", pSql);
+
bool hasEmptySub = false;
tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables);
@@ -1622,14 +1737,25 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pSql->fp)(pSql->param, pSql, 0);
} else {
+ int fail = 0;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
+ if (fail) {
+ (*pSub->fp)(pSub->param, pSub, 0);
+ continue;
+ }
+
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
- pSql->subState.numOfRemain = i - 1; // the already sent request will continue and do not go to the error process routine
- break;
+ pRes->code = code;
+ (*pSub->fp)(pSub->param, pSub, 0);
+ fail = 1;
}
}
+ if(fail) {
+ return;
+ }
+
pSql->cmd.command = TSDB_SQL_TABLE_JOIN_RETRIEVE;
}
@@ -1728,7 +1854,21 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return ret;
}
- pState->numOfRemain = pState->numOfSub;
+ if (pState->states == NULL) {
+ pState->states = calloc(pState->numOfSub, sizeof(*pState->states));
+ if (pState->states == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscAsyncResultOnError(pSql);
+ tfree(pMemoryBuf);
+ return ret;
+ }
+
+ pthread_mutex_init(&pState->mutex, NULL);
+ }
+
+ memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub);
+ tscDebug("%p reset all sub states to 0", pSql);
+
pRes->code = TSDB_CODE_SUCCESS;
int32_t i = 0;
@@ -1877,7 +2017,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
assert(pSql != NULL);
SSubqueryState* pState = &pParentSql->subState;
- assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
// retrieved in subquery failed. OR query cancelled in retrieve phase.
if (taos_errno(pSql) == TSDB_CODE_SUCCESS && pParentSql->res.code != TSDB_CODE_SUCCESS) {
@@ -1908,14 +2047,12 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
}
- int32_t remain = -1;
- if ((remain = atomic_sub_fetch_32(&pState->numOfRemain, 1)) > 0) {
- tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
- pState->numOfSub - remain);
+ if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
+ tscDebug("%p sub:%p,%d freed, not finished, total:%d", pParentSql, pSql, trsupport->subqueryIndex, pState->numOfSub);
tscFreeRetrieveSup(pSql);
return;
- }
+ }
// all subqueries are failed
tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
@@ -1980,14 +2117,12 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
return;
}
- int32_t remain = -1;
- if ((remain = atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1)) > 0) {
- tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
- pState->numOfSub - remain);
+ if (!subAndCheckDone(pSql, pParentSql, idx)) {
+ tscDebug("%p sub:%p orderOfSub:%d freed, not finished", pParentSql, pSql, trsupport->subqueryIndex);
tscFreeRetrieveSup(pSql);
return;
- }
+ }
// all sub-queries are returned, start to local merge process
pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
@@ -1998,7 +2133,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
tscClearInterpInfo(pPQueryInfo);
- tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
+ tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscDebug("%p build loser tree completed", pParentSql);
pParentSql->res.precision = pSql->res.precision;
@@ -2033,7 +2168,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
SSqlObj * pParentSql = trsupport->pParentSql;
SSubqueryState* pState = &pParentSql->subState;
- assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
@@ -2254,7 +2388,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
}
- if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) {
+ if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
+ tscDebug("%p insert:%p,%d completed, total:%d", pParentObj, tres, pSupporter->index, pParentObj->subState.numOfSub);
return;
}
@@ -2286,7 +2421,9 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscFreeQueryInfo(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
- tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
+ tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
+
+ subquerySetState(pSql, &pParentObj->subState, i, 0);
tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql);
}
@@ -2297,14 +2434,14 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscDebug("%p cleanup %d tableMeta in hashTable", pParentObj, pParentObj->cmd.numOfTables);
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
- char* name = pParentObj->cmd.pTableNameList[i];
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
pParentObj->cmd.parseFinished = false;
- pParentObj->subState.numOfRemain = numOfFailed;
- tscResetSqlCmdObj(&pParentObj->cmd);
+ tscResetSqlCmd(&pParentObj->cmd, false);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
@@ -2378,7 +2515,19 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
// the number of already initialized subqueries
int32_t numOfSub = 0;
- pSql->subState.numOfRemain = pSql->subState.numOfSub;
+ if (pSql->subState.states == NULL) {
+ pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
+ if (pSql->subState.states == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ pthread_mutex_init(&pSql->subState.mutex, NULL);
+ }
+
+ memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
+ tscDebug("%p reset all sub states to 0", pSql);
+
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
if (pSql->pSubs == NULL) {
goto _error;
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 005d83bf2c7ea569bed25058cf2f97faec909fa7..d580eccca82e536f25184abf8b2e350be0c07662 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -16,7 +16,7 @@
#include "tscUtil.h"
#include "hash.h"
#include "os.h"
-#include "qAst.h"
+#include "texpr.h"
#include "taosmsg.h"
#include "tkey.h"
#include "tmd5.h"
@@ -89,21 +89,6 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
return true;
}
-// todo refactor, extract methods and move the common module
-void tscGetDBInfoFromTableFullName(char* tableId, char* db) {
- char* st = strstr(tableId, TS_PATH_DELIMITER);
- if (st != NULL) {
- char* end = strstr(st + 1, TS_PATH_DELIMITER);
- if (end != NULL) {
- memcpy(db, tableId, (end - tableId));
- db[end - tableId] = 0;
- return;
- }
- }
-
- db[0] = 0;
-}
-
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
@@ -396,7 +381,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd) {
tfree(pCmd->pQueryInfo);
}
-void tscResetSqlCmdObj(SSqlCmd* pCmd) {
+void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@@ -414,13 +399,13 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd) {
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
- pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
+ pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
tscFreeQueryInfo(pCmd);
}
void tscFreeSqlResult(SSqlObj* pSql) {
- tscDestroyLocalReducer(pSql);
+ tscDestroyLocalMerger(pSql);
SSqlRes* pRes = &pSql->res;
tscDestroyResPointerInfo(pRes);
@@ -441,6 +426,12 @@ static void tscFreeSubobj(SSqlObj* pSql) {
pSql->pSubs[i] = NULL;
}
+ if (pSql->subState.states) {
+ pthread_mutex_destroy(&pSql->subState.mutex);
+ }
+
+ tfree(pSql->subState.states);
+
pSql->subState.numOfSub = 0;
}
@@ -510,7 +501,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->self = 0;
tscFreeSqlResult(pSql);
- tscResetSqlCmdObj(pCmd);
+ tscResetSqlCmd(pCmd, false);
tfree(pCmd->tagData.data);
pCmd->tagData.dataLen = 0;
@@ -524,7 +515,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
free(pSql);
}
-void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
+void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
if (pDataBlock == NULL) {
return;
}
@@ -537,6 +528,13 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
tfree(pDataBlock->pTableMeta);
}
+ if (removeMeta) {
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pDataBlock->tableName, name);
+
+ taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ }
+
tfree(pDataBlock);
}
@@ -572,21 +570,21 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
size_t size = taosArrayGetSize(pDataBlockList);
for (int32_t i = 0; i < size; i++) {
void* d = taosArrayGetP(pDataBlockList, i);
- tscDestroyDataBlock(d);
+ tscDestroyDataBlock(d, false);
}
taosArrayDestroy(pDataBlockList);
return NULL;
}
-void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable) {
+void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
if (pBlockHashTable == NULL) {
return NULL;
}
STableDataBlocks** p = taosHashIterate(pBlockHashTable, NULL);
while(p) {
- tscDestroyDataBlock(*p);
+ tscDestroyDataBlock(*p, removeMeta);
p = taosHashIterate(pBlockHashTable, p);
}
@@ -606,15 +604,13 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// todo refactor
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
- tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
+ tNameAssign(&pTableMetaInfo->name, &pDataBlock->tableName);
if (pTableMetaInfo->pTableMeta != NULL) {
tfree(pTableMetaInfo->pTableMeta);
}
- pTableMetaInfo->pTableMeta = tscTableMetaClone(pDataBlock->pTableMeta);
- } else {
- assert(strncmp(pTableMetaInfo->name, pDataBlock->tableName, tListLen(pDataBlock->tableName)) == 0);
+ pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
}
/*
@@ -649,7 +645,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
* @param dataBlocks
* @return
*/
-int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
+int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
@@ -677,18 +673,18 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
- tstrncpy(dataBuf->tableName, name, sizeof(dataBuf->tableName));
+ tNameAssign(&dataBuf->tableName, name);
//Here we keep the tableMeta to avoid it to be remove by other threads.
- dataBuf->pTableMeta = tscTableMetaClone(pTableMeta);
+ dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
-int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
- STableDataBlocks** dataBlocks, SArray* pBlockList) {
+int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
+ SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) {
@@ -696,7 +692,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
}
if (*dataBlocks == NULL) {
- int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId, pTableMeta, dataBlocks);
+ int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, name, pTableMeta, dataBlocks);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -797,12 +793,12 @@ static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
- pCmd->pTableNameList[i++] = strndup(pBlocks->tableName, TSDB_TABLE_FNAME_LEN);
+ pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
if (freeBlockMap) {
- pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
+ pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, false);
}
}
@@ -822,7 +818,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
- INSERT_HEAD_SIZE, 0, pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
+ INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
taosHashCleanup(pVnodeDataBlockHashList);
@@ -855,8 +851,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
-
- tscDebug("%p name:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableName,
+
+ tscDebug("%p name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@@ -1058,7 +1054,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
SInternalField* pInfo = taosArrayGet(pFieldInfo->internalField, i);
if (pInfo->pArithExprInfo != NULL) {
- tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL);
+ tExprTreeDestroy(pInfo->pArithExprInfo->pExpr, NULL);
SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) {
@@ -1306,7 +1302,7 @@ SColumn* tscColumnClone(const SColumn* src) {
dst->colIndex = src->colIndex;
dst->numOfFilters = src->numOfFilters;
- dst->filterInfo = tscFilterInfoClone(src->filterInfo, src->numOfFilters);
+ dst->filterInfo = tFilterInfoDup(src->filterInfo, src->numOfFilters);
return dst;
}
@@ -1812,10 +1808,10 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
- info->itemList = taosArrayClone(pInfo->itemList);
+ info->itemList = taosArrayDup(pInfo->itemList);
}
-SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) {
+SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
if (pVgroupTables == NULL) {
return NULL;
}
@@ -1846,7 +1842,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) {
tfree(pQueryInfo->pTableMetaInfo);
}
-STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
+STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) {
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
if (pAlloc == NULL) {
@@ -1864,7 +1860,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo;
if (name != NULL) {
- tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
+ tNameAssign(&pTableMetaInfo->name, name);
}
pTableMetaInfo->pTableMeta = pTableMeta;
@@ -1883,7 +1879,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
- pTableMetaInfo->pVgroupTables = tscVgroupTableInfoClone(pVgroupTables);
+ pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables);
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
@@ -1961,7 +1957,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
- tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
+ tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
@@ -2066,7 +2062,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
- pNewQueryInfo->groupbyExpr.columnInfo = taosArrayClone(pQueryInfo->groupbyExpr.columnInfo);
+ pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@@ -2117,27 +2113,26 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA;
- char* name = pTableMetaInfo->name;
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
- STableMeta* pTableMeta = tscTableMetaClone(pTableMetaInfo->pTableMeta);
+ STableMeta* pTableMeta = tscTableMetaDup(pTableMetaInfo->pTableMeta);
assert(pTableMeta != NULL);
- pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
+ pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
- STableMeta* pPrevTableMeta = tscTableMetaClone(pPrevInfo->pTableMeta);
+ STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
- pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
+ pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pTableMetaInfo->pVgroupTables);
}
// this case cannot be happened
if (pFinalInfo->pTableMeta == NULL) {
- tscError("%p new subquery failed since no tableMeta, name:%s", pSql, name);
+ tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
@@ -2162,7 +2157,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
"%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
- size, pNewQueryInfo->fieldsInfo.numOfOutput, pFinalInfo->name, pNewQueryInfo->window.skey,
+ size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
tscPrintSelectClause(pNew, 0);
@@ -2199,7 +2194,7 @@ void tscDoQuery(SSqlObj* pSql) {
}
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
- tscProcessMultiVnodesImportFromFile(pSql);
+ tscImportDataFromFile(pSql);
} else {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
uint16_t type = pQueryInfo->type;
@@ -2299,7 +2294,6 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
}
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
-
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
@@ -2697,7 +2691,7 @@ uint32_t tscGetTableMetaMaxSize() {
return sizeof(STableMeta) + TSDB_MAX_COLUMNS * sizeof(SSchema);
}
-STableMeta* tscTableMetaClone(STableMeta* pTableMeta) {
+STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
uint32_t size = tscGetTableMetaSize(pTableMeta);
STableMeta* p = calloc(1, size);
diff --git a/src/query/inc/qArithmeticOperator.h b/src/common/inc/tarithoperator.h
similarity index 100%
rename from src/query/inc/qArithmeticOperator.h
rename to src/common/inc/tarithoperator.h
diff --git a/src/query/inc/qAst.h b/src/common/inc/texpr.h
similarity index 83%
rename from src/query/inc/qAst.h
rename to src/common/inc/texpr.h
index 39af7261efc222c8a6bcfc809288c256eccb0970..acfbffc01e400f8b111ee92b7651bb048c112bd2 100644
--- a/src/query/inc/qAst.h
+++ b/src/common/inc/texpr.h
@@ -31,6 +31,15 @@ extern "C" {
struct tExprNode;
struct SSchema;
+#define QUERY_COND_REL_PREFIX_IN "IN|"
+#define QUERY_COND_REL_PREFIX_LIKE "LIKE|"
+
+#define QUERY_COND_REL_PREFIX_IN_LEN 3
+#define QUERY_COND_REL_PREFIX_LIKE_LEN 5
+
+typedef bool (*__result_filter_fn_t)(const void *, void *);
+typedef void (*__do_filter_suppl_fn_t)(void *, void *);
+
enum {
TSQL_NODE_DUMMY = 0x0,
TSQL_NODE_EXPR = 0x1,
@@ -38,9 +47,6 @@ enum {
TSQL_NODE_VALUE = 0x4,
};
-typedef bool (*__result_filter_fn_t)(const void *, void *);
-typedef void (*__do_filter_suppl_fn_t)(void *, void *);
-
/**
* this structure is used to filter data in tags, so the offset of filtered tag column in tagdata string is required
*/
@@ -52,12 +58,6 @@ typedef struct tQueryInfo {
bool indexed; // indexed columns
} tQueryInfo;
-typedef struct SExprTraverseSupp {
- __result_filter_fn_t nodeFilterFn;
- __do_filter_suppl_fn_t setupInfoFn;
- void * pExtInfo;
-} SExprTraverseSupp;
-
typedef struct tExprNode {
uint8_t nodeType;
union {
@@ -65,7 +65,7 @@ typedef struct tExprNode {
uint8_t optr; // filter operator
uint8_t hasPK; // 0: do not contain primary filter, 1: contain
void * info; // support filter operation on this expression only available for leaf node
-
+
struct tExprNode *pLeft; // left child pointer
struct tExprNode *pRight; // right child pointer
} _node;
@@ -74,19 +74,27 @@ typedef struct tExprNode {
};
} tExprNode;
-void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
- char *(*cb)(void *, const char*, int32_t));
+typedef struct SExprTraverseSupp {
+ __result_filter_fn_t nodeFilterFn;
+ __do_filter_suppl_fn_t setupInfoFn;
+ void * pExtInfo;
+} SExprTraverseSupp;
+
+void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
-void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
-void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*));
-
bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
+typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
+ int32_t rightType, void *output, int32_t order);
+
+void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
+ char *(*cb)(void *, const char*, int32_t));
+
#ifdef __cplusplus
}
#endif
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 6631d4e450f7841e3dea73ecd43973ad9904cc50..892d682756ceaef9cd4d27f5abf6b00f0b08c31b 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -1,3 +1,18 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#ifndef TDENGINE_NAME_H
#define TDENGINE_NAME_H
@@ -21,6 +36,20 @@ typedef struct SColumnInfoData {
void* pData; // the corresponding block data in memory
} SColumnInfoData;
+#define TSDB_DB_NAME_T 1
+#define TSDB_TABLE_NAME_T 2
+
+#define T_NAME_ACCT 0x1u
+#define T_NAME_DB 0x2u
+#define T_NAME_TABLE 0x4u
+
+typedef struct SName {
+ uint8_t type; //db_name_t, table_name_t
+ char acctId[TSDB_ACCT_ID_LEN];
+ char dbname[TSDB_DB_NAME_LEN];
+ char tname[TSDB_TABLE_NAME_LEN];
+} SName;
+
void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
@@ -37,9 +66,9 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
bool tscValidateTableNameLength(size_t len);
-SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
+SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters);
-SSchema tscGetTbnameColumnSchema();
+SSchema tGetTbnameColumnSchema();
/**
* check if the schema is valid or not, including following aspects:
@@ -53,6 +82,28 @@ SSchema tscGetTbnameColumnSchema();
* @param numOfCols
* @return
*/
-bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
+bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
+
+int32_t tNameExtractFullName(const SName* name, char* dst);
+int32_t tNameLen(const SName* name);
+
+SName* tNameDup(const SName* name);
+
+bool tIsValidName(const SName* name);
+
+const char* tNameGetTableName(const SName* name);
+
+int32_t tNameGetDbName(const SName* name, char* dst);
+int32_t tNameGetFullDbName(const SName* name, char* dst);
+
+bool tNameIsEmpty(const SName* name);
+
+void tNameAssign(SName* dst, const SName* src);
+
+int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
+
+int32_t tNameSetAcctId(SName* dst, const char* acct);
+
+int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken);
#endif // TDENGINE_NAME_H
diff --git a/src/query/src/qArithmeticOperator.c b/src/common/src/tarithoperator.c
similarity index 99%
rename from src/query/src/qArithmeticOperator.c
rename to src/common/src/tarithoperator.c
index 677951bd07ba5c61e9e1078dbe94eb692a4a218c..1cb667d259f040cfab0656562f7c97444fc48d8a 100644
--- a/src/query/src/qArithmeticOperator.c
+++ b/src/common/src/tarithoperator.c
@@ -15,9 +15,9 @@
#include "os.h"
-#include "qArithmeticOperator.h"
#include "ttype.h"
#include "tutil.h"
+#include "tarithoperator.h"
#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
{ \
diff --git a/src/query/src/qAst.c b/src/common/src/texpr.c
similarity index 96%
rename from src/query/src/qAst.c
rename to src/common/src/texpr.c
index 1e6dbe8e3dd52c98896c02954b2208c1cfcf7a50..f941fc45019dd5b267348de2a4fc1792638164e0 100644
--- a/src/query/src/qAst.c
+++ b/src/common/src/texpr.c
@@ -16,18 +16,15 @@
#include "os.h"
#include "exception.h"
-#include "qArithmeticOperator.h"
-#include "qAst.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tbuffer.h"
#include "tcompare.h"
-#include "tname.h"
-#include "tschemautil.h"
#include "tsdb.h"
#include "tskiplist.h"
-#include "tsqlfunction.h"
+#include "texpr.h"
+#include "tarithoperator.h"
static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) {
if (pLeft->nodeType == TSQL_NODE_COL) {
@@ -102,13 +99,15 @@ static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOf
}
}
-void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
+static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *));
+
+void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
if (pNode == NULL) {
return;
}
if (pNode->nodeType == TSQL_NODE_EXPR) {
- tExprTreeDestroy(&pNode, fp);
+ doExprTreeDestroy(&pNode, fp);
} else if (pNode->nodeType == TSQL_NODE_VALUE) {
tVariantDestroy(pNode->pVal);
} else if (pNode->nodeType == TSQL_NODE_COL) {
@@ -118,14 +117,14 @@ void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
free(pNode);
}
-void tExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
+static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
if (*pExpr == NULL) {
return;
}
if ((*pExpr)->nodeType == TSQL_NODE_EXPR) {
- tExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
- tExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
+ doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
+ doExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
if (fp != NULL) {
fp((*pExpr)->_node.info);
@@ -270,8 +269,9 @@ void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput,
}
}
- free(pLeftOutput);
- free(pRightOutput);
+ tfree(pdata);
+ tfree(pLeftOutput);
+ tfree(pRightOutput);
}
static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) {
@@ -342,7 +342,7 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
}
tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode));
- CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, pExpr, NULL);
+ CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, pExpr, NULL);
pExpr->nodeType = tbufReadUint8(br);
if (pExpr->nodeType == TSQL_NODE_VALUE) {
@@ -396,7 +396,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
int32_t anchor = CLEANUP_GET_ANCHOR();
tExprNode* expr = exception_calloc(1, sizeof(tExprNode));
- CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, expr, NULL);
+ CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
expr->nodeType = TSQL_NODE_EXPR;
@@ -407,7 +407,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
left->pSchema = pSchema;
- *pSchema = tscGetTbnameColumnSchema();
+ *pSchema = tGetTbnameColumnSchema();
tExprNode* right = exception_calloc(1, sizeof(tExprNode));
expr->_node.pRight = right;
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 31d473866a9022c5003bd8ce4e4c537cfed51f99..178ed09123e43b960faa3516cc3e9761dae1fcd3 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -3,31 +3,12 @@
#include "tname.h"
#include "tstoken.h"
-#include "ttokendef.h"
#include "tvariant.h"
-#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
-
+#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
-// todo refactor
-UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
- for (int32_t i = 0; i < num; ++i) {
- while (*input != 0 && *input++ != delim) {
- };
- }
- return input;
-}
-
-UNUSED_FUNC static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
- size_t len = 0;
- while (*src != delimiter && *src != 0) {
- *dst++ = *src++;
- len++;
- }
-
- return len;
-}
+#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
void extractTableName(const char* tableId, char* name) {
size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]);
@@ -93,7 +74,7 @@ bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
}
-SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
+SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
if (numOfFilters == 0) {
assert(src == NULL);
return NULL;
@@ -208,7 +189,7 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
}
}
-SSchema tscGetTbnameColumnSchema() {
+SSchema tGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
@@ -256,7 +237,7 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
return rowLen <= maxLen;
}
-bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
+bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
@@ -280,3 +261,184 @@ bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags
return true;
}
+
+int32_t tNameExtractFullName(const SName* name, char* dst) {
+ assert(name != NULL && dst != NULL);
+
+ // invalid full name format, abort
+ if (!tIsValidName(name)) {
+ return -1;
+ }
+
+ int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
+
+ size_t tnameLen = strlen(name->tname);
+ if (tnameLen > 0) {
+ assert(name->type == TSDB_TABLE_NAME_T);
+ dst[len] = TS_PATH_DELIMITER[0];
+
+ memcpy(dst + len + 1, name->tname, tnameLen);
+ dst[len + tnameLen + 1] = 0;
+ }
+
+ return 0;
+}
+
+int32_t tNameLen(const SName* name) {
+ assert(name != NULL);
+ int32_t len = (int32_t) strlen(name->acctId);
+ int32_t len1 = (int32_t) strlen(name->dbname);
+ int32_t len2 = (int32_t) strlen(name->tname);
+
+ if (name->type == TSDB_DB_NAME_T) {
+ assert(len2 == 0);
+ return len + len1 + TS_PATH_DELIMITER_LEN;
+ } else {
+ assert(len2 > 0);
+ return len + len1 + len2 + TS_PATH_DELIMITER_LEN * 2;
+ }
+}
+
+bool tIsValidName(const SName* name) {
+ assert(name != NULL);
+
+ if (!VALID_NAME_TYPE(name->type)) {
+ return false;
+ }
+
+ if (strlen(name->acctId) <= 0) {
+ return false;
+ }
+
+ if (name->type == TSDB_DB_NAME_T) {
+ return strlen(name->dbname) > 0;
+ } else {
+ return strlen(name->dbname) > 0 && strlen(name->tname) > 0;
+ }
+}
+
+SName* tNameDup(const SName* name) {
+ assert(name != NULL);
+
+ SName* p = calloc(1, sizeof(SName));
+ memcpy(p, name, sizeof(SName));
+ return p;
+}
+
+int32_t tNameGetDbName(const SName* name, char* dst) {
+ assert(name != NULL && dst != NULL);
+ strncpy(dst, name->dbname, tListLen(name->dbname));
+ return 0;
+}
+
+int32_t tNameGetFullDbName(const SName* name, char* dst) {
+ assert(name != NULL && dst != NULL);
+ snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,
+ "%s.%s", name->acctId, name->dbname);
+ return 0;
+}
+
+bool tNameIsEmpty(const SName* name) {
+ assert(name != NULL);
+ return name->type == 0 || strlen(name->acctId) <= 0;
+}
+
+const char* tNameGetTableName(const SName* name) {
+ assert(name != NULL && name->type == TSDB_TABLE_NAME_T);
+ return &name->tname[0];
+}
+
+void tNameAssign(SName* dst, const SName* src) {
+ memcpy(dst, src, sizeof(SName));
+}
+
+int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) {
+ assert(dst != NULL && dbToken != NULL && acct != NULL);
+
+ // too long account id or too long db name
+ if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
+ return -1;
+ }
+
+ dst->type = TSDB_DB_NAME_T;
+ tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
+ tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1);
+ return 0;
+}
+
+int32_t tNameSetAcctId(SName* dst, const char* acct) {
+ assert(dst != NULL && acct != NULL);
+
+ // too long account id or too long db name
+ if (strlen(acct) >= tListLen(dst->acctId)) {
+ return -1;
+ }
+
+ tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
+
+ assert(strlen(dst->acctId) > 0);
+
+ return 0;
+}
+
+int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
+ assert(dst != NULL && str != NULL && strlen(str) > 0);
+
+ char* p = NULL;
+ if ((type & T_NAME_ACCT) == T_NAME_ACCT) {
+ p = strstr(str, TS_PATH_DELIMITER);
+ if (p == NULL) {
+ return -1;
+ }
+
+ int32_t len = (int32_t)(p - str);
+
+ // too long account id or too long db name
+ if ((len >= tListLen(dst->acctId)) || (len <= 0)) {
+ return -1;
+ }
+
+ memcpy (dst->acctId, str, len);
+ dst->acctId[len] = 0;
+
+ assert(strlen(dst->acctId) > 0);
+ }
+
+ if ((type & T_NAME_DB) == T_NAME_DB) {
+ dst->type = TSDB_DB_NAME_T;
+ char* start = (char*)((p == NULL)? str:(p+1));
+
+ int32_t len = 0;
+ p = strstr(start, TS_PATH_DELIMITER);
+ if (p == NULL) {
+ len = (int32_t) strlen(start);
+ } else {
+ len = (int32_t) (p - start);
+ }
+
+ // too long account id or too long db name
+ if ((len >= tListLen(dst->dbname)) || (len <= 0)) {
+ return -1;
+ }
+
+ memcpy (dst->dbname, start, len);
+ dst->dbname[len] = 0;
+ }
+
+ if ((type & T_NAME_TABLE) == T_NAME_TABLE) {
+ dst->type = TSDB_TABLE_NAME_T;
+ char* start = (char*) ((p == NULL)? str: (p+1));
+
+ int32_t len = (int32_t) strlen(start);
+
+ // too long account id or too long db name
+ if ((len >= tListLen(dst->tname)) || (len <= 0)) {
+ return -1;
+ }
+
+ memcpy (dst->tname, start, len);
+ dst->tname[len] = 0;
+ }
+
+ return 0;
+}
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 7097e9bc5add0b944e1df1911972422cdcbd1c58..b64161e2e4fd6fc00abb659118cfdcd50dfcf0a8 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.16-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.17-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index 5aa60c0df9311a18d5c849f125b24f6764579bdd..1f03c3c6fe5e1d5f1d7d12a0df7babb6edb789f1 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
com.taosdata.jdbc
taos-jdbcdriver
-  <version>2.0.16</version>
+  <version>2.0.17</version>
jar
JDBCDriver
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index d18d86258a41ef4420863eddc47c20b24407f49a..6be0ca036e797decb668c42a81eeaafb91d52787 100755
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.taosdata.jdbc
taos-jdbcdriver
-  <version>2.0.16</version>
+  <version>2.0.17</version>
jar
JDBCDriver
https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc
@@ -74,6 +74,14 @@
1.2.58
+
+  <dependency>
+    <groupId>org.apache.commons</groupId>
+    <artifactId>commons-dbcp2</artifactId>
+    <version>2.7.0</version>
+  </dependency>
+
+
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
index 8ab0e4429a89ac15447786f89afbec0c61503ed9..08414d05e9f8b03582ac1257e6c460c05522f57e 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
@@ -1,28 +1,13 @@
-/***************************************************************************
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************/
package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
-public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
+public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrapper {
private final static String PRODUCT_NAME = "TDengine";
private final static String PRODUCT_VESION = "2.0.x.x";
- private final static String DRIVER_NAME = "taos-jdbcdriver";
private final static String DRIVER_VERSION = "2.0.x";
private final static int DRIVER_MAJAR_VERSION = 2;
private final static int DRIVER_MINOR_VERSION = 0;
@@ -67,9 +52,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return PRODUCT_VESION;
}
- public String getDriverName() throws SQLException {
- return DRIVER_NAME;
- }
+ public abstract String getDriverName() throws SQLException;
public String getDriverVersion() throws SQLException {
return DRIVER_VERSION;
@@ -92,6 +75,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
+ //像database、table这些对象的标识符,在存储时是否采用大小写混合的模式
return false;
}
@@ -100,7 +84,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean storesLowerCaseIdentifiers() throws SQLException {
- return false;
+ return true;
}
public boolean storesMixedCaseIdentifiers() throws SQLException {
@@ -168,10 +152,12 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean nullPlusNonNullIsNull() throws SQLException {
+ // null + non-null != null
return false;
}
public boolean supportsConvert() throws SQLException {
+ // 是否支持转换函数convert
return false;
}
@@ -196,7 +182,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsGroupBy() throws SQLException {
- return false;
+ return true;
}
public boolean supportsGroupByUnrelated() throws SQLException {
@@ -468,7 +454,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public int getDefaultTransactionIsolation() throws SQLException {
- return 0;
+ return Connection.TRANSACTION_NONE;
}
public boolean supportsTransactions() throws SQLException {
@@ -476,6 +462,8 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
+ if (level == Connection.TRANSACTION_NONE)
+ return true;
return false;
}
@@ -495,18 +483,113 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return false;
}
- public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
- throws SQLException {
+ public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException {
return null;
}
- public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
- String columnNamePattern) throws SQLException {
+ public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException {
return null;
}
- public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
- throws SQLException;
+ public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException;
+
+ protected ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types, Connection connection) throws SQLException {
+ try (Statement stmt = connection.createStatement()) {
+ if (catalog == null || catalog.isEmpty())
+ return null;
+
+ ResultSet databases = stmt.executeQuery("show databases");
+ String dbname = null;
+ while (databases.next()) {
+ dbname = databases.getString("name");
+ if (dbname.equalsIgnoreCase(catalog))
+ break;
+ }
+ databases.close();
+ if (dbname == null)
+ return null;
+
+ stmt.execute("use " + dbname);
+ DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
+ List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
+ ColumnMetaData col1 = new ColumnMetaData();
+ col1.setColIndex(1);
+ col1.setColName("TABLE_CAT");
+ col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col1);
+ ColumnMetaData col2 = new ColumnMetaData();
+ col2.setColIndex(2);
+ col2.setColName("TABLE_SCHEM");
+ col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col2);
+ ColumnMetaData col3 = new ColumnMetaData();
+ col3.setColIndex(3);
+ col3.setColName("TABLE_NAME");
+ col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col3);
+ ColumnMetaData col4 = new ColumnMetaData();
+ col4.setColIndex(4);
+ col4.setColName("TABLE_TYPE");
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col4);
+ ColumnMetaData col5 = new ColumnMetaData();
+ col5.setColIndex(5);
+ col5.setColName("REMARKS");
+ col5.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col5);
+ ColumnMetaData col6 = new ColumnMetaData();
+ col6.setColIndex(6);
+ col6.setColName("TYPE_CAT");
+ col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col6);
+ ColumnMetaData col7 = new ColumnMetaData();
+ col7.setColIndex(7);
+ col7.setColName("TYPE_SCHEM");
+ col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col7);
+ ColumnMetaData col8 = new ColumnMetaData();
+ col8.setColIndex(8);
+ col8.setColName("TYPE_NAME");
+ col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col8);
+ ColumnMetaData col9 = new ColumnMetaData();
+ col9.setColIndex(9);
+ col9.setColName("SELF_REFERENCING_COL_NAME");
+ col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col9);
+ ColumnMetaData col10 = new ColumnMetaData();
+ col10.setColIndex(10);
+ col10.setColName("REF_GENERATION");
+ col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col10);
+ resultSet.setColumnMetaDataList(columnMetaDataList);
+
+ List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
+ ResultSet tables = stmt.executeQuery("show tables");
+ while (tables.next()) {
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(10);
+ rowData.setString(0, dbname); //table_cat
+ rowData.setString(1, null); //TABLE_SCHEM
+ rowData.setString(2, tables.getString("table_name")); //TABLE_NAME
+ rowData.setString(3, "TABLE"); //TABLE_TYPE
+ rowData.setString(4, ""); //REMARKS
+ rowDataList.add(rowData);
+ }
+
+ ResultSet stables = stmt.executeQuery("show stables");
+ while (stables.next()) {
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(10);
+ rowData.setString(0, dbname); //TABLE_CAT
+ rowData.setString(1, null); //TABLE_SCHEM
+ rowData.setString(2, stables.getString("name")); //TABLE_NAME
+ rowData.setString(3, "TABLE"); //TABLE_TYPE
+ rowData.setString(4, "STABLE"); //REMARKS
+ rowDataList.add(rowData);
+ }
+ resultSet.setRowDataList(rowDataList);
+ return resultSet;
+ }
+ }
public ResultSet getSchemas() throws SQLException {
return getEmptyResultSet();
@@ -516,32 +599,239 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
public ResultSet getTableTypes() throws SQLException {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
-
// set up ColumnMetaDataList
- List columnMetaDataList = new ArrayList(1);
+ List columnMetaDataList = new ArrayList<>();
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE");
colMetaData.setColSize(10);
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
+ colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(colMetaData);
+ resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
- List rowDataList = new ArrayList(2);
- TSDBResultSetRowData rowData = new TSDBResultSetRowData();
+ List rowDataList = new ArrayList<>();
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(1);
rowData.setString(0, "TABLE");
rowDataList.add(rowData);
- rowData = new TSDBResultSetRowData();
+ rowData = new TSDBResultSetRowData(1);
rowData.setString(0, "STABLE");
rowDataList.add(rowData);
-
- resultSet.setColumnMetaDataList(columnMetaDataList);
resultSet.setRowDataList(rowDataList);
+
return resultSet;
}
public abstract ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException;
+ protected ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern, Connection conn) {
+ try (Statement stmt = conn.createStatement()) {
+ if (catalog == null || catalog.isEmpty())
+ return null;
+
+ ResultSet databases = stmt.executeQuery("show databases");
+ String dbname = null;
+ while (databases.next()) {
+ dbname = databases.getString("name");
+ if (dbname.equalsIgnoreCase(catalog))
+ break;
+ }
+ databases.close();
+ if (dbname == null)
+ return null;
+
+ stmt.execute("use " + dbname);
+ DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
+ // set up ColumnMetaDataList
+
+ List columnMetaDataList = new ArrayList<>();
+ // TABLE_CAT
+ ColumnMetaData col1 = new ColumnMetaData();
+ col1.setColIndex(1);
+ col1.setColName("TABLE_CAT");
+ col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col1);
+ // TABLE_SCHEM
+ ColumnMetaData col2 = new ColumnMetaData();
+ col2.setColIndex(2);
+ col2.setColName("TABLE_SCHEM");
+ col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col2);
+ // TABLE_NAME
+ ColumnMetaData col3 = new ColumnMetaData();
+ col3.setColIndex(3);
+ col3.setColName("TABLE_NAME");
+ col3.setColSize(193);
+ col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col3);
+ // COLUMN_NAME
+ ColumnMetaData col4 = new ColumnMetaData();
+ col4.setColIndex(4);
+ col4.setColName("COLUMN_NAME");
+ col4.setColSize(65);
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col4);
+ // DATA_TYPE
+ ColumnMetaData col5 = new ColumnMetaData();
+ col5.setColIndex(5);
+ col5.setColName("DATA_TYPE");
+ col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col5);
+ // TYPE_NAME
+ ColumnMetaData col6 = new ColumnMetaData();
+ col6.setColIndex(6);
+ col6.setColName("TYPE_NAME");
+ col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col6);
+ // COLUMN_SIZE
+ ColumnMetaData col7 = new ColumnMetaData();
+ col7.setColIndex(7);
+ col7.setColName("COLUMN_SIZE");
+ col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col7);
+ // BUFFER_LENGTH, not used
+ ColumnMetaData col8 = new ColumnMetaData();
+ col8.setColIndex(8);
+ col8.setColName("BUFFER_LENGTH");
+ columnMetaDataList.add(col8);
+ // DECIMAL_DIGITS
+ ColumnMetaData col9 = new ColumnMetaData();
+ col9.setColIndex(9);
+ col9.setColName("DECIMAL_DIGITS");
+ col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col9);
+ // add NUM_PREC_RADIX
+ ColumnMetaData col10 = new ColumnMetaData();
+ col10.setColIndex(10);
+ col10.setColName("NUM_PREC_RADIX");
+ col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col10);
+ // NULLABLE
+ ColumnMetaData col11 = new ColumnMetaData();
+ col11.setColIndex(11);
+ col11.setColName("NULLABLE");
+ col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col11);
+ // REMARKS
+ ColumnMetaData col12 = new ColumnMetaData();
+ col12.setColIndex(12);
+ col12.setColName("REMARKS");
+ col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col12);
+ // COLUMN_DEF
+ ColumnMetaData col13 = new ColumnMetaData();
+ col13.setColIndex(13);
+ col13.setColName("COLUMN_DEF");
+ col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col13);
+ //SQL_DATA_TYPE
+ ColumnMetaData col14 = new ColumnMetaData();
+ col14.setColIndex(14);
+ col14.setColName("SQL_DATA_TYPE");
+ col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col14);
+ //SQL_DATETIME_SUB
+ ColumnMetaData col15 = new ColumnMetaData();
+ col15.setColIndex(15);
+ col15.setColName("SQL_DATETIME_SUB");
+ col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col15);
+ //CHAR_OCTET_LENGTH
+ ColumnMetaData col16 = new ColumnMetaData();
+ col16.setColIndex(16);
+ col16.setColName("CHAR_OCTET_LENGTH");
+ col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col16);
+ //ORDINAL_POSITION
+ ColumnMetaData col17 = new ColumnMetaData();
+ col17.setColIndex(17);
+ col17.setColName("ORDINAL_POSITION");
+ col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col17);
+ // IS_NULLABLE
+ ColumnMetaData col18 = new ColumnMetaData();
+ col18.setColIndex(18);
+ col18.setColName("IS_NULLABLE");
+ col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col18);
+ //SCOPE_CATALOG
+ ColumnMetaData col19 = new ColumnMetaData();
+ col19.setColIndex(19);
+ col19.setColName("SCOPE_CATALOG");
+ col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col19);
+ //SCOPE_SCHEMA
+ ColumnMetaData col20 = new ColumnMetaData();
+ col20.setColIndex(20);
+ col20.setColName("SCOPE_SCHEMA");
+ col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col20);
+ //SCOPE_TABLE
+ ColumnMetaData col21 = new ColumnMetaData();
+ col21.setColIndex(21);
+ col21.setColName("SCOPE_TABLE");
+ col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col21);
+ //SOURCE_DATA_TYPE
+ ColumnMetaData col22 = new ColumnMetaData();
+ col22.setColIndex(22);
+ col22.setColName("SOURCE_DATA_TYPE");
+ col22.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
+ columnMetaDataList.add(col22);
+ //IS_AUTOINCREMENT
+ ColumnMetaData col23 = new ColumnMetaData();
+ col23.setColIndex(23);
+ col23.setColName("IS_AUTOINCREMENT");
+ col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col23);
+ //IS_GENERATEDCOLUMN
+ ColumnMetaData col24 = new ColumnMetaData();
+ col24.setColIndex(24);
+ col24.setColName("IS_GENERATEDCOLUMN");
+ col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col24);
+
+ resultSet.setColumnMetaDataList(columnMetaDataList);
+ // set up rowDataList
+ ResultSet rs = stmt.executeQuery("describe " + dbname + "." + tableNamePattern);
+ List rowDataList = new ArrayList<>();
+ int index = 0;
+ while (rs.next()) {
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
+ // set TABLE_CAT
+ rowData.setString(0, dbname);
+ // set TABLE_NAME
+ rowData.setString(2, tableNamePattern);
+ // set COLUMN_NAME
+ rowData.setString(3, rs.getString("Field"));
+ // set DATA_TYPE
+ String typeName = rs.getString("Type");
+ rowData.setInt(4, getDataType(typeName));
+ // set TYPE_NAME
+ rowData.setString(5, typeName);
+ // set COLUMN_SIZE
+ int length = rs.getInt("Length");
+ rowData.setInt(6, getColumnSize(typeName, length));
+ // set DECIMAL_DIGITS
+ rowData.setInt(8, getDecimalDigits(typeName));
+ // set NUM_PREC_RADIX
+ rowData.setInt(9, 10);
+ // set NULLABLE
+ rowData.setInt(10, getNullable(index, typeName));
+ // set REMARKS
+ rowData.setString(11, rs.getString("Note"));
+ rowDataList.add(rowData);
+ index++;
+ }
+ resultSet.setRowDataList(rowDataList);
+ return resultSet;
+
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
protected int getNullable(int index, String typeName) {
if (index == 0 && "TIMESTAMP".equals(typeName))
return DatabaseMetaData.columnNoNulls;
@@ -552,7 +842,6 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
switch (typeName) {
case "TIMESTAMP":
return 23;
-
default:
return 0;
}
@@ -615,9 +904,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return getEmptyResultSet();
}
- public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
- }
+ public abstract ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException;
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
@@ -718,9 +1005,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return getEmptyResultSet();
}
- public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
+ public abstract ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException;
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
String attributeNamePattern) throws SQLException {
@@ -728,15 +1013,17 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
+ if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
+ return true;
return false;
}
public int getResultSetHoldability() throws SQLException {
- return 0;
+ return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
public int getDatabaseMajorVersion() throws SQLException {
- return 0;
+ return 2;
}
public int getDatabaseMinorVersion() throws SQLException {
@@ -744,7 +1031,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public int getJDBCMajorVersion() throws SQLException {
- return 0;
+ return 2;
}
public int getJDBCMinorVersion() throws SQLException {
@@ -768,7 +1055,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
- return null;
+ return getEmptyResultSet();
}
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
@@ -805,4 +1092,180 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
private ResultSet getEmptyResultSet() {
return new EmptyResultSet();
}
+
+ @Override
+ public T unwrap(Class iface) throws SQLException {
+ try {
+ return iface.cast(this);
+ } catch (ClassCastException cce) {
+ throw new SQLException("Unable to unwrap to " + iface.toString());
+ }
+ }
+
+ @Override
+ public boolean isWrapperFor(Class> iface) throws SQLException {
+ return iface.isInstance(this);
+ }
+
+ protected ResultSet getCatalogs(Connection conn) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
+ // set up ColumnMetaDataList
+ List columnMetaDataList = new ArrayList<>();
+ // TABLE_CAT
+ ColumnMetaData col1 = new ColumnMetaData();
+ col1.setColIndex(1);
+ col1.setColName("TABLE_CAT");
+ col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col1);
+
+ resultSet.setColumnMetaDataList(columnMetaDataList);
+
+ List rowDataList = new ArrayList<>();
+ ResultSet rs = stmt.executeQuery("show databases");
+ while (rs.next()) {
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(1);
+ rowData.setString(0, rs.getString("name"));
+ rowDataList.add(rowData);
+ }
+ resultSet.setRowDataList(rowDataList);
+ return resultSet;
+ }
+ }
+
+
+ protected ResultSet getPrimaryKeys(String catalog, String schema, String table, Connection conn) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ if (catalog == null || catalog.isEmpty())
+ return null;
+
+ ResultSet databases = stmt.executeQuery("show databases");
+ String dbname = null;
+ while (databases.next()) {
+ dbname = databases.getString("name");
+ if (dbname.equalsIgnoreCase(catalog))
+ break;
+ }
+ databases.close();
+ if (dbname == null)
+ return null;
+
+ stmt.execute("use " + dbname);
+ DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
+ // set up ColumnMetaDataList
+ List columnMetaDataList = new ArrayList<>();
+ // TABLE_CAT
+ ColumnMetaData col1 = new ColumnMetaData();
+ col1.setColIndex(0);
+ col1.setColName("TABLE_CAT");
+ col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col1);
+ // TABLE_SCHEM
+ ColumnMetaData col2 = new ColumnMetaData();
+ col2.setColIndex(1);
+ col2.setColName("TABLE_SCHEM");
+ col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col2);
+ // TABLE_NAME
+ ColumnMetaData col3 = new ColumnMetaData();
+ col3.setColIndex(2);
+ col3.setColName("TABLE_NAME");
+ col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col3);
+ // COLUMN_NAME
+ ColumnMetaData col4 = new ColumnMetaData();
+ col4.setColIndex(3);
+ col4.setColName("COLUMN_NAME");
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col4);
+ // KEY_SEQ
+ ColumnMetaData col5 = new ColumnMetaData();
+ col5.setColIndex(4);
+ col5.setColName("KEY_SEQ");
+ col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
+ columnMetaDataList.add(col5);
+ // PK_NAME
+ ColumnMetaData col6 = new ColumnMetaData();
+ col6.setColIndex(5);
+ col6.setColName("PK_NAME");
+ col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col6);
+ resultSet.setColumnMetaDataList(columnMetaDataList);
+
+ // set rowData
+ List rowDataList = new ArrayList<>();
+ ResultSet rs = stmt.executeQuery("describe " + dbname + "." + table);
+ rs.next();
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(6);
+ rowData.setString(0, null);
+ rowData.setString(1, null);
+ rowData.setString(2, table);
+ String pkName = rs.getString(1);
+ rowData.setString(3, pkName);
+ rowData.setInt(4, 1);
+ rowData.setString(5, pkName);
+ rowDataList.add(rowData);
+ resultSet.setRowDataList(rowDataList);
+ return resultSet;
+ }
+ }
+
+ protected ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern, Connection conn) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ if (catalog == null || catalog.isEmpty())
+ return null;
+
+ ResultSet databases = stmt.executeQuery("show databases");
+ String dbname = null;
+ while (databases.next()) {
+ dbname = databases.getString("name");
+ if (dbname.equalsIgnoreCase(catalog))
+ break;
+ }
+ databases.close();
+ if (dbname == null)
+ return null;
+
+ stmt.execute("use " + dbname);
+ DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
+ // set up ColumnMetaDataList
+ List columnMetaDataList = new ArrayList<>();
+ // TABLE_CAT
+ ColumnMetaData col1 = new ColumnMetaData();
+ col1.setColIndex(0);
+ col1.setColName("TABLE_CAT");
+ col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col1);
+ // TABLE_SCHEM
+ ColumnMetaData col2 = new ColumnMetaData();
+ col2.setColIndex(1);
+ col2.setColName("TABLE_SCHEM");
+ col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col2);
+ // TABLE_NAME
+ ColumnMetaData col3 = new ColumnMetaData();
+ col3.setColIndex(2);
+ col3.setColName("TABLE_NAME");
+ col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col3);
+ // SUPERTABLE_NAME
+ ColumnMetaData col4 = new ColumnMetaData();
+ col4.setColIndex(3);
+ col4.setColName("SUPERTABLE_NAME");
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+ columnMetaDataList.add(col4);
+ resultSet.setColumnMetaDataList(columnMetaDataList);
+
+ ResultSet rs = stmt.executeQuery("show tables like '" + tableNamePattern + "'");
+ List rowDataList = new ArrayList<>();
+ while (rs.next()) {
+ TSDBResultSetRowData rowData = new TSDBResultSetRowData(4);
+ rowData.setString(2, rs.getString(1));
+ rowData.setString(3, rs.getString(4));
+ rowDataList.add(rowData);
+ }
+ resultSet.setRowDataList(rowDataList);
+ return resultSet;
+ }
+ }
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/CatalogResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/CatalogResultSet.java
deleted file mode 100644
index 3a01e2e09297d6af1405c63200e2ba91b3fa99a2..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/CatalogResultSet.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/***************************************************************************
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- *****************************************************************************/
-package com.taosdata.jdbc;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
- * standard JDBC API contains more or less some adjustments customized for certain
- * compatibility needs.
- */
-public class CatalogResultSet extends TSDBResultSetWrapper {
-
-
- public CatalogResultSet(ResultSet resultSet) {
- super.setOriginalResultSet(resultSet);
- }
-
- @Override
- public String getString(int columnIndex) throws SQLException {
- if (columnIndex <= 1) {
- return super.getString(columnIndex);
- } else {
- return null;
- }
- }
-
- @Override
- public boolean getBoolean(int columnIndex) throws SQLException {
- if (columnIndex <= 1) {
- return super.getBoolean(columnIndex);
- } else {
- return false;
- }
- }
-
- @Override
- public byte[] getBytes(int columnIndex) throws SQLException {
- if (columnIndex <= 1) {
- return super.getBytes(columnIndex);
- } else {
- return null;
- }
- }
-
- @Override
- public Object getObject(int columnIndex) throws SQLException {
- if (columnIndex <= 1) {
- return super.getObject(columnIndex);
- } else {
- return null;
- }
- }
-
-}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java
index 633fdcd5ab7c9f077abbd725c2511bcc2251db44..fe16aa653546f4033bd944f0b6c72aec1863ab8a 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java
@@ -16,40 +16,40 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
- private int colType = 0;
- private String colName = null;
- private int colSize = -1;
- private int colIndex = 0;
-
- public int getColSize() {
- return colSize;
- }
-
- public void setColSize(int colSize) {
- this.colSize = colSize;
- }
-
- public int getColType() {
- return colType;
- }
-
- public void setColType(int colType) {
- this.colType = colType;
- }
-
- public String getColName() {
- return colName;
- }
-
- public void setColName(String colName) {
- this.colName = colName;
- }
-
- public int getColIndex() {
- return colIndex;
- }
-
- public void setColIndex(int colIndex) {
- this.colIndex = colIndex;
- }
+ private int colType = 0;
+ private String colName = null;
+ private int colSize = -1;
+ private int colIndex = 0;
+
+ public int getColSize() {
+ return colSize;
+ }
+
+ public void setColSize(int colSize) {
+ this.colSize = colSize;
+ }
+
+ public int getColType() {
+ return colType;
+ }
+
+ public void setColType(int colType) {
+ this.colType = colType;
+ }
+
+ public String getColName() {
+ return colName;
+ }
+
+ public void setColName(String colName) {
+ this.colName = colName;
+ }
+
+ public int getColIndex() {
+ return colIndex;
+ }
+
+ public void setColIndex(int colIndex) {
+ this.colIndex = colIndex;
+ }
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetColumnsResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetColumnsResultSet.java
deleted file mode 100644
index e15415e037948dd7ec757bcdeee03d14a0d588fb..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetColumnsResultSet.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/***************************************************************************
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- *****************************************************************************/
-package com.taosdata.jdbc;
-
-import java.sql.ResultSet;
-
-/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
- * standard JDBC API contains more or less some adjustments customized for certain
- * compatibility needs.
- */
-public class GetColumnsResultSet extends TSDBResultSetWrapper {
- private String catalog;
- private String schemaPattern;
- private String tableNamePattern;
- private String columnNamePattern;
-
- public GetColumnsResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) {
- super.setOriginalResultSet(resultSet);
- this.catalog = catalog;
- this.schemaPattern = schemaPattern;
- this.tableNamePattern = tableNamePattern;
- this.columnNamePattern = columnNamePattern;
- }
-
- @Override
- public String getString(int columnIndex) {
- switch (columnIndex) {
- case 1:
- return catalog;
- case 2:
- return null;
- case 3:
- return tableNamePattern;
- default:
- return null;
- }
- }
-}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetTablesResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetTablesResultSet.java
deleted file mode 100644
index e28f6e3c9adf8564437e1214b28b2eb13bdaf8d9..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetTablesResultSet.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/***************************************************************************
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- *****************************************************************************/
-package com.taosdata.jdbc;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
- * standard JDBC API contains more or less some adjustments customized for certain
- * compatibility needs.
- */
-public class GetTablesResultSet extends TSDBResultSetWrapper {
-
- private String catalog;
- private String schemaPattern;
- private String tableNamePattern;
- private String[] types;
-
- public GetTablesResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String[] types) {
- super.setOriginalResultSet(resultSet);
- this.catalog = catalog;
- this.schemaPattern = schemaPattern;
- this.tableNamePattern = tableNamePattern;
- this.types = types;
- }
-
- @Override
- public String getString(int columnIndex) throws SQLException {
- String ret = null;
- switch (columnIndex) {
- case 3:
- return super.getString(1);
- case 4:
- return "table";
- default:
- return null;
- }
- }
-
-}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java
index 4ea0fc79507dc1c0f5a963fba0e055491561a6b4..d1f1e77b1c1e325e04c018a23d5589b7501f4919 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java
@@ -14,11 +14,11 @@
*****************************************************************************/
package com.taosdata.jdbc;
-import java.sql.*;
-import java.util.ArrayList;
-import java.util.List;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
-public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
+public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData {
private String url;
private String userName;
@@ -29,29 +29,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
this.userName = userName;
}
- public void setConnection(Connection conn) {
- this.conn = conn;
- }
-
- @Override
- public T unwrap(Class iface) throws SQLException {
- try {
- return iface.cast(this);
- } catch (ClassCastException cce) {
- throw new SQLException("Unable to unwrap to " + iface.toString());
- }
- }
-
- public boolean isWrapperFor(Class> iface) throws SQLException {
- return iface.isInstance(this);
- }
-
- public boolean allProceduresAreCallable() throws SQLException {
- return false;
+ public Connection getConnection() throws SQLException {
+ return this.conn;
}
- public boolean allTablesAreSelectable() throws SQLException {
- return false;
+ public void setConnection(Connection conn) {
+ this.conn = conn;
}
public String getURL() throws SQLException {
@@ -62,911 +45,52 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
return this.userName;
}
- public boolean isReadOnly() throws SQLException {
- return false;
- }
-
- public boolean nullsAreSortedHigh() throws SQLException {
- return false;
- }
-
- public boolean nullsAreSortedLow() throws SQLException {
- return !nullsAreSortedHigh();
- }
-
- public boolean nullsAreSortedAtStart() throws SQLException {
- return true;
- }
-
- public boolean nullsAreSortedAtEnd() throws SQLException {
- return !nullsAreSortedAtStart();
- }
-
- public String getDatabaseProductName() throws SQLException {
- return "TDengine";
- }
-
- public String getDatabaseProductVersion() throws SQLException {
- return "2.0.x.x";
- }
-
public String getDriverName() throws SQLException {
return TSDBDriver.class.getName();
}
- public String getDriverVersion() throws SQLException {
- return "2.0.x";
- }
-
- public int getDriverMajorVersion() {
- return 2;
- }
-
- public int getDriverMinorVersion() {
- return 0;
- }
-
- public boolean usesLocalFiles() throws SQLException {
- return false;
- }
-
- public boolean usesLocalFilePerTable() throws SQLException {
- return false;
- }
-
-
- public boolean supportsMixedCaseIdentifiers() throws SQLException {
- //像database、table这些对象的标识符,在存储时是否采用大小写混合的模式
- return false;
- }
-
- public boolean storesUpperCaseIdentifiers() throws SQLException {
- return false;
- }
-
- public boolean storesLowerCaseIdentifiers() throws SQLException {
- return true;
- }
-
- public boolean storesMixedCaseIdentifiers() throws SQLException {
- return false;
- }
-
- public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
- //像database、table这些对象的标识符,在存储时是否采用大小写混合、并带引号的模式
- return false;
- }
-
- public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
- return false;
- }
-
- public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
- return false;
- }
-
- public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
- return false;
- }
-
- public String getIdentifierQuoteString() throws SQLException {
- return " ";
- }
-
- public String getSQLKeywords() throws SQLException {
- return null;
- }
-
- public String getNumericFunctions() throws SQLException {
- return null;
- }
-
- public String getStringFunctions() throws SQLException {
- return null;
- }
-
- public String getSystemFunctions() throws SQLException {
- return null;
- }
-
- public String getTimeDateFunctions() throws SQLException {
- return null;
- }
-
- public String getSearchStringEscape() throws SQLException {
- return null;
- }
-
- public String getExtraNameCharacters() throws SQLException {
- return null;
- }
-
- public boolean supportsAlterTableWithAddColumn() throws SQLException {
- return true;
- }
-
- public boolean supportsAlterTableWithDropColumn() throws SQLException {
- return true;
- }
-
- public boolean supportsColumnAliasing() throws SQLException {
- return true;
- }
-
- public boolean nullPlusNonNullIsNull() throws SQLException {
- // null + non-null != null
- return false;
- }
-
- public boolean supportsConvert() throws SQLException {
- // 是否支持转换函数convert
- return false;
- }
-
- public boolean supportsConvert(int fromType, int toType) throws SQLException {
- return false;
- }
-
- public boolean supportsTableCorrelationNames() throws SQLException {
- return false;
- }
-
- public boolean supportsDifferentTableCorrelationNames() throws SQLException {
- return false;
- }
-
- public boolean supportsExpressionsInOrderBy() throws SQLException {
- return false;
- }
-
- public boolean supportsOrderByUnrelated() throws SQLException {
- return false;
- }
-
- public boolean supportsGroupBy() throws SQLException {
- return true;
- }
-
- public boolean supportsGroupByUnrelated() throws SQLException {
- return false;
- }
-
- public boolean supportsGroupByBeyondSelect() throws SQLException {
- return false;
- }
-
- public boolean supportsLikeEscapeClause() throws SQLException {
- return false;
- }
-
- public boolean supportsMultipleResultSets() throws SQLException {
- return false;
- }
-
- public boolean supportsMultipleTransactions() throws SQLException {
- return false;
- }
-
- public boolean supportsNonNullableColumns() throws SQLException {
- return false;
- }
-
- public boolean supportsMinimumSQLGrammar() throws SQLException {
- return false;
- }
-
- public boolean supportsCoreSQLGrammar() throws SQLException {
- return false;
- }
-
- public boolean supportsExtendedSQLGrammar() throws SQLException {
- return false;
- }
-
- public boolean supportsANSI92EntryLevelSQL() throws SQLException {
- return false;
- }
-
- public boolean supportsANSI92IntermediateSQL() throws SQLException {
- return false;
- }
-
- public boolean supportsANSI92FullSQL() throws SQLException {
- return false;
- }
-
- public boolean supportsIntegrityEnhancementFacility() throws SQLException {
- return false;
- }
-
- public boolean supportsOuterJoins() throws SQLException {
- return false;
- }
-
- public boolean supportsFullOuterJoins() throws SQLException {
- return false;
- }
-
- public boolean supportsLimitedOuterJoins() throws SQLException {
- return false;
- }
-
- public String getSchemaTerm() throws SQLException {
- return null;
- }
-
- public String getProcedureTerm() throws SQLException {
- return null;
- }
-
- public String getCatalogTerm() throws SQLException {
- return "database";
- }
-
- public boolean isCatalogAtStart() throws SQLException {
- return true;
- }
-
- public String getCatalogSeparator() throws SQLException {
- return ".";
- }
-
- public boolean supportsSchemasInDataManipulation() throws SQLException {
- return false;
- }
-
- public boolean supportsSchemasInProcedureCalls() throws SQLException {
- return false;
- }
-
- public boolean supportsSchemasInTableDefinitions() throws SQLException {
- return false;
- }
-
- public boolean supportsSchemasInIndexDefinitions() throws SQLException {
- return false;
- }
-
- public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
- return false;
- }
-
- public boolean supportsCatalogsInDataManipulation() throws SQLException {
- return true;
- }
-
- public boolean supportsCatalogsInProcedureCalls() throws SQLException {
- return false;
- }
-
- public boolean supportsCatalogsInTableDefinitions() throws SQLException {
- return false;
- }
-
- public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
- return false;
- }
-
- public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
- return false;
- }
-
- public boolean supportsPositionedDelete() throws SQLException {
- return false;
- }
-
- public boolean supportsPositionedUpdate() throws SQLException {
- return false;
- }
-
- public boolean supportsSelectForUpdate() throws SQLException {
- return false;
- }
-
- public boolean supportsStoredProcedures() throws SQLException {
- return false;
- }
-
- public boolean supportsSubqueriesInComparisons() throws SQLException {
- return false;
- }
-
- public boolean supportsSubqueriesInExists() throws SQLException {
- return false;
- }
-
- public boolean supportsSubqueriesInIns() throws SQLException {
- return false;
- }
-
- public boolean supportsSubqueriesInQuantifieds() throws SQLException {
- return false;
- }
-
- public boolean supportsCorrelatedSubqueries() throws SQLException {
- return false;
- }
-
- public boolean supportsUnion() throws SQLException {
- return false;
- }
-
- public boolean supportsUnionAll() throws SQLException {
- return false;
- }
-
- public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
- return false;
- }
-
- public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
- return false;
- }
-
- public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
- return false;
- }
-
- public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
- return false;
- }
-
- public int getMaxBinaryLiteralLength() throws SQLException {
- return 0;
- }
-
- public int getMaxCharLiteralLength() throws SQLException {
- return 0;
- }
-
- public int getMaxColumnNameLength() throws SQLException {
- return 0;
- }
-
- public int getMaxColumnsInGroupBy() throws SQLException {
- return 0;
- }
-
- public int getMaxColumnsInIndex() throws SQLException {
- return 0;
- }
-
- public int getMaxColumnsInOrderBy() throws SQLException {
- return 0;
- }
-
- public int getMaxColumnsInSelect() throws SQLException {
- return 0;
- }
-
- public int getMaxColumnsInTable() throws SQLException {
- return 0;
- }
-
- public int getMaxConnections() throws SQLException {
- return 0;
- }
-
- public int getMaxCursorNameLength() throws SQLException {
- return 0;
- }
-
- public int getMaxIndexLength() throws SQLException {
- return 0;
- }
-
- public int getMaxSchemaNameLength() throws SQLException {
- return 0;
- }
-
- public int getMaxProcedureNameLength() throws SQLException {
- return 0;
- }
-
- public int getMaxCatalogNameLength() throws SQLException {
- return 0;
- }
-
- public int getMaxRowSize() throws SQLException {
- return 0;
- }
-
- public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
- return false;
- }
-
- public int getMaxStatementLength() throws SQLException {
- return 0;
- }
-
- public int getMaxStatements() throws SQLException {
- return 0;
- }
-
- public int getMaxTableNameLength() throws SQLException {
- return 0;
- }
-
- public int getMaxTablesInSelect() throws SQLException {
- return 0;
- }
-
- public int getMaxUserNameLength() throws SQLException {
- return 0;
- }
-
- public int getDefaultTransactionIsolation() throws SQLException {
- return Connection.TRANSACTION_NONE;
- }
-
- public boolean supportsTransactions() throws SQLException {
- return false;
- }
-
- public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
- if (level == Connection.TRANSACTION_NONE)
- return true;
- return false;
- }
-
- public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
- return false;
- }
-
- public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
- return false;
- }
-
- public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
- return false;
- }
-
- public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
- return false;
- }
-
- public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
- throws SQLException {
- return null;
- }
-
- public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
- String columnNamePattern) throws SQLException {
- return null;
- }
-
+ /**
+ * @Param catalog : database名称,"" 表示不属于任何database的table,null表示不使用database来缩小范围
+ * @Param schemaPattern : schema名称,""表示
+ * @Param tableNamePattern : 表名满足tableNamePattern的表, null表示返回所有表
+ * @Param types : 表类型,null表示返回所有类型
+ */
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
if (conn == null || conn.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
-
- try (Statement stmt = conn.createStatement()) {
- if (catalog == null || catalog.isEmpty())
- return null;
-
- stmt.executeUpdate("use " + catalog);
- ResultSet resultSet0 = stmt.executeQuery("show tables");
- GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
- return getTablesResultSet;
- }
+ return super.getTables(catalog, schemaPattern, tableNamePattern, types, conn);
}
- public ResultSet getSchemas() throws SQLException {
- return getEmptyResultSet();
- }
public ResultSet getCatalogs() throws SQLException {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
-
- try (Statement stmt = conn.createStatement()) {
- ResultSet rs = stmt.executeQuery("show databases");
- return new CatalogResultSet(rs);
- }
+ return super.getCatalogs(conn);
}
public ResultSet getTableTypes() throws SQLException {
- DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
-
- // set up ColumnMetaDataList
- List columnMetaDataList = new ArrayList<>(1);
- ColumnMetaData colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(0);
- colMetaData.setColName("TABLE_TYPE");
- colMetaData.setColSize(10);
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
- columnMetaDataList.add(colMetaData);
-
- // set up rowDataList
- List rowDataList = new ArrayList<>(2);
- TSDBResultSetRowData rowData = new TSDBResultSetRowData();
- rowData.setString(0, "TABLE");
- rowDataList.add(rowData);
- rowData = new TSDBResultSetRowData();
- rowData.setString(0, "STABLE");
- rowDataList.add(rowData);
-
- resultSet.setColumnMetaDataList(columnMetaDataList);
- resultSet.setRowDataList(rowDataList);
- return resultSet;
- }
-
- public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
- throws SQLException {
-
- /** add by zyyang **********/
- Statement stmt = null;
- if (null != conn && !conn.isClosed()) {
- stmt = conn.createStatement();
- if (catalog == null || catalog.isEmpty())
- return null;
-
- stmt.executeUpdate("use " + catalog);
- DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
- // set up ColumnMetaDataList
- List columnMetaDataList = new ArrayList<>(24);
- columnMetaDataList.add(null);
- columnMetaDataList.add(null);
- // add TABLE_NAME
- ColumnMetaData colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(3);
- colMetaData.setColName("TABLE_NAME");
- colMetaData.setColSize(193);
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
- columnMetaDataList.add(colMetaData);
- // add COLUMN_NAME
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(4);
- colMetaData.setColName("COLUMN_NAME");
- colMetaData.setColSize(65);
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
- columnMetaDataList.add(colMetaData);
- // add DATA_TYPE
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(5);
- colMetaData.setColName("DATA_TYPE");
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
- columnMetaDataList.add(colMetaData);
- // add TYPE_NAME
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(6);
- colMetaData.setColName("TYPE_NAME");
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
- columnMetaDataList.add(colMetaData);
- // add COLUMN_SIZE
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(7);
- colMetaData.setColName("COLUMN_SIZE");
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
- columnMetaDataList.add(colMetaData);
- // add BUFFER_LENGTH ,not used
- columnMetaDataList.add(null);
- // add DECIMAL_DIGITS
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(9);
- colMetaData.setColName("DECIMAL_DIGITS");
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
- columnMetaDataList.add(colMetaData);
- // add NUM_PREC_RADIX
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(10);
- colMetaData.setColName("NUM_PREC_RADIX");
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
- columnMetaDataList.add(colMetaData);
- // add NULLABLE
- colMetaData = new ColumnMetaData();
- colMetaData.setColIndex(11);
- colMetaData.setColName("NULLABLE");
- colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
- columnMetaDataList.add(colMetaData);
-
- resultSet.setColumnMetaDataList(columnMetaDataList);
-
- // set up rowDataList
- ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern);
- List rowDataList = new ArrayList<>();
- int index = 0;
- while (resultSet0.next()) {
- TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
- // set TABLE_NAME
- rowData.setString(2, tableNamePattern);
- // set COLUMN_NAME
- rowData.setString(3, resultSet0.getString(1));
- // set DATA_TYPE
- String typeName = resultSet0.getString(2);
- rowData.setInt(4, getDataType(typeName));
- // set TYPE_NAME
- rowData.setString(5, typeName);
- // set COLUMN_SIZE
- int length = resultSet0.getInt(3);
- rowData.setInt(6, getColumnSize(typeName, length));
- // set DECIMAL_DIGITS
- rowData.setInt(8, getDecimalDigits(typeName));
- // set NUM_PREC_RADIX
- rowData.setInt(9, 10);
- // set NULLABLE
- rowData.setInt(10, getNullable(index, typeName));
- rowDataList.add(rowData);
- index++;
- }
- resultSet.setRowDataList(rowDataList);
-
-// GetColumnsResultSet getColumnsResultSet = new GetColumnsResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, columnNamePattern);
-// return getColumnsResultSet;
-// DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
- return resultSet;
- } else {
+ if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
- }
-
- /*************************/
-
-// return getEmptyResultSet();
- }
-
- private int getNullable(int index, String typeName) {
- if (index == 0 && "TIMESTAMP".equals(typeName))
- return DatabaseMetaData.columnNoNulls;
- return DatabaseMetaData.columnNullable;
+ return super.getTableTypes();
}
- private int getColumnSize(String typeName, int length) {
- switch (typeName) {
- case "TIMESTAMP":
- return 23;
-
- default:
- return 0;
- }
- }
-
- private int getDecimalDigits(String typeName) {
- switch (typeName) {
- case "FLOAT":
- return 5;
- case "DOUBLE":
- return 9;
- default:
- return 0;
- }
- }
-
- private int getDataType(String typeName) {
- switch (typeName) {
- case "TIMESTAMP":
- return Types.TIMESTAMP;
- case "INT":
- return Types.INTEGER;
- case "BIGINT":
- return Types.BIGINT;
- case "FLOAT":
- return Types.FLOAT;
- case "DOUBLE":
- return Types.DOUBLE;
- case "BINARY":
- return Types.BINARY;
- case "SMALLINT":
- return Types.SMALLINT;
- case "TINYINT":
- return Types.TINYINT;
- case "BOOL":
- return Types.BOOLEAN;
- case "NCHAR":
- return Types.NCHAR;
- default:
- return Types.NULL;
- }
- }
-
- public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern)
- throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern)
- throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable)
- throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
+ public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
+ if (conn == null || conn.isClosed())
+ throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
+ return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, conn);
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
- String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getTypeInfo() throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate)
- throws SQLException {
- return getEmptyResultSet();
- }
-
- public boolean supportsResultSetType(int type) throws SQLException {
- return false;
- }
-
- public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
- return false;
- }
-
- public boolean ownUpdatesAreVisible(int type) throws SQLException {
- return false;
- }
-
- public boolean ownDeletesAreVisible(int type) throws SQLException {
- return false;
- }
-
- public boolean ownInsertsAreVisible(int type) throws SQLException {
- return false;
- }
-
- public boolean othersUpdatesAreVisible(int type) throws SQLException {
- return false;
- }
-
- public boolean othersDeletesAreVisible(int type) throws SQLException {
- return false;
- }
-
- public boolean othersInsertsAreVisible(int type) throws SQLException {
- return false;
- }
-
- public boolean updatesAreDetected(int type) throws SQLException {
- return false;
- }
-
- public boolean deletesAreDetected(int type) throws SQLException {
- return false;
- }
-
- public boolean insertsAreDetected(int type) throws SQLException {
- return false;
- }
-
- public boolean supportsBatchUpdates() throws SQLException {
- return false;
- }
-
- public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types)
- throws SQLException {
- return getEmptyResultSet();
- }
-
- public Connection getConnection() throws SQLException {
- return this.conn;
- }
-
- public boolean supportsSavepoints() throws SQLException {
- return false;
- }
-
- public boolean supportsNamedParameters() throws SQLException {
- return false;
- }
-
- public boolean supportsMultipleOpenResults() throws SQLException {
- return false;
- }
-
- public boolean supportsGetGeneratedKeys() throws SQLException {
- return false;
- }
-
- public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
- return getEmptyResultSet();
+ if (conn == null || conn.isClosed())
+ throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
+ return super.getPrimaryKeys(catalog, schema, table, conn);
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
- String attributeNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
-
- public boolean supportsResultSetHoldability(int holdability) throws SQLException {
- if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
- return true;
- return false;
- }
-
- public int getResultSetHoldability() throws SQLException {
- return ResultSet.HOLD_CURSORS_OVER_COMMIT;
- }
-
- public int getDatabaseMajorVersion() throws SQLException {
- return 2;
- }
-
- public int getDatabaseMinorVersion() throws SQLException {
- return 0;
- }
-
- public int getJDBCMajorVersion() throws SQLException {
- return 2;
- }
-
- public int getJDBCMinorVersion() throws SQLException {
- return 0;
- }
-
- public int getSQLStateType() throws SQLException {
- return 0;
- }
-
- public boolean locatorsUpdateCopy() throws SQLException {
- return false;
- }
-
- public boolean supportsStatementPooling() throws SQLException {
- return false;
- }
-
- public RowIdLifetime getRowIdLifetime() throws SQLException {
- return null;
- }
-
- public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
- return null;
- }
-
- public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
- return false;
- }
-
- public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
- return false;
- }
-
- public ResultSet getClientInfoProperties() throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern)
- throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
- String columnNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
-
- public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
- String columnNamePattern) throws SQLException {
- return getEmptyResultSet();
- }
-
- public boolean generatedKeyAlwaysReturned() throws SQLException {
- return false;
+ if (conn == null || conn.isClosed())
+ throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
+ return super.getSuperTables(catalog, schemaPattern, tableNamePattern, conn);
}
- private ResultSet getEmptyResultSet() {
- return new EmptyResultSet();
- }
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java
deleted file mode 100644
index d9227523d4ac623d23d85be6376a1530ca606751..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/***************************************************************************
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- *****************************************************************************/
-package com.taosdata.jdbc;
-
-import java.sql.ParameterMetaData;
-import java.sql.SQLException;
-
-public class TSDBParameterMetaData implements ParameterMetaData {
- @Override
- public int getParameterCount() throws SQLException {
- return 0;
- }
-
- @Override
- public int isNullable(int param) throws SQLException {
- return 0;
- }
-
- @Override
- public boolean isSigned(int param) throws SQLException {
- return false;
- }
-
- @Override
- public int getPrecision(int param) throws SQLException {
- return 0;
- }
-
- @Override
- public int getScale(int param) throws SQLException {
- return 0;
- }
-
- @Override
- public int getParameterType(int param) throws SQLException {
- return 0;
- }
-
- @Override
- public String getParameterTypeName(int param) throws SQLException {
- return null;
- }
-
- @Override
- public String getParameterClassName(int param) throws SQLException {
- return null;
- }
-
- @Override
- public int getParameterMode(int param) throws SQLException {
- return 0;
- }
-
- @Override
- public T unwrap(Class iface) throws SQLException {
- return null;
- }
-
- @Override
- public boolean isWrapperFor(Class> iface) throws SQLException {
- return false;
- }
-}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
index c57f19550dd14719baecb835d76263df1e6a669b..6518bf10e444a05073206e1ef72b8f21a87e26b1 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
@@ -26,9 +26,9 @@ public class TSDBResultSetRowData {
public TSDBResultSetRowData(int colSize) {
this.setColSize(colSize);
}
-
+
public TSDBResultSetRowData() {
- this.data = new ArrayList