提交 4b3eafad 作者:Haojun Liao

Merge branch 'develop' of https://github.com/taosdata/TDengine into develop

@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
  #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
  #INSTALL(TARGETS shell RUNTIME DESTINATION .)
  IF (TD_MVN_INSTALLED)
    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.16-dist.jar DESTINATION connector/jdbc)
  ENDIF ()
ELSEIF (TD_DARWIN)
  SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
...
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
  SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
  SET(TD_VER_NUMBER "2.0.14.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
...
@@ -68,7 +68,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
2) UPDATE 标志数据库支持更新相同时间戳数据(建库示例见下);
3) 数据库名最大长度为33;
4) 一条 SQL 语句的最大长度为65480个字符;
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
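下面给出一个带 UPDATE 参数的建库语句示意(其中库名 power 以及 KEEP、DAYS 的取值均为假设,仅用于说明写法):

```mysql
-- 建库时打开 UPDATE,允许用相同时间戳覆盖已写入的数据
CREATE DATABASE IF NOT EXISTS power KEEP 365 DAYS 10 UPDATE 1;
```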
- **显示系统当前参数**
@@ -119,6 +121,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
**Tips**: 以上所有参数修改后都可以用 show databases 来确认是否修改成功。
- **显示系统所有数据库**
```mysql
SHOW DATABASES;
```
@@ -130,10 +133,15 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]);
```
说明:
1) 表的第一个字段必须是TIMESTAMP,并且系统自动将其设为主键;
2) 表名最大长度为192;
3) 表的每行长度不能超过16k个字符;
4) 子表名只能由字母、数字和下划线组成,且不能以数字开头;
5) 使用数据类型binary或nchar,需指定其最长的字节数,如binary(20),表示20字节;
- **以超级表为模板创建数据表**
@@ -149,7 +157,12 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
以更快的速度批量创建大量数据表(服务器端 2.0.14 及以上版本,示例见下)。
说明:
1) 批量建表方式要求数据表必须以超级表为模板。
2) 在不超出 SQL 语句长度限制的前提下,单条语句中的建表数量建议控制在 1000~3000 之间,将会获得比较理想的建表速度。
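下面是一个以文档后文示例中的超级表 meters 为模板、批量创建两张子表的示意(子表名与标签取值均为假设):

```mysql
-- 一条语句同时创建 d2001、d2002 两张子表
CREATE TABLE IF NOT EXISTS d2001 USING meters TAGS ('Beijing.Chaoyang', 2)
                           d2002 USING meters TAGS ('Beijing.Haidian', 3);
```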
- **删除数据表**
@@ -164,7 +177,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
```
显示当前数据库下的所有数据表信息。
说明:可在 LIKE 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。
通配符匹配:1) '%'(百分号)匹配0到任意个字符;2) '\_'(下划线)匹配一个字符。
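例如,下面的写法(前缀 d 仅为示意)会列出当前数据库中所有以 d 开头的数据表:

```mysql
SHOW TABLES LIKE 'd%';
```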
- **在线修改显示字符宽度**
@@ -185,7 +200,9 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
ALTER TABLE tb_name ADD COLUMN field_name data_type;
```
说明:
1) 列的最大个数为1024,最小个数为2;
2) 列名最大长度为64;
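例如,给某张普通表增加一个新列(表名 sensor_tb 与列名 temperature 均为假设):

```mysql
ALTER TABLE sensor_tb ADD COLUMN temperature FLOAT;
```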
- **表删除列**
@@ -204,9 +221,13 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
创建STable,与创建表的SQL语法相似,但需指定TAGS字段的名称和类型。
说明:
1) TAGS 列的数据类型不能是timestamp类型;
2) TAGS 列名不能与其他列名相同;
3) TAGS 列名不能为预留关键字;
4) TAGS 最多允许128个,至少1个,总长度不超过16k个字符。
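例如,后文查询示例中使用的超级表 meters 即可按如下方式创建:

```mysql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(30), groupId INT);
```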
- **删除超级表**
@@ -333,7 +354,9 @@ SELECT select_expr [, select_expr ...]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
```
说明:针对 insert 类型的 SQL 语句,我们采用的是流式解析策略,在发现后面的错误之前,前面正确的部分 SQL 仍会执行。下面的 SQL 中,insert 语句是无效的,但是 d1001 仍会被创建。
```mysql
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
Query OK, 0 row(s) affected (0.008245s)
@@ -614,10 +637,20 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```
功能说明:统计表/超级表中记录行数或某列的非空值个数。
返回结果数据类型:长整型INT64。
应用字段:应用全部字段。
适用于:表、超级表。
说明:
1) 可以使用星号(*)来替代具体的字段,使用星号(*)返回全部记录数量。
2) 针对同一表的(不包含NULL值)字段查询结果均相同。
3) 如果统计对象是具体的列,则返回该列中非NULL值的记录数量。
示例:
```mysql
@@ -639,8 +672,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT AVG(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表/超级表中某列的平均值。
返回结果数据类型:双精度浮点数Double。
应用字段:不能应用在timestamp、binary、nchar、bool字段。
适用于:表、超级表。
示例:
@@ -663,8 +699,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
返回结果数据类型:双精度浮点数Double。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
适用于:表、超级表。
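TWA 需要在 WHERE 子句中限定时间区间。下面是一个示意(表名 d1001 取自后文示例,时间范围为假设):

```mysql
SELECT TWA(current) FROM d1001 WHERE ts >= '2018-10-03 14:38:05.000' AND ts <= '2018-10-03 14:38:16.800';
```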
- **SUM**
@@ -672,8 +711,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT SUM(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表/超级表中某列的和。
返回结果数据类型:双精度浮点数Double和长整型INT64。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
适用于:表、超级表。
示例:
@@ -696,9 +738,12 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表中某列的均方差。
返回结果数据类型:双精度浮点数Double。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
适用于:表。(从 2.0.15 版本开始,本函数也支持超级表)
示例:
```mysql
@@ -714,9 +759,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
```
功能说明:统计表中某列的值与主键(时间戳)之间的拟合直线方程。start_val是自变量初始值,step_val是自变量的步长值。
返回结果数据类型:字符串表达式(斜率, 截距)。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:自变量是时间戳,因变量是该列的值。
适用于:表。
示例:
@@ -735,7 +784,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
功能说明:统计表/超级表中某列的最小值。
返回结果数据类型:同应用的字段。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
示例:
@@ -758,7 +809,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的最大值。
返回结果数据类型:同应用的字段。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
示例:
@@ -781,9 +834,18 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列最先写入的非NULL值。
返回结果数据类型:同应用的字段。
应用字段:所有字段。
说明:
1) 如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*);
2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;
3) 如果结果集中所有列全部为NULL值,则不返回结果。
示例:
```mysql
@@ -805,9 +867,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列最后写入的非NULL值。
返回结果数据类型:同应用的字段。
应用字段:所有字段。
说明:
1) 如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);
2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。
示例:
```mysql
@@ -829,9 +898,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的值最大的*k*个非NULL值。若多于k个列值并列最大,则返回时间戳小的。
返回结果数据类型:同应用的字段。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:
1) *k*值取值范围1≤*k*≤100;
2) 系统同时返回该记录关联的时间戳列。
示例:
```mysql
@@ -856,9 +932,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的值最小的*k*个非NULL值。若多于k个列值并列最小,则返回时间戳小的。
返回结果数据类型:同应用的字段。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:
1) *k*值取值范围1≤*k*≤100;
2) 系统同时返回该记录关联的时间戳列。
示例:
```mysql
@@ -882,8 +965,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
功能说明:统计表中某列的值的百分比分位数。
返回结果数据类型:双精度浮点数Double。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。
示例:
@@ -900,9 +986,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表中某列的值的百分比分位数,与PERCENTILE函数相似,但是返回近似结果。
返回结果数据类型:双精度浮点数Double。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数。
```mysql
taos> SELECT APERCENTILE(current, 20) FROM d1001;
apercentile(current, 20)  |
@@ -916,8 +1006,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
功能说明:返回表(超级表)的最后一条记录。
返回结果数据类型:同应用的字段。
应用字段:所有字段。
说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。
示例:
@@ -941,8 +1034,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT DIFF(field_name) FROM tb_name [WHERE clause];
```
功能说明:统计表中某列的值与前一行对应值的差。
返回结果数据类型:同应用字段。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:输出结果行数是范围内总行数减一,第一行没有结果输出。
示例:
@@ -960,8 +1056,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列的最大值和最小值之差。
返回结果数据类型:双精度浮点数。
应用字段:不能应用在binary、nchar、bool类型字段。
说明:可用于TIMESTAMP字段,此时表示记录的时间覆盖范围。
示例:
@@ -985,9 +1084,16 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
```
功能说明:统计表/超级表中某列或多列间的值加、减、乘、除、取余计算结果。
返回结果数据类型:双精度浮点数。
应用字段:不能应用在timestamp、binary、nchar、bool类型字段。
说明:
1) 支持两列或多列之间进行计算,可使用括号控制计算优先级;
2) NULL字段不参与计算,如果参与计算的某行中包含NULL,该行的计算结果为NULL。
```mysql
taos> SELECT current + voltage * phase FROM d1001;
@@ -1051,7 +1157,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
- 数据库名最大长度为32
- 表名最大长度为192,每行数据最大长度16k个字符
- 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳
- 标签最多允许128个,至少1个,标签总长度不超过16k个字符
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M
- 库的数目、超级表的数目、表的数目,系统不做限制,仅受系统资源限制
...
@@ -255,7 +255,7 @@ taos -C 或 taos --dump-config
CREATE USER <user_name> PASS <'password'>;
```
创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角。
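例如(用户名 user1 与密码均为假设,密码不超过15个byte):

```mysql
CREATE USER user1 PASS 'taosdata123';
```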
```sql
DROP USER <user_name>;
@@ -267,13 +267,15 @@ DROP USER <user_name>;
ALTER USER <user_name> PASS <'password'>;
```
修改用户密码,为避免被转换为小写,密码需要用单引号引用,单引号为英文半角。
```sql
ALTER USER <user_name> PRIVILEGE <write|read>;
```
修改用户权限为:write 或 read,不需要添加单引号。
说明:系统内共有 super/write/read 三种权限级别,但目前不允许通过 alter 指令把 super 权限赋予用户。
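例如,将前文创建的用户 user1 的权限改为只读(用户名仅为示意):

```mysql
ALTER USER user1 PRIVILEGE read;
```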
```mysql
SHOW USERS;
@@ -432,11 +434,12 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- 数据库名:不能包含“.”以及特殊字符,不能超过32个字符
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过192个字符
- 表的列名:不能包含特殊字符,不能超过64个字符
- 数据库名、表名、列名,都不能以数字开头
- 表的列数:不能超过1024列
- 记录的最大长度:包括时间戳8 byte,不能超过16KB
- 单条SQL语句默认最大字符串长度:65480 byte
- 数据库副本数:不能超过3
- 用户名:不能超过23个byte
- 用户密码:不能超过15个byte
- 标签(Tags)数量:不能超过128个
- 标签的总长度:不能超过16Kbyte
...
@@ -248,7 +248,7 @@ Master Vnode遵循下面的写入流程:
1. Master vnode收到应用的数据插入请求,验证OK,进入下一步;
2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes,该转发包带有数据的版本号(version);
4. 写入内存,并将记录加入到skip list;
5. Master vnode返回确认信息给应用,表示写入成功。
6. 如果第2、3、4步中任何一步失败,将直接返回错误给应用。
@@ -372,7 +372,7 @@ select count(*) from d1001 interval(1h);
select count(*) from d1001 interval(1h) fill(prev);
```
针对d1001设备采集数据统计每小时记录数,如果某一个小时不存在数据,返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。
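按同样的写法也可以改用其他填充方式,例如用特定值填充没有数据的时间段(填充值 0 仅为示意):

```mysql
select count(*) from d1001 interval(1h) fill(value, 0);
```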
### 多表聚合查询
TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效地进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
...
@@ -218,7 +218,7 @@ SHOW MNODES;
如果一个数据节点离线,TDengine集群将自动检测到。有如下两种情况:
- 该数据节点离线超过一定时间(taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。被删除的数据节点重新上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
- 离线后,在offlineThreshold的时长内重新上线,系统将自动启动数据恢复流程,等数据完全恢复后,该节点将开始正常工作。
**注意:** 如果一个虚拟节点组(包括mnode组)里所归属的每个数据节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个数据节点,副本数为3,如果3个数据节点都宕机,然后2个数据节点重启,是无法工作的,只有等3个数据节点都重启成功,才能对外服务。
@@ -227,5 +227,5 @@ SHOW MNODES;
如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator,那么节点B就能正常工作。
TDengine提供一个执行程序tarbitrator,找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/),在TDengine Arbitrator Linux一节中,选择适合的版本下载并安装。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6042。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了,当副本数为偶数,系统将自动连接配置的arbitrator。如果副本数为奇数,即使配置了arbitrator,系统也不会去建立连接。
@@ -89,6 +89,8 @@ SHOW DNODES;
```
它将列出集群中所有的dnode,每个dnode的fqdn:port、状态(ready、offline等)、vnode数目、还未使用的vnode数目等信息。在添加或删除一个节点后,可以使用该命令查看。
如果集群配置了Arbitrator,那么它也会在这个节点列表中显示出来,其role列的值会是“arb”。
### 查看虚拟节点组
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode是由mnode根据当前系统资源的情况自动进行分配的,无需任何人工干预。
@@ -139,4 +141,6 @@ SHOW MNODES;
如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到arbitrator,那么节点B就能正常工作。
TDengine安装包里带有一个执行程序tarbitrator,找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求,只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号,缺省是6030。配置每个taosd实例时,可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了,当副本数为偶数时,系统将自动连接配置的Arbitrator。
在配置了Arbitrator的情况下,它也会显示在“show dnodes;”指令给出的节点列表中。
@@ -252,7 +252,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- `void taos_free_result(TAOS_RES *res)`
释放查询结果集以及相关的资源。查询完成后,务必调用该API释放资源,否则可能导致应用内存泄露。但也需注意,释放资源后,如果再调用`taos_consume`等获取查询结果的函数,将导致应用Crash。
- `char *taos_errstr(TAOS_RES *res)`
@@ -262,11 +262,11 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
获取最近一次API调用失败的原因,返回值为错误代码。
**注意**:2.0及以上版本 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池。而不推荐在应用中将该连接 (TAOS\*) 结构体传递到不同的线程共享使用。基于 TAOS 结构体发出的查询、写入等操作具有多线程安全性,但 “USE statement” 等状态量有可能在线程之间相互干扰。此外,C 语言的连接器可以按照需求动态建立面向数据库的新连接(该过程对用户不可见),同时建议只有在程序最后退出的时候才调用 taos_close 关闭连接。
### 异步查询API
同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2\~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。
异步API都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的API而定。第一个参数param是应用调用异步API时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是SQL操作的结果集,如果为空,比如insert操作,表示没有记录返回,如果不为空,比如select操作,表示有记录返回。
@@ -288,13 +288,6 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
* res:`taos_query_a`回调时返回的结果集
* fp:回调函数。其参数`param`是用户可定义的传递给回调函数的参数结构体;`numOfRows`是获取到的数据的行数(不是整个查询结果集的行数)。在回调函数中,应用可以通过调用`taos_fetch_row`前向迭代获取批量记录中每一行记录。读完一块内的所有记录后,应用需要在回调函数中继续调用`taos_fetch_rows_a`获取下一批记录进行处理,直到返回的记录数(numOfRows)为零(结果返回完成)或记录数为负值(查询出错)。
- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
异步获取一条记录。其中:
* res:`taos_query_a`回调时返回的结果集
* fp:回调函数。其参数`param`是应用提供的一个用于回调的参数。回调时,第三个参数`row`指向一行记录。不同于`taos_fetch_rows_a`,应用无需调用`taos_fetch_row`来获取一行数据,更加简单,但数据提取性能不及批量获取的API。
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
### 参数绑定API
@@ -425,7 +418,7 @@ cd C:\TDengine\connector\python\windows
python -m pip install python3\
```
* 如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。
对于Windows客户端,安装TDengine Windows客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
### 使用
@@ -442,7 +435,7 @@ import taos
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* <em>host</em> 是TDengine服务端所在IP,<em>config</em> 为客户端配置文件所在目录
* 写入数据
```python
@@ -510,17 +503,17 @@ conn.close()
用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法:
- _TDengineConnection_ 类
参考python中help(taos.TDengineConnection)。
这个类对应客户端和TDengine建立的一个连接。在客户端多线程的场景下,推荐每个线程申请一个独立的连接实例,而不建议多线程共享一个连接。
- _TDengineCursor_ 类
参考python中help(taos.TDengineCursor)。
这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
- _connect_ 方法
用于生成taos.TDengineConnection的实例。
@@ -800,7 +793,7 @@ go env -w GOPROXY=https://goproxy.io,direct
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
该API用来打开DB,返回一个类型为\*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
**注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中,所以sql.Open本身特别轻量。
@@ -822,7 +815,7 @@ go env -w GOPROXY=https://goproxy.io,direct
- `func (s *Stmt) Query(args ...interface{}) (*Rows, error)`
sql.Open内置的方法,Query executes a prepared query statement with the given arguments and returns the query results as a \*Rows.
- `func (s *Stmt) Close() error`
@@ -894,7 +887,7 @@ Node-example-raw.js
验证方法:
1. 新建安装验证目录,例如:\~/tdengine-test,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。
2. 在命令行中执行以下命令:
...
# 常见问题
## 0. 怎么报告问题?
如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包:
1. /var/log/taos (如果没有修改过默认路径)
2. /etc/taos
附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在<a href='https://github.com/taosdata/TDengine'> GitHub</a>提交Issue。
为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。
```
alter dnode <dnode_id> debugFlag 135;
```
但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。
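复现并收集完日志后,可以用同样的方式把日志级别改回默认值,例如(这里的 dnode_id 取 1 仅为示意):

```mysql
alter dnode 1 debugFlag 131;
```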
## 1. TDengine2.0之前的版本升级到2.0及以上的版本应该注意什么?☆☆☆
2.0版本在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作:
@@ -118,16 +132,8 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保集群内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
## 17. TDengine 是否支持删除或更新已经写入的数据?
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
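下面是一个完整的示意(库名、表结构与时间戳均为假设),展示建库时开启 UPDATE 后,如何用相同时间戳覆盖已写入的数据:

```mysql
CREATE DATABASE demo_db UPDATE 1;
USE demo_db;
CREATE TABLE t1 (ts TIMESTAMP, val INT);
INSERT INTO t1 VALUES ('2020-01-01 00:00:00.000', 1);
-- 再次写入相同时间戳,val 将被更新为 2
INSERT INTO t1 VALUES ('2020-01-01 00:00:00.000', 2);
```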
@@ -36,6 +36,9 @@
# 0.0: only one core available.
# tsRatioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
# keepColumnName 0
# number of management nodes in the system
# numOfMnodes 3
@@ -159,10 +162,10 @@
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1
# if disk free space is less than this value, taosd service exit directly within startup process
# minimalDataDirGB 0.1
# One mnode is equal to the number of vnode consumed
...
name: tdengine
base: core18
version: '2.0.14.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
        - usr/bin/taosd
        - usr/bin/taos
        - usr/bin/taosdemo
        - usr/lib/libtaos.so.2.0.14.0
        - usr/lib/libtaos.so.1
        - usr/lib/libtaos.so
...
@@ -13,8 +13,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef TDENGINE_TSCLOG_H
#define TDENGINE_TSCLOG_H
#ifdef __cplusplus
extern "C" {
@@ -22,7 +22,7 @@ extern "C" {
#include "tlog.h"
extern uint32_t cDebugFlag;
extern int8_t tscEmbedded;
#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)
...
@@ -13,8 +13,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef TDENGINE_TSCSUBQUERY_H
#define TDENGINE_TSCSUBQUERY_H
#ifdef __cplusplus
extern "C" {
@@ -47,9 +47,8 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
#ifdef __cplusplus
}
#endif
#endif  // TDENGINE_TSCSUBQUERY_H
@@ -110,7 +110,7 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
                                STableDataBlocks** dataBlocks, SArray* pBlockList);
@@ -256,11 +256,11 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
 * @param pPrevSql
 * @return
 */
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
...
@@ -24,10 +24,6 @@ extern "C" {
#include "tstoken.h"
#include "tsclient.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
/**
 * get the number of tags of this table
 * @param pTableMeta
@@ -79,26 +75,6 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
 */
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* check if the schema is valid or not, including following aspects:
* 1. number of columns
* 2. column types
* 3. column length
* 4. column names
* 5. total length
*
* @param pSchema
* @param numOfCols
* @return
*/
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
/**
* get the schema for the "tbname" column. it is a built column
* @return
*/
SSchema tscGetTbnameColumnSchema();
/**
 * create the table meta from the msg
 * @param pTableMetaMsg
...
@@ -235,7 +235,7 @@ typedef struct {
    int32_t numOfTablesInSubmit;
  };
  uint32_t insertType;   // TODO remove it
  int32_t  clauseIndex;  // index of multiple subclause query
  char    *curSql;       // current sql, resume position of sql after parsing paused
@@ -317,7 +317,8 @@ typedef struct STscObj {
} STscObj;
typedef struct SSubqueryState {
  pthread_mutex_t mutex;
  int8_t  *states;
  int32_t  numOfSub;            // the number of total sub-queries
  uint64_t numOfRetrievedRows;  // total number of points in this query
} SSubqueryState;
@@ -327,8 +328,8 @@ typedef struct SSqlObj {
  pthread_t owner;  // owner of sql object, by which it is executed
  STscObj  *pTscObj;
  int64_t   rpcRid;
  __async_cb_func_t fp;
  __async_cb_func_t fetchFp;
  void     *param;
  int64_t   stime;
  uint32_t  queryId;
@@ -463,7 +464,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
    pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
    pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData;
  } else {
    assert(bytes == tDataTypes[type].bytes);
    pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64;
    pRes->length[columnIndex] = bytes;
@@ -480,7 +481,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
    pRes->length[columnIndex] = realLen;
  } else {
    assert(bytes == tDataTypes[type].bytes);
    pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)pData;
    pRes->length[columnIndex] = bytes;
...
@@ -32,7 +32,6 @@ taos_errstr
taos_errno
taos_query_a
taos_fetch_rows_a
taos_fetch_row_a
taos_subscribe
taos_consume
taos_unsubscribe
...
@@ -20,24 +20,19 @@
#include "trpc.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscLocalMerge.h"
#include "tscUtil.h"
#include "tsched.h"
#include "tschemautil.h"
#include "tsclient.h"
static void tscProcessFetchRow(SSchedMsg *pMsg);
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)());
/*
 * Proxy function to perform sequentially query&retrieve operation.
 * If sql queries upon a super table and two-stage merge procedure is not involved (when employ the projection
 * query), it will sequentially query&retrieve data for all vnodes
 */
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* param, const char* sqlstr, size_t sqlLen) {
  SSqlCmd* pCmd = &pSql->cmd;
@@ -148,7 +143,7 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
}
// actual continue retrieve function with user-specified callback function
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, __async_cb_func_t fp) {
  SSqlObj *pSql = (SSqlObj *)tres;
  if (pSql == NULL) { // error
    tscError("sql object is NULL");
@@ -191,11 +186,6 @@ static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOf
  tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
}
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
// query completed, continue to retrieve
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
}
void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
  SSqlObj *pSql = (SSqlObj *)taosa;
  if (pSql == NULL || pSql->signature != pSql) {
@@ -263,103 +253,6 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
  }
}
void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), void *param) {
SSqlObj *pSql = (SSqlObj *)taosa;
if (pSql == NULL || pSql->signature != pSql) {
tscError("sql object is NULL");
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
return;
}
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
if (pRes->qhandle == 0) {
tscError("qhandle is NULL");
pSql->param = param;
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
tscAsyncResultOnError(pSql);
return;
}
pSql->fetchFp = fp;
pSql->param = param;
if (pRes->row >= pRes->numOfRows) {
tscResetForNextRetrieve(pRes);
pSql->fp = tscAsyncFetchSingleRowProxy;
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
} else {
SSchedMsg schedMsg = { 0 };
schedMsg.fp = tscProcessFetchRow;
schedMsg.ahandle = pSql;
schedMsg.thandle = pRes->tsrow;
schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
}
}
void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) {
SSqlObj *pSql = (SSqlObj *)tres;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (numOfRows == 0) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode);
} else {
/*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
*/
(*pSql->fetchFp)(pSql->param, pSql, NULL);
}
return;
}
for (int i = 0; i < pCmd->numOfCols; ++i){
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i) + pSup->pSqlExpr->resBytes * pRes->row;
} else {
//todo add
}
}
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow);
}
void tscProcessFetchRow(SSchedMsg *pMsg) {
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < pCmd->numOfCols; ++i) {
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
} else {
// todo add
}
}
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow);
}
// this function will be executed by queue task threads, so the terrno is not valid
static void tscProcessAsyncError(SSchedMsg *pMsg) {
  void (*fp)() = pMsg->ahandle;
@@ -372,7 +265,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
  int32_t* c = malloc(sizeof(int32_t));
  *c = code;
  SSchedMsg schedMsg = {0};
  schedMsg.fp = tscProcessAsyncError;
  schedMsg.ahandle = fp;
  schedMsg.thandle = param;
@@ -380,7 +273,6 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
  taosScheduleTask(tscQhandle, &schedMsg);
}
void tscAsyncResultOnError(SSqlObj *pSql) {
  if (pSql == NULL || pSql->signature != pSql) {
    tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
...
@@ -79,7 +79,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
    char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i;
    STR_WITH_MAXSIZE_TO_VARSTR(dst, pSchema[i].name, pField->bytes);
    char *type = tDataTypes[pSchema[i].type].name;
    pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
    dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
@@ -119,7 +119,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
    // type name
    pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
    char *type = tDataTypes[pSchema[i].type].name;
    output = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
    STR_WITH_MAXSIZE_TO_VARSTR(output, type, pField->bytes);
@@ -619,9 +619,9 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
      if (type == TSDB_DATA_TYPE_NCHAR) {
        bytes = bytes/TSDB_NCHAR_SIZE;
      }
      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes);
    } else {
      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name);
    }
  }
  sprintf(result + strlen(result) - 1, "%s", ")");
@@ -646,9 +646,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
      if (type == TSDB_DATA_TYPE_NCHAR) {
        bytes = bytes/TSDB_NCHAR_SIZE;
      }
      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
    } else {
      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name);
    }
  }
  snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS (");
@@ -660,9 +660,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
      if (type == TSDB_DATA_TYPE_NCHAR) {
        bytes = bytes/TSDB_NCHAR_SIZE;
      }
      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
    } else {
      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name);
    }
  }
  sprintf(result + strlen(result) - 1, "%s", ")");
...
@@ -979,14 +979,13 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
    pQueryInfo->limit.offset -= newRows;
    pRes->numOfRows = 0;
    if (!taosFillHasMoreResults(pFillInfo)) {
      if (!doneOutput) { // reduce procedure has not completed yet, but current results for fill are exhausted
        break;
      }
      // all output in current group are completed
      int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
      if (totalRemainRows <= 0) {
        break;
      }
@@ -1337,14 +1336,14 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
  SLocalReducer *pLocalReducer = pRes->pLocalReducer;
  SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
  if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
    assert(pQueryInfo->fillType != TSDB_FILL_NONE);
    tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
    int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
    // the first column must be the timestamp column
    int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
    if (rows > 0) { // do fill gap
      doFillResult(pSql, pLocalReducer, false);
    }
@@ -1373,7 +1372,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
      ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
    int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
    int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
    if (rows > 0) {
      doFillResult(pSql, pLocalReducer, true);
    }
@@ -1428,6 +1427,10 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
  tscResetForNextRetrieve(pRes);
  if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
    if (pRes->code == TSDB_CODE_SUCCESS) {
      return pRes->code;
    }
    tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
    return pRes->code;
  }
...
...@@ -1281,7 +1281,7 @@ int tsParseInsertSql(SSqlObj *pSql) { ...@@ -1281,7 +1281,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
} }
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) { if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
goto _clean; goto _clean;
} }
} }
...@@ -1336,15 +1336,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) { ...@@ -1336,15 +1336,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
} }
if (tscIsInsertData(pSql->sqlstr)) { if (tscIsInsertData(pSql->sqlstr)) {
/*
* Set the fp before parse the sql string, in case of getTableMeta failed, in which
* the error handle callback function can rightfully restore the user-defined callback function (fp).
*/
if (initial && (pSql->cmd.insertType != TSDB_QUERY_TYPE_STMT_INSERT)) {
pSql->fetchFp = pSql->fp;
pSql->fp = (void(*)())tscHandleMultivnodeInsert;
}
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) { if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
return ret; return ret;
} }
...@@ -1398,7 +1389,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock ...@@ -1398,7 +1389,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL); return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
} }
if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) { if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
return code; return code;
} }
......
...@@ -255,7 +255,6 @@ static char* normalStmtBuildSql(STscStmt* stmt) { ...@@ -255,7 +255,6 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation // functions for insertion statement preparation
static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
if (bind->is_null != NULL && *(bind->is_null)) { if (bind->is_null != NULL && *(bind->is_null)) {
setNull(data + param->offset, param->type, param->bytes); setNull(data + param->offset, param->type, param->bytes);
...@@ -697,61 +696,44 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) { ...@@ -697,61 +696,44 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
SSqlCmd* pCmd = &stmt->pSql->cmd; SSqlCmd* pCmd = &stmt->pSql->cmd;
int32_t alloced = 1, binded = 0; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
if (pCmd->batchSize > 0) {
alloced = (pCmd->batchSize + 1) / 2; STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
binded = pCmd->batchSize / 2; if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
} }
size_t size = taosArrayGetSize(pCmd->pDataBlocks); STableDataBlocks* pBlock = NULL;
for (int32_t i = 0; i < size; ++i) {
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i);
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk);
uint32_t dataSize = totalDataSize / alloced;
assert(dataSize * alloced == totalDataSize);
if (alloced == binded) {
totalDataSize += dataSize + sizeof(SSubmitBlk);
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pBlock->pData = (char*)tmp;
pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
}
}
char* data = pBlock->pData + sizeof(SSubmitBlk) + dataSize * binded; int32_t ret =
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) { tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
SParamInfo* param = pBlock->params + j; pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
int code = doBindParam(data, param, bind + param->idx); if (ret != 0) {
if (code != TSDB_CODE_SUCCESS) { // todo handle error
tscDebug("param %d: type mismatch or invalid", param->idx);
return code;
}
}
} }
// actual work of all data blocks is done, update block size and numOfRows. uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
// note we don't do this block by block during the binding process, because if (totalDataSize > pBlock->nAllocSize) {
// we cannot recover if something goes wrong. const double factor = 1.5;
pCmd->batchSize = binded * 2 + 1;
if (binded < alloced) { void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
return TSDB_CODE_SUCCESS; if (tmp == NULL) {
} return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
size_t total = taosArrayGetSize(pCmd->pDataBlocks); pBlock->pData = (char*)tmp;
for (int32_t i = 0; i < total; ++i) { pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i); }
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk); char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * pCmd->batchSize;
pBlock->size += totalDataSize / alloced; for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
SSubmitBlk* pSubmit = (SSubmitBlk*)pBlock->pData; int code = doBindParam(data, param, &bind[param->idx]);
pSubmit->numOfRows += pSubmit->numOfRows / alloced; if (code != TSDB_CODE_SUCCESS) {
tscDebug("param %d: type mismatch or invalid", param->idx);
return code;
}
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -759,9 +741,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) { ...@@ -759,9 +741,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
static int insertStmtAddBatch(STscStmt* stmt) { static int insertStmtAddBatch(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd; SSqlCmd* pCmd = &stmt->pSql->cmd;
if ((pCmd->batchSize % 2) == 1) { ++pCmd->batchSize;
++pCmd->batchSize;
}
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -793,50 +773,66 @@ static int insertStmtExecute(STscStmt* stmt) { ...@@ -793,50 +773,66 @@ static int insertStmtExecute(STscStmt* stmt) {
if (pCmd->batchSize == 0) { if (pCmd->batchSize == 0) {
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
if ((pCmd->batchSize % 2) == 1) {
++pCmd->batchSize;
}
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
assert(pCmd->numOfClause == 1); assert(pCmd->numOfClause == 1);
if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
return TSDB_CODE_SUCCESS;
}
if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
// merge according to vgid
int code = tscMergeTableDataBlocks(stmt->pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
STableDataBlocks *pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// set the next sent data vnode index in data block arraylist STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
pTableMetaInfo->vgroupIndex = 1; if (pCmd->pTableBlockHashList == NULL) {
} else { pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
} }
SSqlObj *pSql = stmt->pSql; STableDataBlocks* pBlock = NULL;
SSqlRes *pRes = &pSql->res;
pRes->numOfRows = 0; int32_t ret =
pRes->numOfTotal = 0; tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pRes->numOfClauseTotal = 0; pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
assert(ret == 0);
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
pBlk->numOfRows = pCmd->batchSize;
pBlk->dataLen = 0;
pBlk->uid = pTableMeta->id.uid;
pBlk->tid = pTableMeta->id.tid;
int code = tscMergeTableDataBlocks(stmt->pSql, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pRes->qhandle = 0; STableDataBlocks* pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pSql->cmd.insertType = 0; SSqlObj* pSql = stmt->pSql;
pSql->fetchFp = waitForQueryRsp; SSqlRes* pRes = &pSql->res;
pSql->fp = (void(*)())tscHandleMultivnodeInsert; pRes->numOfRows = 0;
pRes->numOfTotal = 0;
tscDoQuery(pSql); tscProcessSql(pSql);
// wait for the callback function to post the semaphore // wait for the callback function to post the semaphore
tsem_wait(&pSql->rspSem); tsem_wait(&pSql->rspSem);
return pSql->res.code;
// data block reset
pCmd->batchSize = 0;
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
tfree(pCmd->pTableNameList[i]);
}
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
return pSql->res.code;
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
...@@ -867,11 +863,11 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) { ...@@ -867,11 +863,11 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
} }
tsem_init(&pSql->rspSem, 0, 0); tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql; pSql->signature = pSql;
pSql->pTscObj = pObj; pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA; pSql->maxRetry = TSDB_MAX_REPLICA;
pStmt->pSql = pSql;
pStmt->pSql = pSql;
return pStmt; return pStmt;
} }
...@@ -890,7 +886,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { ...@@ -890,7 +886,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
pSql->param = (void*) pSql; pSql->param = (void*) pSql;
pSql->fp = waitForQueryRsp; pSql->fp = waitForQueryRsp;
pSql->cmd.insertType = TSDB_QUERY_TYPE_STMT_INSERT; pSql->fetchFp = waitForQueryRsp;
pCmd->insertType = TSDB_QUERY_TYPE_STMT_INSERT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) { if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
tscError("%p failed to malloc payload buffer", pSql); tscError("%p failed to malloc payload buffer", pSql);
...@@ -956,8 +954,9 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) { ...@@ -956,8 +954,9 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt->isInsert) { if (pStmt->isInsert) {
return insertStmtBindParam(pStmt, bind); return insertStmtBindParam(pStmt, bind);
} else {
return normalStmtBindParam(pStmt, bind);
} }
return normalStmtBindParam(pStmt, bind);
} }
int taos_stmt_add_batch(TAOS_STMT* stmt) { int taos_stmt_add_batch(TAOS_STMT* stmt) {
...@@ -981,7 +980,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) { ...@@ -981,7 +980,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt; STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt->isInsert) { if (pStmt->isInsert) {
ret = insertStmtExecute(pStmt); ret = insertStmtExecute(pStmt);
} else { } else { // normal stmt query
char* sql = normalStmtBuildSql(pStmt); char* sql = normalStmtBuildSql(pStmt);
if (sql == NULL) { if (sql == NULL) {
ret = TSDB_CODE_TSC_OUT_OF_MEMORY; ret = TSDB_CODE_TSC_OUT_OF_MEMORY;
...@@ -995,6 +994,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) { ...@@ -995,6 +994,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
free(sql); free(sql);
} }
} }
return ret; return ret;
} }
......
...@@ -229,7 +229,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) { ...@@ -229,7 +229,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
} }
int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) { if (pInfo == NULL || pSql == NULL) {
return TSDB_CODE_TSC_APP_ERROR; return TSDB_CODE_TSC_APP_ERROR;
} }
...@@ -264,6 +264,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -264,6 +264,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DROP_DB: { case TSDB_SQL_DROP_DB: {
const char* msg2 = "invalid name"; const char* msg2 = "invalid name";
const char* msg3 = "param name too long"; const char* msg3 = "param name too long";
const char* msg4 = "table is not super table";
SStrToken* pzName = &pInfo->pDCLInfo->a[0]; SStrToken* pzName = &pInfo->pDCLInfo->a[0];
if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {
...@@ -285,6 +286,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -285,6 +286,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if(code != TSDB_CODE_SUCCESS) { if(code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
if (pInfo->pDCLInfo->tableType == TSDB_SUPER_TABLE) {
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) { } else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
pzName->n = strdequote(pzName->z); pzName->n = strdequote(pzName->z);
strncpy(pTableMetaInfo->name, pzName->z, pzName->n); strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
...@@ -642,7 +655,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -642,7 +655,11 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} }
pSql->cmd.parseFinished = 1; pSql->cmd.parseFinished = 1;
return tscBuildMsg[pCmd->command](pSql, pInfo); if (tscBuildMsg[pCmd->command] != NULL) {
return tscBuildMsg[pCmd->command](pSql, pInfo);
} else {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
}
} }
/* /*
...@@ -1738,7 +1755,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS ...@@ -1738,7 +1755,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
return -1; return -1;
} else { } else {
type = TSDB_DATA_TYPE_DOUBLE; type = TSDB_DATA_TYPE_DOUBLE;
bytes = tDataTypeDesc[type].nSize; bytes = tDataTypes[type].bytes;
} }
} else { } else {
type = pSchema->type; type = pSchema->type;
...@@ -1844,7 +1861,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -1844,7 +1861,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} }
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
} else if (sqlOptr == TK_INTEGER) { // select count(1) from table1 } else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
char buf[8] = {0}; char buf[8] = {0};
...@@ -1856,7 +1873,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -1856,7 +1873,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} }
if (val == 1) { if (val == 1) {
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
} else { } else {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
...@@ -1876,12 +1893,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -1876,12 +1893,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
isTag = true; isTag = true;
} }
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag); pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag);
} }
} else { // count(*) is equivalent to count(primary_timestamp_key) } else { // count(*) is equivalent to count(primary_timestamp_key)
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
} }
...@@ -4510,6 +4527,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery ...@@ -4510,6 +4527,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
} }
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV; pQueryInfo->fillType = TSDB_FILL_PREV;
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
pQueryInfo->fillType = TSDB_FILL_LINEAR; pQueryInfo->fillType = TSDB_FILL_LINEAR;
} else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) {
...@@ -4788,6 +4807,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -4788,6 +4807,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg17 = "invalid column name"; const char* msg17 = "invalid column name";
const char* msg18 = "primary timestamp column cannot be dropped"; const char* msg18 = "primary timestamp column cannot be dropped";
const char* msg19 = "invalid new tag name"; const char* msg19 = "invalid new tag name";
const char* msg20 = "table is not super table";
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
...@@ -4813,6 +4833,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -4813,6 +4833,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pAlterSQL->tableType == TSDB_SUPER_TABLE && !(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg20);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN || if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
...@@ -4869,7 +4893,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -4869,7 +4893,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char name1[128] = {0}; char name1[128] = {0};
strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen); strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen);
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
SArray* pVarList = pAlterSQL->varList; SArray* pVarList = pAlterSQL->varList;
...@@ -4905,14 +4929,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -4905,14 +4929,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char name[TSDB_COL_NAME_LEN] = {0}; char name[TSDB_COL_NAME_LEN] = {0};
strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); strncpy(name, pItem->pVar.pz, pItem->pVar.nLen);
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pItem = taosArrayGet(pVarList, 1); pItem = taosArrayGet(pVarList, 1);
memset(name, 0, tListLen(name)); memset(name, 0, tListLen(name));
strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); strncpy(name, pItem->pVar.pz, pItem->pVar.nLen);
f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { } else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
// Note: update can only be applied to table not super table. // Note: update can only be applied to table not super table.
...@@ -4987,7 +5011,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -4987,7 +5011,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t len = 0; int32_t len = 0;
if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) { if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) {
len = tDataTypeDesc[pTagsSchema->type].nSize; len = tDataTypes[pTagsSchema->type].bytes;
} else { } else {
len = varDataTLen(pUpdateMsg->data + schemaLen); len = varDataTLen(pUpdateMsg->data + schemaLen);
} }
...@@ -5034,7 +5058,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { ...@@ -5034,7 +5058,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char name1[TSDB_COL_NAME_LEN] = {0}; char name1[TSDB_COL_NAME_LEN] = {0};
tstrncpy(name1, pItem->pVar.pz, sizeof(name1)); tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
} }
...@@ -5997,7 +6021,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ ...@@ -5997,7 +6021,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
SColumnIndex ind = {0}; SColumnIndex ind = {0};
SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT, SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, getNewResColId(pQueryInfo), tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false); tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pQueryInfo), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false);
const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name; const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName)); tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
...@@ -6101,8 +6125,10 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { ...@@ -6101,8 +6125,10 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
int32_t tmpLen = 0; int32_t tmpLen = 0;
tmpLen = tmpLen =
sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId); sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId);
if (tmpLen + offset >= totalBufSize - 1) break; if (tmpLen + offset >= totalBufSize - 1) break;
offset += sprintf(str + offset, "%s", tmpBuf); offset += sprintf(str + offset, "%s", tmpBuf);
if (i < size - 1) { if (i < size - 1) {
...@@ -6112,6 +6138,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) { ...@@ -6112,6 +6138,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
assert(offset < totalBufSize); assert(offset < totalBufSize);
str[offset] = ']'; str[offset] = ']';
assert(offset < totalBufSize);
tscDebug("%p select clause:%s", pSql, str); tscDebug("%p select clause:%s", pSql, str);
} }
......
...@@ -66,68 +66,6 @@ STableComInfo tscGetTableInfo(const STableMeta* pTableMeta) { ...@@ -66,68 +66,6 @@ STableComInfo tscGetTableInfo(const STableMeta* pTableMeta) {
return pTableMeta->tableInfo; return pTableMeta->tableInfo;
} }
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
// 1. valid types
if (!isValidDataType(pSchema[i].type)) {
return false;
}
// 2. valid length for each type
if (pSchema[i].type == TSDB_DATA_TYPE_BINARY) {
if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) {
return false;
}
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
if (pSchema[i].bytes > TSDB_MAX_NCHAR_LEN) {
return false;
}
} else {
if (pSchema[i].bytes != tDataTypeDesc[pSchema[i].type].nSize) {
return false;
}
}
// 3. valid column names
for (int32_t j = i + 1; j < numOfCols; ++j) {
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
return false;
}
}
rowLen += pSchema[i].bytes;
}
return rowLen <= maxLen;
}
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
if (!VALIDNUMOFTAGS(numOfTags)) {
return false;
}
/* first column must be the timestamp, which is a primary key */
if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) {
return false;
}
if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) {
return false;
}
if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) {
return false;
}
return true;
}
SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) { SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) {
assert(pTableMeta != NULL); assert(pTableMeta != NULL);
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
#include "ttimer.h" #include "ttimer.h"
#include "tlockfree.h" #include "tlockfree.h"
///SRpcCorEpSet tscMgmtEpSet;
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql); int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql);
...@@ -341,7 +339,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { ...@@ -341,7 +339,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (pSql->retry > pSql->maxRetry) { if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry); tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
} else { } else {
// wait for a moment and then retry, todo do not sleep in rpc callback thread // wait for a moment and then retry
// todo do not sleep in rpc callback thread, add this process into a queue to process
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
int32_t duration = getWaitingTimeInterval(pSql->retry); int32_t duration = getWaitingTimeInterval(pSql->retry);
taosMsleep(duration); taosMsleep(duration);
...@@ -484,9 +483,9 @@ int tscProcessSql(SSqlObj *pSql) { ...@@ -484,9 +483,9 @@ int tscProcessSql(SSqlObj *pSql) {
pSql->res.code = TSDB_CODE_TSC_APP_ERROR; pSql->res.code = TSDB_CODE_TSC_APP_ERROR;
return pSql->res.code; return pSql->res.code;
} }
} else if (pCmd->command < TSDB_SQL_LOCAL) { } else if (pCmd->command >= TSDB_SQL_LOCAL) {
//pSql->epSet = tscMgmtEpSet; //pSql->epSet = tscMgmtEpSet;
} else { // local handler // } else { // local handler
return (*tscProcessMsgRsp[pCmd->command])(pSql); return (*tscProcessMsgRsp[pCmd->command])(pSql);
} }
...@@ -1157,7 +1156,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1157,7 +1156,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload; SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pDropTableMsg->tableId, pTableMetaInfo->name); strcpy(pDropTableMsg->tableFname, pTableMetaInfo->name);
pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0; pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE; pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
...@@ -1180,7 +1179,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1180,7 +1179,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo * UNUSED_PARAM(pInfo)) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SDropUserMsg); pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER; pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
...@@ -1347,7 +1346,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1347,7 +1346,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(SCreateTableMsg); pMsg += sizeof(SCreateTableMsg);
SCreatedTableInfo* p = taosArrayGet(list, i); SCreatedTableInfo* p = taosArrayGet(list, i);
strcpy(pCreate->tableId, p->fullname); strcpy(pCreate->tableFname, p->fullname);
pCreate->igExists = (p->igExist)? 1 : 0; pCreate->igExists = (p->igExist)? 1 : 0;
// use dbinfo from table id without modifying current db info // use dbinfo from table id without modifying current db info
...@@ -1360,7 +1359,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1360,7 +1359,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} else { // create (super) table } else { // create (super) table
pCreateTableMsg->numOfTables = htonl(1); // only one table will be created pCreateTableMsg->numOfTables = htonl(1); // only one table will be created
strcpy(pCreateMsg->tableId, pTableMetaInfo->name); strcpy(pCreateMsg->tableFname, pTableMetaInfo->name);
// use dbinfo from table id without modifying current db info // use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db); tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db);
...@@ -1431,7 +1430,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1431,7 +1430,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload; SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db); tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name); strcpy(pAlterTableMsg->tableFname, pTableMetaInfo->name);
pAlterTableMsg->type = htons(pAlterInfo->type); pAlterTableMsg->type = htons(pAlterInfo->type);
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo)); pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
...@@ -1630,7 +1629,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1630,7 +1629,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload; STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
strcpy(pInfoMsg->tableId, pTableMetaInfo->name); strcpy(pInfoMsg->tableFname, pTableMetaInfo->name);
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0); pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg); char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
...@@ -1799,7 +1798,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { ...@@ -1799,7 +1798,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) && if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
(pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) { (pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId, tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
pMetaMsg->tid, pMetaMsg->tableId); pMetaMsg->tid, pMetaMsg->tableFname);
return TSDB_CODE_TSC_INVALID_VALUE; return TSDB_CODE_TSC_INVALID_VALUE;
} }
...@@ -1831,12 +1830,16 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { ...@@ -1831,12 +1830,16 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(isValidDataType(pSchema->type)); assert(isValidDataType(pSchema->type));
pSchema++; pSchema++;
} }
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
assert(pTableMetaInfo->pTableMeta == NULL); assert(pTableMetaInfo->pTableMeta == NULL);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (!isValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("%p invalid table meta from mnode, name:%s", pSql, pTableMetaInfo->name);
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pTableMeta->tableType == TSDB_CHILD_TABLE) { if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
// check if super table hashmap or not // check if super table hashmap or not
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN); int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
...@@ -2097,7 +2100,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { ...@@ -2097,7 +2100,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
return 0; return 0;
} }
static void createHBObj(STscObj* pObj) { static void createHbObj(STscObj* pObj) {
if (pObj->hbrid != 0) { if (pObj->hbrid != 0) {
return; return;
} }
...@@ -2160,7 +2163,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) { ...@@ -2160,7 +2163,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
pObj->superAuth = pConnect->superAuth; pObj->superAuth = pConnect->superAuth;
pObj->connId = htonl(pConnect->connId); pObj->connId = htonl(pConnect->connId);
createHBObj(pObj); createHbObj(pObj);
//launch a timer to send heartbeat to maintain the connection and send status to mnode //launch a timer to send heartbeat to maintain the connection and send status to mnode
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, (void *)pObj->rid, tscTmr, &pObj->pTimer); taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, (void *)pObj->rid, tscTmr, &pObj->pTimer);
...@@ -2190,7 +2193,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { ...@@ -2190,7 +2193,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name, tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
(int32_t) taosHashGetSize(tscTableMetaInfo)); (int32_t) taosHashGetSize(tscTableMetaInfo));
assert(pTableMetaInfo->pTableMeta == NULL); pTableMetaInfo->pTableMeta = NULL;
return 0; return 0;
} }
......
...@@ -781,6 +781,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { ...@@ -781,6 +781,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int len = 0; int len = 0;
for (int i = 0; i < num_fields; ++i) { for (int i = 0; i < num_fields; ++i) {
if (i > 0) { if (i > 0) {
str[len++] = ' '; str[len++] = ' ';
...@@ -838,13 +839,15 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) ...@@ -838,13 +839,15 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: { case TSDB_DATA_TYPE_NCHAR: {
size_t xlen = 0; int32_t charLen = varDataLen((char*)row[i] - VARSTR_HEADER_SIZE);
for (xlen = 0; xlen < fields[i].bytes - VARSTR_HEADER_SIZE; xlen++) { if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
char c = ((char *)row[i])[xlen]; assert(charLen <= fields[i].bytes);
if (c == 0) break; } else {
str[len++] = c; assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE);
} }
str[len] = 0;
memcpy(str + len, row[i], charLen);
len += charLen;
} break; } break;
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
......
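Note: the taos_print_row change above copies BINARY/NCHAR columns using their stored variable length instead of scanning for a NUL terminator. A hedged sketch of the typical caller side follows; the connection details and the query are assumptions, and error handling is kept minimal.

```c
#include <stdio.h>
#include <string.h>
#include "taos.h"

/* Sketch only: host, database and query are hypothetical. */
static void printRowsExample(void) {
  TAOS *conn = taos_connect("ubuntu", "root", "taosdata", "test", 0);
  if (conn == NULL) {
    printf("failed to connect to DB, reason:%s\n", taos_errstr(conn));
    return;
  }

  TAOS_RES   *res        = taos_query(conn, "select * from t1 limit 10");
  int         num_fields = taos_num_fields(res);
  TAOS_FIELD *fields     = taos_fetch_fields(res);

  TAOS_ROW row;
  char     line[1024];
  while ((row = taos_fetch_row(res)) != NULL) {
    memset(line, 0, sizeof(line));
    /* writes one space-separated row into line; with the change above,
     * BINARY/NCHAR values are copied with their stored length */
    taos_print_row(line, row, fields, num_fields);
    printf("%s\n", line);
  }

  taos_free_result(res);
  taos_close(conn);
}
```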
This diff has been collapsed.
...@@ -441,6 +441,12 @@ static void tscFreeSubobj(SSqlObj* pSql) { ...@@ -441,6 +441,12 @@ static void tscFreeSubobj(SSqlObj* pSql) {
pSql->pSubs[i] = NULL; pSql->pSubs[i] = NULL;
} }
if (pSql->subState.states) {
pthread_mutex_destroy(&pSql->subState.mutex);
}
tfree(pSql->subState.states);
pSql->subState.numOfSub = 0; pSql->subState.numOfSub = 0;
} }
...@@ -603,6 +609,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { ...@@ -603,6 +609,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
assert(pCmd->numOfClause == 1); assert(pCmd->numOfClause == 1);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
// todo refactor
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache // set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) { if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name)); tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
...@@ -689,7 +696,6 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff ...@@ -689,7 +696,6 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList) { STableDataBlocks** dataBlocks, SArray* pBlockList) {
*dataBlocks = NULL; *dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id)); STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) { if (t1 != NULL) {
*dataBlocks = *t1; *dataBlocks = *t1;
...@@ -785,9 +791,13 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) { ...@@ -785,9 +791,13 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result; return result;
} }
static void extractTableNameList(SSqlCmd* pCmd) { static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList); pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList);
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES); if (pCmd->pTableNameList == NULL) {
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
} else {
memset(pCmd->pTableNameList, 0, pCmd->numOfTables * POINTER_BYTES);
}
STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL); STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL);
int32_t i = 0; int32_t i = 0;
...@@ -797,10 +807,12 @@ static void extractTableNameList(SSqlCmd* pCmd) { ...@@ -797,10 +807,12 @@ static void extractTableNameList(SSqlCmd* pCmd) {
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1); p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
} }
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList); if (freeBlockMap) {
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
}
} }
int32_t tscMergeTableDataBlocks(SSqlObj* pSql) { int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg); const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
...@@ -880,7 +892,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) { ...@@ -880,7 +892,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
pOneTableBlock = *p; pOneTableBlock = *p;
} }
extractTableNameList(pCmd); extractTableNameList(pCmd, freeBlockMap);
// free the table data blocks; // free the table data blocks;
pCmd->pDataBlocks = pVnodeDataBlockList; pCmd->pDataBlocks = pVnodeDataBlockList;
...@@ -1915,7 +1927,7 @@ void registerSqlObj(SSqlObj* pSql) { ...@@ -1915,7 +1927,7 @@ void registerSqlObj(SSqlObj* pSql) {
tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total); tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total);
} }
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) { SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) { if (pNew == NULL) {
tscError("%p new subquery failed, tableIndex:%d", pSql, 0); tscError("%p new subquery failed, tableIndex:%d", pSql, 0);
...@@ -2000,7 +2012,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui ...@@ -2000,7 +2012,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui
tscFieldInfoUpdateOffset(pNewQueryInfo); tscFieldInfoUpdateOffset(pNewQueryInfo);
} }
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql) { SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql) {
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
...@@ -2196,7 +2208,7 @@ void tscDoQuery(SSqlObj* pSql) { ...@@ -2196,7 +2208,7 @@ void tscDoQuery(SSqlObj* pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
uint16_t type = pQueryInfo->type; uint16_t type = pQueryInfo->type;
if (pSql->fp == (void(*)())tscHandleMultivnodeInsert) { // multi-vnodes insertion if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion
tscHandleMultivnodeInsert(pSql); tscHandleMultivnodeInsert(pSql);
return; return;
} }
......
...@@ -2,15 +2,117 @@ ...@@ -2,15 +2,117 @@
#include <iostream> #include <iostream>
#include "taos.h" #include "taos.h"
#include "tglobal.h"
namespace { namespace {
static int64_t start_ts = 1433955661000; static int64_t start_ts = 1433955661000;
void stmtInsertTest() {
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
if (conn == NULL) {
printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
exit(-1);
}
TAOS_RES* res = taos_query(conn, "use test");
taos_free_result(res);
const char* sql = "insert into t1 values(?, ?, ?, ?)";
TAOS_STMT* stmt = taos_stmt_init(conn);
int32_t ret = taos_stmt_prepare(stmt, sql, 0);
ASSERT_EQ(ret, 0);
//ts timestamp, k int, a binary(11), b nchar(4)
struct {
int64_t ts;
int k;
char* a;
char* b;
} v = {0};
TAOS_BIND params[4];
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(v.ts);
params[0].buffer = &v.ts;
params[0].length = &params[0].buffer_length;
params[0].is_null = NULL;
params[1].buffer_type = TSDB_DATA_TYPE_INT;
params[1].buffer_length = sizeof(v.k);
params[1].buffer = &v.k;
params[1].length = &params[1].buffer_length;
params[1].is_null = NULL;
params[2].buffer_type = TSDB_DATA_TYPE_BINARY;
params[2].buffer_length = sizeof(v.a);
params[2].buffer = &v.a;
params[2].is_null = NULL;
params[3].buffer_type = TSDB_DATA_TYPE_NCHAR;
params[3].buffer_length = sizeof(v.b);
params[3].buffer = &v.b;
params[3].is_null = NULL;
v.ts = start_ts + 20;
v.k = 123;
char* str = "abc";
uintptr_t len = strlen(str);
v.a = str;
params[2].length = &len;
params[2].buffer_length = len;
params[2].buffer = str;
char* nstr = "999";
uintptr_t len1 = strlen(nstr);
v.b = nstr;
params[3].buffer_length = len1;
params[3].buffer = nstr;
params[3].length = &len1;
taos_stmt_bind_param(stmt, params);
taos_stmt_add_batch(stmt);
if (taos_stmt_execute(stmt) != 0) {
printf("\033[31mfailed to execute insert statement.\033[0m\n");
return;
}
v.ts = start_ts + 30;
v.k = 911;
str = "92";
len = strlen(str);
params[2].length = &len;
params[2].buffer_length = len;
params[2].buffer = str;
nstr = "1920";
len1 = strlen(nstr);
params[3].buffer_length = len1;
params[3].buffer = nstr;
params[3].length = &len1;
taos_stmt_bind_param(stmt, params);
taos_stmt_add_batch(stmt);
ret = taos_stmt_execute(stmt);
if (ret != 0) {
printf("%p\n", ret);
printf("\033[31mfailed to execute insert statement.\033[0m\n");
return;
}
taos_stmt_close(stmt);
taos_close(conn);
} }
/* test parse time function */
TEST(testCase, result_field_test) {
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
taos_init();
void validateResultFields() {
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0); TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
if (conn == NULL) { if (conn == NULL) {
printf("Failed to connect to DB, reason:%s", taos_errstr(conn)); printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
...@@ -134,5 +236,31 @@ TEST(testCase, result_field_test) { ...@@ -134,5 +236,31 @@ TEST(testCase, result_field_test) {
ASSERT_STREQ(fields[6].name, "first(ts)"); ASSERT_STREQ(fields[6].name, "first(ts)");
taos_free_result(res); taos_free_result(res);
// update the configure parameter, the result field name will be changed
tsKeepOriginalColumnName = 1;
res = taos_query(conn, "select first(ts, a, k, k, b, b, ts) from t1");
ASSERT_EQ(taos_num_fields(res), 7);
fields = taos_fetch_fields(res);
ASSERT_EQ(fields[0].bytes, 8);
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_TIMESTAMP);
ASSERT_STREQ(fields[0].name, "ts");
ASSERT_EQ(fields[2].bytes, 4);
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_INT);
ASSERT_STREQ(fields[2].name, "k");
taos_free_result(res);
taos_close(conn); taos_close(conn);
} }
}
/* test parse time function */
TEST(testCase, result_field_test) {
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
taos_init();
validateResultFields();
stmtInsertTest();
}
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include <string.h> #include <string.h>
#include "talgo.h" #include "talgo.h"
#include "taosdef.h" #include "ttype.h"
#include "tutil.h" #include "tutil.h"
#ifdef __cplusplus #ifdef __cplusplus
......
...@@ -180,7 +180,7 @@ extern int32_t tsLogKeepDays; ...@@ -180,7 +180,7 @@ extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag; extern int32_t dDebugFlag;
extern int32_t vDebugFlag; extern int32_t vDebugFlag;
extern int32_t mDebugFlag; extern int32_t mDebugFlag;
extern int32_t cDebugFlag; extern uint32_t cDebugFlag;
extern int32_t jniDebugFlag; extern int32_t jniDebugFlag;
extern int32_t tmrDebugFlag; extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag; extern int32_t sdbDebugFlag;
......
...@@ -39,4 +39,18 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO ...@@ -39,4 +39,18 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
SSchema tscGetTbnameColumnSchema(); SSchema tscGetTbnameColumnSchema();
/**
* check if the schema is valid or not, including the following aspects:
* 1. number of columns
* 2. column types
* 3. column length
* 4. column names
* 5. total length
*
* @param pSchema
* @param numOfCols
* @return
*/
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
#endif // TDENGINE_NAME_H #endif // TDENGINE_NAME_H
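Note: the isValidSchema declaration added above (implemented later in this diff) checks the column/tag counts, each column's type and length, duplicate names, and the total row length. A minimal sketch of a caller is shown below; the header name and the concrete field values are assumptions for illustration only.

```c
#include <stdbool.h>
#include <string.h>
#include "tname.h"    /* assumed header guarded by TDENGINE_NAME_H above */
#include "taosdef.h"  /* SSchema, TSDB_DATA_TYPE_* */

/* Sketch only: a two-column table (timestamp + int) with one INT tag. */
static bool checkExampleSchema(void) {
  SSchema schema[3];
  memset(schema, 0, sizeof(schema));

  schema[0].type  = TSDB_DATA_TYPE_TIMESTAMP;  /* first column must be the timestamp */
  schema[0].bytes = 8;
  strcpy(schema[0].name, "ts");

  schema[1].type  = TSDB_DATA_TYPE_INT;
  schema[1].bytes = 4;
  strcpy(schema[1].name, "val");

  schema[2].type  = TSDB_DATA_TYPE_INT;        /* tags follow the columns */
  schema[2].bytes = 4;
  strcpy(schema[2].name, "tagid");

  return isValidSchema(schema, 2, 1);          /* 2 columns, 1 tag */
}
```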
...@@ -212,7 +212,7 @@ int32_t mDebugFlag = 131; ...@@ -212,7 +212,7 @@ int32_t mDebugFlag = 131;
int32_t sdbDebugFlag = 131; int32_t sdbDebugFlag = 131;
int32_t dDebugFlag = 135; int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135; int32_t vDebugFlag = 135;
int32_t cDebugFlag = 131; uint32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131; int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131; int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131; int32_t httpDebugFlag = 131;
......
...@@ -6,6 +6,10 @@ ...@@ -6,6 +6,10 @@
#include "ttokendef.h" #include "ttokendef.h"
#include "tvariant.h" #include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
// todo refactor // todo refactor
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) { UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) { for (int32_t i = 0; i < num; ++i) {
...@@ -62,7 +66,7 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const ...@@ -62,7 +66,7 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) { if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) {
s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE); s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE);
} else { } else {
s.bytes = tDataTypeDesc[pVal->nType].nSize; s.bytes = tDataTypes[pVal->nType].bytes;
} }
s.colId = TSDB_UD_COLUMN_INDEX; s.colId = TSDB_UD_COLUMN_INDEX;
...@@ -206,3 +210,65 @@ SSchema tscGetTbnameColumnSchema() { ...@@ -206,3 +210,65 @@ SSchema tscGetTbnameColumnSchema() {
strcpy(s.name, TSQL_TBNAME_L); strcpy(s.name, TSQL_TBNAME_L);
return s; return s;
} }
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {
int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
// 1. valid types
if (!isValidDataType(pSchema[i].type)) {
return false;
}
// 2. valid length for each type
if (pSchema[i].type == TSDB_DATA_TYPE_BINARY) {
if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) {
return false;
}
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
if (pSchema[i].bytes > TSDB_MAX_NCHAR_LEN) {
return false;
}
} else {
if (pSchema[i].bytes != tDataTypes[pSchema[i].type].bytes) {
return false;
}
}
// 3. valid column names
for (int32_t j = i + 1; j < numOfCols; ++j) {
if (strncasecmp(pSchema[i].name, pSchema[j].name, sizeof(pSchema[i].name) - 1) == 0) {
return false;
}
}
rowLen += pSchema[i].bytes;
}
return rowLen <= maxLen;
}
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
if (!VALIDNUMOFTAGS(numOfTags)) {
return false;
}
/* first column must be the timestamp, which is a primary key */
if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) {
return false;
}
if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) {
return false;
}
if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) {
return false;
}
return true;
}
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
*/ */
#include "os.h" #include "os.h"
#include "taosdef.h" #include "ttype.h"
#include "ttokendef.h" #include "ttokendef.h"
#include "tscompression.h" #include "tscompression.h"
...@@ -367,7 +367,7 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i ...@@ -367,7 +367,7 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i
*maxIndex = 0; *maxIndex = 0;
} }
tDataTypeDescriptor tDataTypeDesc[15] = { tDataTypeDescriptor tDataTypes[15] = {
{TSDB_DATA_TYPE_NULL, 6,1, "NOTYPE", NULL, NULL, NULL}, {TSDB_DATA_TYPE_NULL, 6,1, "NOTYPE", NULL, NULL, NULL},
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool}, {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", tsCompressBool, tsDecompressBool, getStatics_bool},
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8}, {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
...@@ -423,58 +423,58 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { ...@@ -423,58 +423,58 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
switch (type) { switch (type) {
case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_BOOL:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BOOL_NULL; *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL;
} }
break; break;
case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_TINYINT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_TINYINT_NULL; *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_SMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint16_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_SMALLINT_NULL; *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_INT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_INT_NULL; *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BIGINT_NULL; *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_UTINYINT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_UTINYINT_NULL; *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_USMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint16_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_USMALLINT_NULL; *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_UINT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_UINT_NULL; *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_UBIGINT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_UBIGINT_NULL; *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_FLOAT:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_FLOAT_NULL; *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL;
} }
break; break;
case TSDB_DATA_TYPE_DOUBLE: case TSDB_DATA_TYPE_DOUBLE:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL; *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL;
} }
break; break;
case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_NCHAR:
...@@ -485,7 +485,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { ...@@ -485,7 +485,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
break; break;
default: { default: {
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize) = TSDB_DATA_INT_NULL; *(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL;
} }
break; break;
} }
...@@ -524,15 +524,18 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { ...@@ -524,15 +524,18 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
switch (type) { switch (type) {
case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_UTINYINT:
*((int8_t *)val) = GET_INT8_VAL(src); *((int8_t *)val) = GET_INT8_VAL(src);
break; break;
case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
*((int16_t *)val) = GET_INT16_VAL(src); *((int16_t *)val) = GET_INT16_VAL(src);
break; break;
case TSDB_DATA_TYPE_INT: { case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
*((int32_t *)val) = GET_INT32_VAL(src); *((int32_t *)val) = GET_INT32_VAL(src);
break; break;
}
case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_FLOAT:
SET_FLOAT_VAL(val, GET_FLOAT_VAL(src)); SET_FLOAT_VAL(val, GET_FLOAT_VAL(src));
break; break;
...@@ -540,6 +543,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { ...@@ -540,6 +543,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
SET_DOUBLE_VAL(val, GET_DOUBLE_VAL(src)); SET_DOUBLE_VAL(val, GET_DOUBLE_VAL(src));
break; break;
case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
*((int64_t *)val) = GET_INT64_VAL(src); *((int64_t *)val) = GET_INT64_VAL(src);
break; break;
......
...@@ -205,7 +205,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) { ...@@ -205,7 +205,7 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
} }
if (pDst->nType != TSDB_DATA_TYPE_ARRAY) { if (pDst->nType != TSDB_DATA_TYPE_ARRAY) {
pDst->nLen = tDataTypeDesc[pDst->nType].nSize; pDst->nLen = tDataTypes[pDst->nType].bytes;
} }
} }
...@@ -399,6 +399,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { ...@@ -399,6 +399,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
pVariant->wpz = (wchar_t *)tmp; pVariant->wpz = (wchar_t *)tmp;
} else { } else {
int32_t output = 0; int32_t output = 0;
bool ret = taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, &output); bool ret = taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, &output);
if (!ret) { if (!ret) {
return -1; return -1;
...@@ -424,7 +425,7 @@ static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *val ...@@ -424,7 +425,7 @@ static FORCE_INLINE int32_t convertToDouble(char *pStr, int32_t len, double *val
static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, bool issigned, bool releaseVariantPtr) { static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, bool issigned, bool releaseVariantPtr) {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) { if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
setNull((char *)result, type, tDataTypeDesc[type].nSize); setNull((char *)result, type, tDataTypes[type].bytes);
return 0; return 0;
} }
...@@ -445,7 +446,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result ...@@ -445,7 +446,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
pVariant->nLen = 0; pVariant->nLen = 0;
} }
setNull((char *)result, type, tDataTypeDesc[type].nSize); setNull((char *)result, type, tDataTypes[type].bytes);
return 0; return 0;
} }
...@@ -495,7 +496,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result ...@@ -495,7 +496,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
free(pVariant->pz); free(pVariant->pz);
pVariant->nLen = 0; pVariant->nLen = 0;
} }
setNull((char *)result, type, tDataTypeDesc[type].nSize); setNull((char *)result, type, tDataTypes[type].bytes);
return 0; return 0;
} else { } else {
int64_t val = wcstoll(pVariant->wpz, &endPtr, 10); int64_t val = wcstoll(pVariant->wpz, &endPtr, 10);
......
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" output="target/classes" path="src/main/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/classes" path="src/main/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="src" output="target/test-classes" path="src/test/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
<attribute name="test" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="output" path="target/classes"/>
</classpath>
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>taos-jdbcdriver</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.m2e.core.maven2Builder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.m2e.core.maven2Nature</nature>
</natures>
</projectDescription>
...@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ...@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.15-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.16-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver") COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.15</version> <version>2.0.16</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<name>JDBCDriver</name> <name>JDBCDriver</name>
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.15</version> <version>2.0.16</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<name>JDBCDriver</name> <name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url> <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
...@@ -49,6 +49,7 @@ ...@@ -49,6 +49,7 @@
</exclusion> </exclusion>
</exclusions> </exclusions>
</dependency> </dependency>
<dependency> <dependency>
<groupId>junit</groupId> <groupId>junit</groupId>
<artifactId>junit</artifactId> <artifactId>junit</artifactId>
...@@ -56,12 +57,6 @@ ...@@ -56,12 +57,6 @@
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.47</version>
</dependency>
<!-- for restful --> <!-- for restful -->
<dependency> <dependency>
<groupId>org.apache.httpcomponents</groupId> <groupId>org.apache.httpcomponents</groupId>
...@@ -79,12 +74,6 @@ ...@@ -79,12 +74,6 @@
<version>1.2.58</version> <version>1.2.58</version>
</dependency> </dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.49</version>
</dependency>
</dependencies> </dependencies>
<build> <build>
......
...@@ -497,12 +497,12 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData { ...@@ -497,12 +497,12 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException { throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); return null;
} }
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException { String columnNamePattern) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); return null;
} }
public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
......
...@@ -20,13 +20,11 @@ import java.util.List; ...@@ -20,13 +20,11 @@ import java.util.List;
public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
private String dbProductName = null; private String url;
private String url = null; private String userName;
private String userName = null; private Connection conn;
private Connection conn = null;
public TSDBDatabaseMetaData(String dbProductName, String url, String userName) { public TSDBDatabaseMetaData(String url, String userName) {
this.dbProductName = dbProductName;
this.url = url; this.url = url;
this.userName = userName; this.userName = userName;
} }
...@@ -35,12 +33,17 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -35,12 +33,17 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
this.conn = conn; this.conn = conn;
} }
@Override
public <T> T unwrap(Class<T> iface) throws SQLException { public <T> T unwrap(Class<T> iface) throws SQLException {
return null; try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
} }
public boolean isWrapperFor(Class<?> iface) throws SQLException { public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false; return iface.isInstance(this);
} }
public boolean allProceduresAreCallable() throws SQLException { public boolean allProceduresAreCallable() throws SQLException {
...@@ -80,11 +83,11 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -80,11 +83,11 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public String getDatabaseProductName() throws SQLException { public String getDatabaseProductName() throws SQLException {
return this.dbProductName; return "TDengine";
} }
public String getDatabaseProductVersion() throws SQLException { public String getDatabaseProductVersion() throws SQLException {
return "1.5.1"; return "2.0.x.x";
} }
public String getDriverName() throws SQLException { public String getDriverName() throws SQLException {
...@@ -92,7 +95,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -92,7 +95,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public String getDriverVersion() throws SQLException { public String getDriverVersion() throws SQLException {
return "1.0.0"; return "2.0.x";
} }
public int getDriverMajorVersion() { public int getDriverMajorVersion() {
...@@ -111,7 +114,9 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -111,7 +114,9 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
return false; return false;
} }
public boolean supportsMixedCaseIdentifiers() throws SQLException { public boolean supportsMixedCaseIdentifiers() throws SQLException {
// whether identifiers of objects such as databases and tables are stored in mixed case
return false; return false;
} }
...@@ -120,7 +125,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -120,7 +125,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public boolean storesLowerCaseIdentifiers() throws SQLException { public boolean storesLowerCaseIdentifiers() throws SQLException {
return false; return true;
} }
public boolean storesMixedCaseIdentifiers() throws SQLException { public boolean storesMixedCaseIdentifiers() throws SQLException {
...@@ -128,6 +133,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -128,6 +133,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
// whether quoted identifiers of objects such as databases and tables are stored in mixed case
return false; return false;
} }
...@@ -188,10 +194,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -188,10 +194,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public boolean nullPlusNonNullIsNull() throws SQLException { public boolean nullPlusNonNullIsNull() throws SQLException {
// null + non-null != null
return false; return false;
} }
public boolean supportsConvert() throws SQLException { public boolean supportsConvert() throws SQLException {
// whether the CONVERT function is supported
return false; return false;
} }
...@@ -216,7 +224,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -216,7 +224,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public boolean supportsGroupBy() throws SQLException { public boolean supportsGroupBy() throws SQLException {
return false; return true;
} }
public boolean supportsGroupByUnrelated() throws SQLException { public boolean supportsGroupByUnrelated() throws SQLException {
...@@ -488,7 +496,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -488,7 +496,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public int getDefaultTransactionIsolation() throws SQLException { public int getDefaultTransactionIsolation() throws SQLException {
return 0; return Connection.TRANSACTION_NONE;
} }
public boolean supportsTransactions() throws SQLException { public boolean supportsTransactions() throws SQLException {
...@@ -496,6 +504,8 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -496,6 +504,8 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public boolean supportsTransactionIsolationLevel(int level) throws SQLException { public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
if (level == Connection.TRANSACTION_NONE)
return true;
return false; return false;
} }
...@@ -517,28 +527,27 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -517,28 +527,27 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException { throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); return null;
} }
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException { String columnNamePattern) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); return null;
} }
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
throws SQLException { if (conn == null || conn.isClosed()) {
Statement stmt = null; throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
if (null != conn && !conn.isClosed()) { }
stmt = conn.createStatement();
if (catalog == null || catalog.length() < 1) { try (Statement stmt = conn.createStatement()) {
catalog = conn.getCatalog(); if (catalog == null || catalog.isEmpty())
} return null;
stmt.executeUpdate("use " + catalog); stmt.executeUpdate("use " + catalog);
ResultSet resultSet0 = stmt.executeQuery("show tables"); ResultSet resultSet0 = stmt.executeQuery("show tables");
GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types); GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
return getTablesResultSet; return getTablesResultSet;
} else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} }
} }
...@@ -547,14 +556,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -547,14 +556,12 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public ResultSet getCatalogs() throws SQLException { public ResultSet getCatalogs() throws SQLException {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
if (conn != null && !conn.isClosed()) { try (Statement stmt = conn.createStatement()) {
Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery("show databases");
ResultSet resultSet0 = stmt.executeQuery("show databases"); return new CatalogResultSet(rs);
CatalogResultSet resultSet = new CatalogResultSet(resultSet0);
return resultSet;
} else {
return getEmptyResultSet();
} }
} }
...@@ -562,7 +569,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -562,7 +569,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList // set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>(1); List<ColumnMetaData> columnMetaDataList = new ArrayList<>(1);
ColumnMetaData colMetaData = new ColumnMetaData(); ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0); colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE"); colMetaData.setColName("TABLE_TYPE");
...@@ -571,7 +578,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -571,7 +578,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
columnMetaDataList.add(colMetaData); columnMetaDataList.add(colMetaData);
// set up rowDataList // set up rowDataList
List<TSDBResultSetRowData> rowDataList = new ArrayList<TSDBResultSetRowData>(2); List<TSDBResultSetRowData> rowDataList = new ArrayList<>(2);
TSDBResultSetRowData rowData = new TSDBResultSetRowData(); TSDBResultSetRowData rowData = new TSDBResultSetRowData();
rowData.setString(0, "TABLE"); rowData.setString(0, "TABLE");
rowDataList.add(rowData); rowDataList.add(rowData);
...@@ -591,11 +598,10 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -591,11 +598,10 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
Statement stmt = null; Statement stmt = null;
if (null != conn && !conn.isClosed()) { if (null != conn && !conn.isClosed()) {
stmt = conn.createStatement(); stmt = conn.createStatement();
if (catalog == null || catalog.length() < 1) { if (catalog == null || catalog.isEmpty())
catalog = conn.getCatalog(); return null;
}
stmt.executeUpdate("use " + catalog);
stmt.executeUpdate("use " + catalog);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList // set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24); List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24);
...@@ -851,7 +857,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -851,7 +857,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public Connection getConnection() throws SQLException { public Connection getConnection() throws SQLException {
return null; return this.conn;
} }
public boolean supportsSavepoints() throws SQLException { public boolean supportsSavepoints() throws SQLException {
...@@ -884,15 +890,17 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -884,15 +890,17 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public boolean supportsResultSetHoldability(int holdability) throws SQLException { public boolean supportsResultSetHoldability(int holdability) throws SQLException {
if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
return true;
return false; return false;
} }
public int getResultSetHoldability() throws SQLException { public int getResultSetHoldability() throws SQLException {
return 0; return ResultSet.HOLD_CURSORS_OVER_COMMIT;
} }
public int getDatabaseMajorVersion() throws SQLException { public int getDatabaseMajorVersion() throws SQLException {
return 0; return 2;
} }
public int getDatabaseMinorVersion() throws SQLException { public int getDatabaseMinorVersion() throws SQLException {
...@@ -900,7 +908,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -900,7 +908,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public int getJDBCMajorVersion() throws SQLException { public int getJDBCMajorVersion() throws SQLException {
return 0; return 2;
} }
public int getJDBCMinorVersion() throws SQLException { public int getJDBCMinorVersion() throws SQLException {
......
...@@ -214,7 +214,7 @@ public class TSDBDriver extends AbstractTaosDriver { ...@@ -214,7 +214,7 @@ public class TSDBDriver extends AbstractTaosDriver {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
} }
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER)); this.dbMetaData = new TSDBDatabaseMetaData(urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER));
return urlProps; return urlProps;
} }
......
...@@ -39,7 +39,6 @@ import java.util.Iterator; ...@@ -39,7 +39,6 @@ import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@SuppressWarnings("unused")
public class TSDBResultSet implements ResultSet { public class TSDBResultSet implements ResultSet {
private TSDBJNIConnector jniConnector = null; private TSDBJNIConnector jniConnector = null;
...@@ -104,6 +103,7 @@ public class TSDBResultSet implements ResultSet { ...@@ -104,6 +103,7 @@ public class TSDBResultSet implements ResultSet {
} }
public TSDBResultSet() { public TSDBResultSet() {
} }
public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException { public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException {
......
...@@ -17,6 +17,7 @@ package com.taosdata.jdbc; ...@@ -17,6 +17,7 @@ package com.taosdata.jdbc;
import java.sql.*; import java.sql.*;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.concurrent.TimeUnit;
public class TSDBStatement implements Statement { public class TSDBStatement implements Statement {
private TSDBJNIConnector connector = null; private TSDBJNIConnector connector = null;
...@@ -68,7 +69,6 @@ public class TSDBStatement implements Statement { ...@@ -68,7 +69,6 @@ public class TSDBStatement implements Statement {
pSql = this.connector.executeQuery(sql); pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connector.getResultSet(); long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connector.freeResultSet(pSql); this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
......
...@@ -8,7 +8,6 @@ import java.util.List; ...@@ -8,7 +8,6 @@ import java.util.List;
public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData {
private final String url; private final String url;
private final String userName; private final String userName;
private final Connection connection; private final Connection connection;
......
package com.taosdata.jdbc;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
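// Smoke test for TSDBDatabaseMetaData: sets up a test database and table, then
// exercises the full DatabaseMetaData API against a live connection.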
public class DatabaseMetaDataTest {
static Connection connection = null;
static PreparedStatement statement = null;
static String dbName = "test";
static String tName = "t0";
static String host = "localhost";
@BeforeClass
public static void createConnection() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = connection.prepareStatement(sql);
statement.executeUpdate(sql);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
public void testMetaDataTest() throws SQLException {
DatabaseMetaData databaseMetaData = connection.getMetaData();
ResultSet resultSet = databaseMetaData.getTables(dbName, "t*", "t*", new String[]{"t"});
while (resultSet.next()) {
for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, resultSet.getString(i));
}
}
resultSet.close();
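// exercise the remaining DatabaseMetaData methods; most of them simply must not throw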
databaseMetaData.isWrapperFor(DatabaseMetaData.class);
databaseMetaData.allProceduresAreCallable();
databaseMetaData.allTablesAreSelectable();
databaseMetaData.getURL();
databaseMetaData.getUserName();
databaseMetaData.isReadOnly();
databaseMetaData.nullsAreSortedHigh();
databaseMetaData.nullsAreSortedLow();
databaseMetaData.nullsAreSortedAtStart();
databaseMetaData.nullsAreSortedAtEnd();
databaseMetaData.getDatabaseProductName();
databaseMetaData.getDatabaseProductVersion();
databaseMetaData.getDriverName();
databaseMetaData.getDriverVersion();
databaseMetaData.getDriverMajorVersion();
databaseMetaData.getDriverMinorVersion();
databaseMetaData.usesLocalFiles();
databaseMetaData.usesLocalFilePerTable();
databaseMetaData.supportsMixedCaseIdentifiers();
databaseMetaData.storesUpperCaseIdentifiers();
databaseMetaData.storesLowerCaseIdentifiers();
databaseMetaData.storesMixedCaseIdentifiers();
databaseMetaData.supportsMixedCaseQuotedIdentifiers();
databaseMetaData.storesUpperCaseQuotedIdentifiers();
databaseMetaData.storesLowerCaseQuotedIdentifiers();
databaseMetaData.storesMixedCaseQuotedIdentifiers();
databaseMetaData.getIdentifierQuoteString();
databaseMetaData.getSQLKeywords();
databaseMetaData.getNumericFunctions();
databaseMetaData.getStringFunctions();
databaseMetaData.getSystemFunctions();
databaseMetaData.getTimeDateFunctions();
databaseMetaData.getSearchStringEscape();
databaseMetaData.getExtraNameCharacters();
databaseMetaData.supportsAlterTableWithAddColumn();
databaseMetaData.supportsAlterTableWithDropColumn();
databaseMetaData.supportsColumnAliasing();
databaseMetaData.nullPlusNonNullIsNull();
databaseMetaData.supportsConvert();
databaseMetaData.supportsConvert(0, 0);
databaseMetaData.supportsTableCorrelationNames();
databaseMetaData.supportsDifferentTableCorrelationNames();
databaseMetaData.supportsExpressionsInOrderBy();
databaseMetaData.supportsOrderByUnrelated();
databaseMetaData.supportsGroupBy();
databaseMetaData.supportsGroupByUnrelated();
databaseMetaData.supportsGroupByBeyondSelect();
databaseMetaData.supportsLikeEscapeClause();
databaseMetaData.supportsMultipleResultSets();
databaseMetaData.supportsMultipleTransactions();
databaseMetaData.supportsNonNullableColumns();
databaseMetaData.supportsMinimumSQLGrammar();
databaseMetaData.supportsCoreSQLGrammar();
databaseMetaData.supportsExtendedSQLGrammar();
databaseMetaData.supportsANSI92EntryLevelSQL();
databaseMetaData.supportsANSI92IntermediateSQL();
databaseMetaData.supportsANSI92FullSQL();
databaseMetaData.supportsIntegrityEnhancementFacility();
databaseMetaData.supportsOuterJoins();
databaseMetaData.supportsFullOuterJoins();
databaseMetaData.supportsLimitedOuterJoins();
databaseMetaData.getSchemaTerm();
databaseMetaData.getProcedureTerm();
databaseMetaData.getCatalogTerm();
databaseMetaData.isCatalogAtStart();
databaseMetaData.getCatalogSeparator();
databaseMetaData.supportsSchemasInDataManipulation();
databaseMetaData.supportsSchemasInProcedureCalls();
databaseMetaData.supportsSchemasInTableDefinitions();
databaseMetaData.supportsSchemasInIndexDefinitions();
databaseMetaData.supportsSchemasInPrivilegeDefinitions();
databaseMetaData.supportsCatalogsInDataManipulation();
databaseMetaData.supportsCatalogsInProcedureCalls();
databaseMetaData.supportsCatalogsInTableDefinitions();
databaseMetaData.supportsCatalogsInIndexDefinitions();
databaseMetaData.supportsCatalogsInPrivilegeDefinitions();
databaseMetaData.supportsPositionedDelete();
databaseMetaData.supportsPositionedUpdate();
databaseMetaData.supportsSelectForUpdate();
databaseMetaData.supportsStoredProcedures();
databaseMetaData.supportsSubqueriesInComparisons();
databaseMetaData.supportsSubqueriesInExists();
databaseMetaData.supportsSubqueriesInIns();
databaseMetaData.supportsSubqueriesInQuantifieds();
databaseMetaData.supportsCorrelatedSubqueries();
databaseMetaData.supportsUnion();
databaseMetaData.supportsUnionAll();
databaseMetaData.supportsOpenCursorsAcrossCommit();
databaseMetaData.supportsOpenCursorsAcrossRollback();
databaseMetaData.supportsOpenStatementsAcrossCommit();
databaseMetaData.supportsOpenStatementsAcrossRollback();
databaseMetaData.getMaxBinaryLiteralLength();
databaseMetaData.getMaxCharLiteralLength();
databaseMetaData.getMaxColumnNameLength();
databaseMetaData.getMaxColumnsInGroupBy();
databaseMetaData.getMaxColumnsInIndex();
databaseMetaData.getMaxColumnsInOrderBy();
databaseMetaData.getMaxColumnsInSelect();
databaseMetaData.getMaxColumnsInTable();
databaseMetaData.getMaxConnections();
databaseMetaData.getMaxCursorNameLength();
databaseMetaData.getMaxIndexLength();
databaseMetaData.getMaxSchemaNameLength();
databaseMetaData.getMaxProcedureNameLength();
databaseMetaData.getMaxCatalogNameLength();
databaseMetaData.getMaxRowSize();
databaseMetaData.doesMaxRowSizeIncludeBlobs();
databaseMetaData.getMaxStatementLength();
databaseMetaData.getMaxStatements();
databaseMetaData.getMaxTableNameLength();
databaseMetaData.getMaxTablesInSelect();
databaseMetaData.getMaxUserNameLength();
databaseMetaData.getDefaultTransactionIsolation();
databaseMetaData.supportsTransactions();
databaseMetaData.supportsTransactionIsolationLevel(0);
databaseMetaData.supportsDataDefinitionAndDataManipulationTransactions();
databaseMetaData.supportsDataManipulationTransactionsOnly();
databaseMetaData.dataDefinitionCausesTransactionCommit();
databaseMetaData.dataDefinitionIgnoredInTransactions();
try {
databaseMetaData.getProcedures("", "", "");
} catch (Exception e) {
}
try {
databaseMetaData.getProcedureColumns("", "", "", "");
} catch (Exception e) {
}
try {
databaseMetaData.getTables("", "", "", new String[]{""});
} catch (Exception e) {
}
databaseMetaData.getSchemas();
databaseMetaData.getCatalogs();
// databaseMetaData.getTableTypes();
databaseMetaData.getColumns(dbName, "", tName, "");
databaseMetaData.getColumnPrivileges("", "", "", "");
databaseMetaData.getTablePrivileges("", "", "");
databaseMetaData.getBestRowIdentifier("", "", "", 0, false);
databaseMetaData.getVersionColumns("", "", "");
databaseMetaData.getPrimaryKeys("", "", "");
databaseMetaData.getImportedKeys("", "", "");
databaseMetaData.getExportedKeys("", "", "");
databaseMetaData.getCrossReference("", "", "", "", "", "");
databaseMetaData.getTypeInfo();
databaseMetaData.getIndexInfo("", "", "", false, false);
databaseMetaData.supportsResultSetType(0);
databaseMetaData.supportsResultSetConcurrency(0, 0);
databaseMetaData.ownUpdatesAreVisible(0);
databaseMetaData.ownDeletesAreVisible(0);
databaseMetaData.ownInsertsAreVisible(0);
databaseMetaData.othersUpdatesAreVisible(0);
databaseMetaData.othersDeletesAreVisible(0);
databaseMetaData.othersInsertsAreVisible(0);
databaseMetaData.updatesAreDetected(0);
databaseMetaData.deletesAreDetected(0);
databaseMetaData.insertsAreDetected(0);
databaseMetaData.supportsBatchUpdates();
databaseMetaData.getUDTs("", "", "", new int[]{0});
databaseMetaData.getConnection();
databaseMetaData.supportsSavepoints();
databaseMetaData.supportsNamedParameters();
databaseMetaData.supportsMultipleOpenResults();
databaseMetaData.supportsGetGeneratedKeys();
databaseMetaData.getSuperTypes("", "", "");
databaseMetaData.getSuperTables("", "", "");
databaseMetaData.getAttributes("", "", "", "");
databaseMetaData.supportsResultSetHoldability(0);
databaseMetaData.getResultSetHoldability();
databaseMetaData.getDatabaseMajorVersion();
databaseMetaData.getDatabaseMinorVersion();
databaseMetaData.getJDBCMajorVersion();
databaseMetaData.getJDBCMinorVersion();
databaseMetaData.getSQLStateType();
databaseMetaData.locatorsUpdateCopy();
databaseMetaData.supportsStatementPooling();
databaseMetaData.getRowIdLifetime();
databaseMetaData.getSchemas("", "");
databaseMetaData.supportsStoredFunctionsUsingCallSyntax();
databaseMetaData.autoCommitFailureClosesAllResultSets();
databaseMetaData.getClientInfoProperties();
databaseMetaData.getFunctions("", "", "");
databaseMetaData.getFunctionColumns("", "", "", "");
databaseMetaData.getPseudoColumns("", "", "", "");
databaseMetaData.generatedKeyAlwaysReturned();
}
@AfterClass
public static void close() throws Exception {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
}
}
package com.taosdata.jdbc.cases;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.concurrent.TimeUnit;
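// Concurrency check: one thread reads jdbctest.weather while another keeps
// inserting rows, each through its own connection and statement.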
public class MultiThreadsWithSameStatmentTest {
private class Service {
public Connection conn;
public Statement stmt;
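// each Service owns its own connection and statement and prepares the jdbctest.weather table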
public Service() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
stmt = conn.createStatement();
stmt.execute("create database if not exists jdbctest");
stmt.executeUpdate("create table if not exists jdbctest.weather (ts timestamp, f1 int)");
} catch (ClassNotFoundException | SQLException e) {
e.printStackTrace();
}
}
public void release() {
try {
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
@Before
public void before() {
}
@Test
public void test() {
Thread t1 = new Thread(() -> {
try {
Service service = new Service();
ResultSet resultSet = service.stmt.executeQuery("select * from jdbctest.weather");
while (resultSet.next()) {
ResultSetMetaData metaData = resultSet.getMetaData();
for (int i = 1; i <= metaData.getColumnCount(); i++) {
System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i));
}
System.out.println();
}
resultSet.close();
service.release();
} catch (SQLException e) {
e.printStackTrace();
}
});
Thread t2 = new Thread(() -> {
for (int i = 0; i < 100; i++) { // bounded so the writer thread eventually terminates and the JVM can exit
try {
Service service = new Service();
service.stmt.executeUpdate("insert into jdbctest.weather values(now,1)");
service.release();
} catch (SQLException e) {
e.printStackTrace();
}
}
});
t1.start();
sleep(1000);
t2.start();
}
private void sleep(long mills) {
try {
TimeUnit.MILLISECONDS.sleep(mills);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
@After
public void after() {
}
}
...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh: ...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup( setuptools.setup(
name="taos", name="taos",
version="2.0.3", version="2.0.4",
author="Taosdata Inc.", author="Taosdata Inc.",
author_email="support@taosdata.com", author_email="support@taosdata.com",
description="TDengine python client package", description="TDengine python client package",
......
...@@ -184,7 +184,7 @@ class TDengineCursor(object): ...@@ -184,7 +184,7 @@ class TDengineCursor(object):
return False return False
def fetchall(self): def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
""" """
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
...@@ -203,7 +203,7 @@ class TDengineCursor(object): ...@@ -203,7 +203,7 @@ class TDengineCursor(object):
for i in range(len(self._fields)): for i in range(len(self._fields)):
buffer[i].extend(block[i]) buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer))) return list(map(tuple, zip(*buffer)))
def fetchall_block(self): def fetchall(self):
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall") raise OperationalError("Invalid use of fetchall")
......
...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh: ...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup( setuptools.setup(
name="taos", name="taos",
version="2.0.3", version="2.0.4",
author="Taosdata Inc.", author="Taosdata Inc.",
author_email="support@taosdata.com", author_email="support@taosdata.com",
description="TDengine python client package", description="TDengine python client package",
......
...@@ -192,7 +192,7 @@ class TDengineCursor(object): ...@@ -192,7 +192,7 @@ class TDengineCursor(object):
return False return False
def fetchall(self): def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
""" """
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
...@@ -212,7 +212,7 @@ class TDengineCursor(object): ...@@ -212,7 +212,7 @@ class TDengineCursor(object):
buffer[i].extend(block[i]) buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer))) return list(map(tuple, zip(*buffer)))
def fetchall_block(self): def fetchall(self):
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall") raise OperationalError("Invalid use of fetchall")
......
...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh: ...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup( setuptools.setup(
name="taos", name="taos",
version="2.0.3", version="2.0.4",
author="Taosdata Inc.", author="Taosdata Inc.",
author_email="support@taosdata.com", author_email="support@taosdata.com",
description="TDengine python client package", description="TDengine python client package",
......
...@@ -138,7 +138,7 @@ class TDengineCursor(object): ...@@ -138,7 +138,7 @@ class TDengineCursor(object):
def fetchmany(self): def fetchmany(self):
pass pass
def fetchall(self): def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
""" """
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
...@@ -158,7 +158,7 @@ class TDengineCursor(object): ...@@ -158,7 +158,7 @@ class TDengineCursor(object):
buffer[i].extend(block[i]) buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer))) return list(map(tuple, zip(*buffer)))
def fetchall_block(self): def fetchall(self):
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall") raise OperationalError("Invalid use of fetchall")
......
...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh: ...@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup( setuptools.setup(
name="taos", name="taos",
version="2.0.3", version="2.0.4",
author="Taosdata Inc.", author="Taosdata Inc.",
author_email="support@taosdata.com", author_email="support@taosdata.com",
description="TDengine python client package", description="TDengine python client package",
......
...@@ -139,7 +139,7 @@ class TDengineCursor(object): ...@@ -139,7 +139,7 @@ class TDengineCursor(object):
def fetchmany(self): def fetchmany(self):
pass pass
def fetchall(self): def fetchall_row(self):
"""Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
""" """
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
...@@ -159,7 +159,7 @@ class TDengineCursor(object): ...@@ -159,7 +159,7 @@ class TDengineCursor(object):
buffer[i].extend(block[i]) buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer))) return list(map(tuple, zip(*buffer)))
def fetchall_block(self): def fetchall(self):
if self._result is None or self._fields is None: if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall") raise OperationalError("Invalid use of fetchall")
......
...@@ -216,7 +216,7 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) { ...@@ -216,7 +216,7 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) {
int16_t numOfTags = htons(pTable->numOfTags); int16_t numOfTags = htons(pTable->numOfTags);
int32_t tableId = htonl(pTable->tid); int32_t tableId = htonl(pTable->tid);
uint64_t uid = htobe64(pTable->uid); uint64_t uid = htobe64(pTable->uid);
dInfo("table:%s, numOfColumns:%d numOfTags:%d tid:%d uid:%" PRIu64, pTable->tableId, numOfColumns, numOfTags, tableId, uid); dInfo("table:%s, numOfColumns:%d numOfTags:%d tid:%d uid:%" PRIu64, pTable->tableFname, numOfColumns, numOfTags, tableId, uid);
return rpcRsp.pCont; return rpcRsp.pCont;
} }
......
...@@ -140,7 +140,7 @@ DLL_EXPORT int taos_errno(TAOS_RES *tres); ...@@ -140,7 +140,7 @@ DLL_EXPORT int taos_errno(TAOS_RES *tres);
DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param); DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); //DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);
typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code); typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code);
DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval); DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval);
......
...@@ -36,29 +36,6 @@ extern "C" { ...@@ -36,29 +36,6 @@ extern "C" {
#define TSWINDOW_INITIALIZER ((STimeWindow) {INT64_MIN, INT64_MAX}) #define TSWINDOW_INITIALIZER ((STimeWindow) {INT64_MIN, INT64_MAX})
#define TSKEY_INITIAL_VAL INT64_MIN #define TSKEY_INITIAL_VAL INT64_MIN
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
typedef int32_t VarDataOffsetT;
typedef int16_t VarDataLenT;
typedef struct tstr {
VarDataLenT len;
char data[];
} tstr;
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
#define varDataLen(v) ((VarDataLenT *)(v))[0]
#define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v))
#define varDataVal(v) ((void *)((char *)v + VARSTR_HEADER_SIZE))
#define varDataCopy(dst, v) memcpy((dst), (void*) (v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char*)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT) (_len))
#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
// this data type is internally used only in 'in' query to hold the values
#define TSDB_DATA_TYPE_ARRAY (TSDB_DATA_TYPE_NCHAR + 1)
// Bytes for each type. // Bytes for each type.
extern const int32_t TYPE_BYTES[15]; extern const int32_t TYPE_BYTES[15];
...@@ -164,70 +141,6 @@ do { \ ...@@ -164,70 +141,6 @@ do { \
#define SET_DOUBLE_PTR(x, y) { (*(double *)(x)) = (*(double *)(y)); } #define SET_DOUBLE_PTR(x, y) { (*(double *)(x)) = (*(double *)(y)); }
#endif #endif
typedef struct tDataTypeDescriptor {
int16_t nType;
int16_t nameLen;
int32_t nSize;
char * aName;
int (*compFunc)(const char *const input, int inputSize, const int nelements, char *const output, int outputSize,
char algorithm, char *const buffer, int bufferSize);
int (*decompFunc)(const char *const input, int compressedSize, const int nelements, char *const output,
int outputSize, char algorithm, char *const buffer, int bufferSize);
void (*getStatisFunc)(const void *pData, int32_t numofrow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minindex, int16_t *maxindex, int16_t *numofnull);
} tDataTypeDescriptor;
extern tDataTypeDescriptor tDataTypeDesc[15];
bool isValidDataType(int32_t type);
static FORCE_INLINE bool isNull(const char *val, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
case TSDB_DATA_TYPE_TINYINT:
return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
case TSDB_DATA_TYPE_SMALLINT:
return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
case TSDB_DATA_TYPE_INT:
return *(uint32_t *)val == TSDB_DATA_INT_NULL;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
case TSDB_DATA_TYPE_FLOAT:
return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
case TSDB_DATA_TYPE_DOUBLE:
return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
case TSDB_DATA_TYPE_NCHAR:
return varDataLen(val) == sizeof(int32_t) && *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
case TSDB_DATA_TYPE_BINARY:
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
case TSDB_DATA_TYPE_UTINYINT:
return *(uint8_t*) val == TSDB_DATA_UTINYINT_NULL;
case TSDB_DATA_TYPE_USMALLINT:
return *(uint16_t*) val == TSDB_DATA_USMALLINT_NULL;
case TSDB_DATA_TYPE_UINT:
return *(uint32_t*) val == TSDB_DATA_UINT_NULL;
case TSDB_DATA_TYPE_UBIGINT:
return *(uint64_t*) val == TSDB_DATA_UBIGINT_NULL;
default:
return false;
};
}
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bool issigned);
#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
// TODO: check if below is necessary // TODO: check if below is necessary
#define TSDB_RELATION_INVALID 0 #define TSDB_RELATION_INVALID 0
#define TSDB_RELATION_LESS 1 #define TSDB_RELATION_LESS 1
...@@ -270,7 +183,7 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo ...@@ -270,7 +183,7 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE #define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_SQL_SHOW_LEN 512 #define TSDB_MAX_SQL_SHOW_LEN 512
#define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024U) // sql length should be less than 1mb #define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024u) // sql length should be less than 1mb
#define TSDB_APPNAME_LEN TSDB_UNI_LEN #define TSDB_APPNAME_LEN TSDB_UNI_LEN
...@@ -399,8 +312,8 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo ...@@ -399,8 +312,8 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
#define TSDB_MAX_RPC_THREADS 5 #define TSDB_MAX_RPC_THREADS 5
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode #define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode
/* /*
* 1. ordinary sub query for select * from super_table * 1. ordinary sub query for select * from super_table
...@@ -420,31 +333,31 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo ...@@ -420,31 +333,31 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u #define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u
#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type #define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0) #define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type)) #define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
#define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type)) #define TSDB_QUERY_CLEAR_TYPE(x, _type) ((x) &= (~_type))
#define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE) #define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE)
#define TSDB_ORDER_ASC 1
#define TSDB_ORDER_DESC 2
#define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
#define TSDB_DEFAULT_USERS_HASH_SIZE 20
#define TSDB_DEFAULT_DBS_HASH_SIZE 100
#define TSDB_DEFAULT_VGROUPS_HASH_SIZE 100
#define TSDB_DEFAULT_STABLES_HASH_SIZE 100
#define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000
#define TSDB_PORT_DNODESHELL 0
#define TSDB_PORT_DNODEDNODE 5
#define TSDB_PORT_SYNC 10
#define TSDB_PORT_HTTP 11
#define TSDB_PORT_ARBITRATOR 12
#define TSDB_MAX_WAL_SIZE (1024*1024*2) #define TSDB_MAX_WAL_SIZE (1024*1024*3)
typedef enum {
TAOS_QTYPE_RPC = 0,
......
...@@ -153,30 +153,31 @@ enum _mgmt_table {
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
#define TSDB_FILL_SET_VALUE 2
#define TSDB_FILL_LINEAR 3
#define TSDB_FILL_PREV 4
#define TSDB_FILL_NEXT 5
#define TSDB_ALTER_USER_PASSWD 0x1
#define TSDB_ALTER_USER_PRIVILEGES 0x2
#define TSDB_KILL_MSG_LEN 30
#define TSDB_VN_READ_ACCCESS ((char)0x1)
#define TSDB_VN_WRITE_ACCCESS ((char)0x2)
#define TSDB_VN_ALL_ACCCESS (TSDB_VN_READ_ACCCESS | TSDB_VN_WRITE_ACCCESS)
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
#define TSDB_COL_TAG 0x1u // the tag column type
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
#define TSDB_COL_NULL 0x4u // the column filter NULL or not
#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)
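The column flags are likewise combined by OR-ing; the `IS_*` macros mask out `TSDB_COL_NULL` before comparing, so a flag word of `(TSDB_COL_TAG | TSDB_COL_NULL)` still reads as a tag. A small sketch (hypothetical helper, not in the source):

```c
#include <stdbool.h>
#include <stdint.h>

// Hypothetical helper: decide whether a column-info flag word denotes a tag column.
static bool col_flags_denote_tag(uint32_t colFlags) {
  return TSDB_COL_IS_TAG(colFlags) && !TSDB_COL_IS_UD_COL(colFlags);
}
```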
extern char *taosMsg[];
...@@ -260,14 +261,14 @@ typedef struct {
uint64_t uid;
uint64_t superTableUid;
uint64_t createdTime;
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
char superTableId[TSDB_TABLE_FNAME_LEN]; char stableFname[TSDB_TABLE_FNAME_LEN];
char data[];
} SMDCreateTableMsg;
typedef struct {
int32_t len; // one create table message
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t igExists;
int8_t getMeta;
...@@ -284,12 +285,12 @@ typedef struct {
} SCMCreateTableMsg;
typedef struct {
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
int8_t igNotExists;
} SCMDropTableMsg;
typedef struct {
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int16_t type; /* operation type */
int16_t numOfCols; /* number of schema */
...@@ -369,14 +370,14 @@ typedef struct {
int32_t vgId;
int32_t tid;
uint64_t uid;
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
} SMDDropTableMsg;
typedef struct {
int32_t contLen;
int32_t vgId;
uint64_t uid;
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
} SDropSTableMsg;
typedef struct {
...@@ -688,7 +689,7 @@ typedef struct {
} SCreateVnodeMsg, SAlterVnodeMsg;
typedef struct {
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
int16_t createFlag;
char tags[];
} STableInfoMsg;
...@@ -726,7 +727,7 @@ typedef struct {
typedef struct STableMetaMsg {
int32_t contLen;
char tableId[TSDB_TABLE_FNAME_LEN]; // table id char tableFname[TSDB_TABLE_FNAME_LEN]; // table id
uint8_t numOfTags;
uint8_t precision;
uint8_t tableType;
...@@ -847,7 +848,7 @@ typedef struct {
uint64_t uid;
uint64_t stime; // stream starting time
int32_t status;
char tableId[TSDB_TABLE_FNAME_LEN]; char tableFname[TSDB_TABLE_FNAME_LEN];
} SAlterStreamMsg;
typedef struct {
......
...@@ -7,6 +28 @@ extern "C" {
#include "taosdef.h" #include "taosdef.h"
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
typedef int32_t VarDataOffsetT;
typedef int16_t VarDataLenT;
typedef struct tstr {
VarDataLenT len;
char data[];
} tstr;
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
#define varDataLen(v) ((VarDataLenT *)(v))[0]
#define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v))
#define varDataVal(v) ((void *)((char *)v + VARSTR_HEADER_SIZE))
#define varDataCopy(dst, v) memcpy((dst), (void*) (v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char*)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT) (_len))
#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
// this data type is internally used only in 'in' query to hold the values
#define TSDB_DATA_TYPE_ARRAY (TSDB_DATA_TYPE_NCHAR + 1)
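A var-data cell is a `VarDataLenT` length header followed by the payload bytes. A minimal sketch (hypothetical helper, assuming only the macros above) of building a BINARY cell:

```c
#include <string.h>

// Build a BINARY cell in the var-data layout declared above.
// The caller supplies a buffer of at least VARSTR_HEADER_SIZE + 5 bytes.
static void build_binary_cell(char *cell) {
  const char *payload = "hello";
  varDataSetLen(cell, 5);               // write the VarDataLenT length header
  memcpy(varDataVal(cell), payload, 5); // copy the payload after the header
  // varDataLen(cell)  == 5
  // varDataTLen(cell) == VARSTR_HEADER_SIZE + 5, i.e. the bytes varDataCopy() would move
}
```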
#define GET_TYPED_DATA(_v, _finalType, _type, _data) \
do { \
switch (_type) { \
...@@ -59,6 +81,70 @@ extern "C" {
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX)
#define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX)
static FORCE_INLINE bool isNull(const char *val, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
case TSDB_DATA_TYPE_TINYINT:
return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
case TSDB_DATA_TYPE_SMALLINT:
return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
case TSDB_DATA_TYPE_INT:
return *(uint32_t *)val == TSDB_DATA_INT_NULL;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
case TSDB_DATA_TYPE_FLOAT:
return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
case TSDB_DATA_TYPE_DOUBLE:
return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
case TSDB_DATA_TYPE_NCHAR:
return varDataLen(val) == sizeof(int32_t) && *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
case TSDB_DATA_TYPE_BINARY:
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
case TSDB_DATA_TYPE_UTINYINT:
return *(uint8_t*) val == TSDB_DATA_UTINYINT_NULL;
case TSDB_DATA_TYPE_USMALLINT:
return *(uint16_t*) val == TSDB_DATA_USMALLINT_NULL;
case TSDB_DATA_TYPE_UINT:
return *(uint32_t*) val == TSDB_DATA_UINT_NULL;
case TSDB_DATA_TYPE_UBIGINT:
return *(uint64_t*) val == TSDB_DATA_UBIGINT_NULL;
default:
return false;
};
}
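Following the `TSDB_DATA_TYPE_BINARY` branch of `isNull()` above, a NULL BINARY cell is a var-data value whose payload is a single byte equal to `TSDB_DATA_BINARY_NULL`. A sketch of constructing such a cell by hand (the real code presumably goes through `setVardataNull()`, whose implementation is not shown here):

```c
#include <stdint.h>

// Hand-build a NULL BINARY cell that the BINARY branch of isNull() accepts.
// The buffer must hold at least VARSTR_HEADER_SIZE + 1 bytes.
static void make_binary_null(char *cell) {
  varDataSetLen(cell, sizeof(int8_t));
  *(uint8_t *)varDataVal(cell) = TSDB_DATA_BINARY_NULL;
  // isNull(cell, TSDB_DATA_TYPE_BINARY) now returns true
}
```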
typedef struct tDataTypeDescriptor {
int16_t type;
int16_t nameLen;
int32_t bytes;
char * name;
int (*compFunc)(const char *const input, int inputSize, const int nelements, char *const output, int outputSize,
char algorithm, char *const buffer, int bufferSize);
int (*decompFunc)(const char *const input, int compressedSize, const int nelements, char *const output,
int outputSize, char algorithm, char *const buffer, int bufferSize);
void (*statisFunc)(const void *pData, int32_t numofrow, int64_t *min, int64_t *max, int64_t *sum,
int16_t *minindex, int16_t *maxindex, int16_t *numofnull);
} tDataTypeDescriptor;
extern tDataTypeDescriptor tDataTypes[15];
bool isValidDataType(int32_t type);
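How `tDataTypes[]` is indexed is not shown in this diff; assuming it can be indexed by the `TSDB_DATA_TYPE_*` value (an assumption, not confirmed here), a lookup of the per-type metadata could look like this:

```c
#include <stdint.h>
#include <stdio.h>

// Sketch only: the indexing scheme of tDataTypes[] is assumed. The descriptor
// bundles the type's size, name and the compression/statistics callbacks whose
// signatures appear above; valid 'algorithm' values are not shown in this diff.
static void describe_type(int32_t type) {
  if (!isValidDataType(type)) return;
  tDataTypeDescriptor *desc = &tDataTypes[type];  // assumed indexing scheme
  printf("type %s occupies %d bytes\n", desc->name, desc->bytes);
}
```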
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bool issigned);
#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
#ifdef __cplusplus
}
#endif
......
...@@ -1085,8 +1085,8 @@ static void printfQueryMeta() {
printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
printf("\n");
printf("super table query info: \n"); printf("specified table query info: \n");
printf("rate: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate);
printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent); printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent);
printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount);
...@@ -1102,11 +1102,11 @@ static void printfQueryMeta() { ...@@ -1102,11 +1102,11 @@ static void printfQueryMeta() {
printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]);
} }
printf("\n"); printf("\n");
printf("sub table query info: \n"); printf("super table query info: \n");
printf("rate: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate); printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate);
printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt); printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt);
printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount); printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount);
printf("childTblPrefix: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.childTblPrefix); printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName);
if (SUBSCRIBE_MODE == g_jsonType) {
printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode);
...@@ -4020,23 +4020,23 @@ void *superQueryProcess(void *sarg) {
}
selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile);
int64_t t2 = taosGetTimestampUs();
printf("taosc select sql return, Spent %f s\n", (t2 - t1)/1000000.0); printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0);
} else {
#ifdef TD_LOWA_CURL
int64_t t1 = taosGetTimestampUs();
int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle);
int64_t t2 = taosGetTimestampUs();
printf("http select sql return, Spent %f s \n", (t2 - t1)/1000000.0); printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0);
if (0 != retCode) {
printf("========curl return fail, threadID[%d]\n", winfo->threadID); printf("====curl return fail, threadID[%d]\n", winfo->threadID);
return NULL;
}
#endif
}
}
et = taosGetTimestampMs();
printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0); printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0);
}
return NULL;
}
...@@ -4065,7 +4065,7 @@ void *subQueryProcess(void *sarg) {
char sqlstr[1024];
threadInfo *winfo = (threadInfo *)sarg;
int64_t st = 0;
int64_t et = 0; int64_t et = g_queryInfo.subQueryInfo.rate*1000;
while (1) {
if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) {
taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms
...@@ -4085,17 +4085,12 @@ void *subQueryProcess(void *sarg) {
}
}
et = taosGetTimestampMs();
printf("========thread[%"PRIu64"] complete all sqls to allocate all sub-tables once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0); printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0);
}
return NULL;
}
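In the `subQueryProcess` hunk above, `et` is now initialized to `rate*1000` instead of 0, so the first pass through the loop issues its queries immediately instead of sleeping one full interval. A stripped-down sketch of that throttling pattern, with hypothetical helpers standing in for `taosGetTimestampMs()`/`taosMsleep()` and the configured interval:

```c
#include <stdint.h>

// Hypothetical stand-ins for the time helpers used by taosdemo.
int64_t now_ms(void);
void    sleep_ms(int64_t ms);

static void throttled_loop(int64_t interval_ms) {
  int64_t st = 0;
  int64_t et = interval_ms;                  // pre-load so the first iteration runs immediately
  for (;;) {
    if (interval_ms && (et - st) < interval_ms) {
      sleep_ms(interval_ms - (et - st));     // wait out the remainder of the interval
    }
    st = now_ms();
    /* ... issue the batch of queries ... */
    et = now_ms();
  }
}
```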
int queryTestProcess() {
printfQueryMeta();
printf("Press enter key to continue\n\n");
(void)getchar();
TAOS * taos = NULL;
taos_init();
taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
...@@ -4108,9 +4103,13 @@ int queryTestProcess() {
(void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount);
}
printfQueryMeta();
printf("Press enter key to continue\n\n");
(void)getchar();
pthread_t *pids = NULL;
threadInfo *infos = NULL;
//==== create sub threads for query from super table //==== create sub threads for query from specify table
if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) {
pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t));
...@@ -4146,7 +4145,7 @@ int queryTestProcess() {
pthread_t *pidsOfSub = NULL;
threadInfo *infosOfSub = NULL;
//==== create sub threads for query from sub table //==== create sub threads for query from all sub table of the super table
if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) {
pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t));
infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo));
...@@ -4177,6 +4176,7 @@ int queryTestProcess() {
t_info->start_table_id = last;
t_info->end_table_id = i < b ? last + a : last + a - 1;
last = t_info->end_table_id + 1;
t_info->taos = taos;
pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info);
}
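The newly added `last = t_info->end_table_id + 1;` advances the window so consecutive threads receive disjoint, inclusive `[start_table_id, end_table_id]` ranges. A standalone sketch of the split (the initialization of `a` and `b` as quotient and remainder is an assumption; it is not part of this hunk):

```c
#include <stdio.h>

// Assumed setup: a = tables / threads, b = tables % threads,
// so the first b threads each take one extra table.
static void split_tables(int tables, int threads) {
  int a = tables / threads;
  int b = tables % threads;
  int last = 0;
  for (int i = 0; i < threads; i++) {
    int start = last;
    int end   = (i < b) ? last + a : last + a - 1;  // inclusive range, mirrors the diff above
    last = end + 1;                                 // mirrors the newly added line
    printf("thread %d handles child tables [%d, %d]\n", i, start, end);
  }
}
```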
......
...@@ -195,7 +195,7 @@ static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *
cols++;
pMeta->numOfColumns = htons(cols);
strcpy(pMeta->tableId, "show cluster"); strcpy(pMeta->tableFname, "show cluster");
pShow->numOfColumns = cols;
pShow->offset[0] = 0;
......
...@@ -337,7 +337,7 @@ static int32_t mnodeGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo ...
cols++;
pMeta->numOfColumns = htons(cols);
strcpy(pMeta->tableId, "show users"); strcpy(pMeta->tableFname, "show users");
pShow->numOfColumns = cols;
pShow->offset[0] = 0;
......