Unverified commit 3a096cc5, authored by Yiqing Liu, committed by GitHub

Merge branch 'develop' into test/testcase

......@@ -146,7 +146,7 @@ matrix:
branch_pattern: coverity_scan
- os: linux
dist: xenial
dist: trusty
language: c
git:
- depth: 1
......@@ -156,8 +156,9 @@ matrix:
packages:
- build-essential
- cmake
- binutils-2.26
env:
- DESC="xenial build"
- DESC="trusty/gcc-4.8/bintuils-2.26 build"
before_script:
- export TZ=Asia/Harbin
......@@ -168,7 +169,7 @@ matrix:
script:
- cmake .. > /dev/null
- make
- export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
- os: linux
dist: bionic
......@@ -200,7 +201,7 @@ matrix:
dist: bionic
language: c
compiler: clang
env: DESC="linux/clang build"
env: DESC="arm64 linux/clang build"
git:
- depth: 1
......@@ -238,7 +239,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="xenial build"
- DESC="arm64 xenial build"
before_script:
- export TZ=Asia/Harbin
......
......@@ -104,6 +104,10 @@ pipeline {
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p2
date'''
sh '''
cd ${WKC}/tests
./test-all.sh b4fq
'''
}
}
stage('test_b1') {
......
......@@ -33,11 +33,17 @@ To build TDengine, use [CMake](https://cmake.org/) 3.5 or higher versions in the
## Install tools
### Ubuntu & Debian:
### Ubuntu 16.04 and above & Debian:
```bash
sudo apt-get install -y gcc cmake build-essential git
```
### Ubuntu 14.04:
```bash
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
export PATH=/usr/lib/binutils-2.26/bin:$PATH
```
To compile and package the JDBC driver source code, you should have JDK 8 or higher and Apache Maven 2.7 or higher installed.
To install openjdk-8:
```bash
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.12.0")
SET(TD_VER_NUMBER "2.0.13.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -4,6 +4,8 @@
TDengine uses a relational data model, so you need to create databases and tables. For a concrete application scenario, you therefore need to consider the design of the database as well as of the super tables (STables) and regular tables. This section does not discuss detailed syntax rules; it only introduces the concepts.
For data modeling, please refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1945.html">video tutorial</a>.
## Creating a Database
Different types of data collection points often have different data characteristics, including collection frequency, retention period, number of replicas, data block size, whether updates are allowed, and so on. To let TDengine work at maximum efficiency in every scenario, it is recommended to create tables with different data characteristics in different databases, because each database can be configured with its own storage policy. When creating a database, in addition to the standard SQL options, the application can also specify parameters such as retention period, number of replicas, number of memory blocks, time precision, maximum and minimum numbers of records per file block, whether to compress the data, and the number of days covered by a single data file. For example:
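A minimal sketch of such a statement (the database name power and the specific parameter values are illustrative assumptions, not taken from this document): keep data for 365 days, store 10 days of data per file, allocate 4 memory blocks, and allow updates to existing records.
```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
```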
......@@ -60,4 +62,3 @@ TDengine supports the multi-column model, as long as the physical quantities are collected simultaneously by the same data collection point
TDengine recommends using the multi-column model whenever possible, because it offers higher insert and storage efficiency. However, in some scenarios the set of metrics collected at a data collection point changes frequently; with the multi-column model this would require frequently modifying the super table's schema definition and would make the application more complex. In such cases the single-column model is simpler.
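As a hedged illustration of the two approaches (the table, column, and tag names below are assumptions for this sketch, not definitions from this document), the multi-column model puts all metrics collected simultaneously at one collection point into a single super table:
```mysql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
whereas the single-column model uses one super table per metric, each row holding a single value:
```mysql
CREATE TABLE current_readings (ts TIMESTAMP, value FLOAT) TAGS (location BINARY(64), groupId INT);
CREATE TABLE voltage_readings (ts TIMESTAMP, value INT) TAGS (location BINARY(64), groupId INT);
```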
For data modeling, please refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1945.html">video tutorial</a>.
......@@ -148,7 +148,7 @@ TDengine's default timestamp precision is milliseconds, but it can be changed by modifying the configuration parameter enableMic
SHOW TABLES [LIKE tb_name_wildcard];
```
Lists all table information in the current database. Note: wildcards can be used in the LIKE clause for name matching. Wildcard matching: 1) '%' (percent sign) matches zero or more characters; 2) '_' (underscore) matches exactly one character.
Lists all table information in the current database. Note: wildcards can be used in the like clause for name matching. Wildcard matching: 1) "%" (percent sign) matches zero or more characters; 2) "\_" (underscore) matches exactly one character.
- **Adjust the displayed character width online**
......@@ -263,33 +263,33 @@ TDengine's default timestamp precision is milliseconds, but it can be changed by modifying the configuration parameter enableMic
- **Insert a record with data mapped to specified columns**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value, ...)
```
Insert a record into table tb_name, mapping the data to the specified columns. Columns that do not appear in the SQL statement are automatically filled with NULL by the database. The primary key (timestamp) cannot be NULL.
- **Insert multiple records**
```mysql
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Insert multiple records into table tb_name.
- **Insert multiple records into specified columns**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Insert multiple records into the specified columns of table tb_name.
- **Insert multiple records into multiple tables**
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;
INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Insert multiple records into tables tb1_name and tb2_name at the same time.
- **Insert multiple records into specified columns of multiple tables at the same time**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...)
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...);
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Insert multiple records into the specified columns of tables tb1_name and tb2_name at the same time.
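A minimal sketch of such a multi-table insert, assuming two hypothetical sub-tables d1001 and d1002 that follow the meters schema used elsewhere in this document:
```mysql
INSERT INTO d1001 VALUES ('2018-10-03 14:38:05.000', 10.2, 219, 0.32) ('2018-10-03 14:38:15.000', 12.6, 218, 0.33)
            d1002 VALUES ('2018-10-03 14:38:16.650', 10.3, 218, 0.25);
```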
......@@ -318,23 +318,23 @@ SELECT select_expr [, select_expr ...]
```
Note: for INSERT-type SQL statements we use a streaming parsing strategy, so the leading, correct part of the SQL is executed before a later error is detected. In the SQL below, the INSERT statement is invalid, but the table d1001 is still created.
```mysql
taos> create table meters(ts timestamp, current float, voltage int, phase float) tags(location binary(30), groupId int);
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
Query OK, 0 row(s) affected (0.008245s)
taos> show stables;
taos> SHOW STABLES;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 |
Query OK, 1 row(s) in set (0.001029s)
taos> show tables;
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)
taos> insert into d1001 using meters tags('Beijing.Chaoyang', 2);
taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2);
DB error: invalid SQL: keyword VALUES or FILE required
taos> show tables;
taos> SHOW TABLES;
table_name | created_time | columns | stable_name |
======================================================================================================
d1001 | 2020-08-06 17:52:02.097 | 4 | meters |
......@@ -397,27 +397,41 @@ Query OK, 1 row(s) in set (0.020443s)
When SQL functions are used in a query, some of them support the wildcard. The difference is:
the ```count(\*)``` function returns only one column, while the ```first```, ```last```, and ```last_row``` functions return all columns.
```
taos> select count(*) from d1001;
```mysql
taos> SELECT COUNT(*) FROM d1001;
count(*) |
========================
3 |
Query OK, 1 row(s) in set (0.001035s)
```
```
taos> select first(*) from d1001;
```mysql
taos> SELECT FIRST(*) FROM d1001;
first(ts) | first(current) | first(voltage) | first(phase) |
=========================================================================================
2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 |
Query OK, 1 row(s) in set (0.000849s)
```
##### Tag columns
Starting from version 2.0.14, _tag columns_ can be specified in queries on regular tables, and the tag column values are returned together with the data of the regular columns.
```mysql
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
location | groupid | current |
======================================================================
Beijing.Chaoyang | 2 | 10.30000 |
Beijing.Chaoyang | 2 | 12.60000 |
Query OK, 2 row(s) in set (0.003112s)
```
Note: for regular tables, the wildcard * does not include the _tag columns_.
#### Result Set Column Names
In the ```SELECT``` clause, if the column names of the result set are not specified, the expression names in the ```SELECT``` clause are used as the column names by default. In addition, ```AS``` can be used to rename columns in the returned result set. For example:
```
taos> select ts, ts as primary_key_ts from d1001;
```mysql
taos> SELECT ts, ts AS primary_key_ts FROM d1001;
ts | primary_key_ts |
====================================================
2018-10-03 14:38:05.000 | 2018-10-03 14:38:05.000 |
......@@ -434,53 +448,53 @@ Query OK, 3 row(s) in set (0.001191s)
The FROM keyword can be followed by a list of tables (super tables) or by the result of a subquery.
If the user's current database is not specified, the table name can be prefixed with the database name to indicate which database the table belongs to, for example ```power.d1001``` to access a table across databases.
```
```mysql
SELECT * FROM power.d1001;
------------------------------
use power;
USE power;
SELECT * FROM d1001;
```
#### Special Functions
Some special query functions can be executed without a FROM clause. Get the current database with database():
```
taos> SELECT database();
```mysql
taos> SELECT DATABASE();
database() |
=================================
power |
Query OK, 1 row(s) in set (0.000079s)
```
If no default database was specified at login and the ```use``` command has not been issued to switch databases, NULL is returned.
```
taos> SELECT database();
```mysql
taos> SELECT DATABASE();
database() |
=================================
NULL |
Query OK, 1 row(s) in set (0.000184s)
```
Get the server and client version numbers:
```
taos> SELECT client_version();
```mysql
taos> SELECT CLIENT_VERSION();
client_version() |
===================
2.0.0.0 |
Query OK, 1 row(s) in set (0.000070s)
taos> SELECT server_version();
taos> SELECT SERVER_VERSION();
server_version() |
===================
2.0.0.0 |
Query OK, 1 row(s) in set (0.000077s)
```
Server status check statement. If the server is healthy, a number (for example, 1) is returned; if the server is abnormal, an error code is returned. This SQL syntax is compatible with connection pools checking TDengine's status and with third-party tools checking the database server's status, and it avoids connection losses in connection pools caused by using an incorrect heartbeat SQL statement.
```
taos> SELECT server_status();
```mysql
taos> SELECT SERVER_STATUS();
server_status() |
==================
1 |
Query OK, 1 row(s) in set (0.000074s)
taos> SELECT server_status() as status;
taos> SELECT SERVER_STATUS() AS status;
status |
==============
1 |
......@@ -493,15 +507,15 @@ Query OK, 1 row(s) in set (0.000081s)
#### Tips
Get the names of all sub-tables of a super table together with their tag information:
```
```mysql
SELECT TBNAME, location FROM meters;
```
Count the number of sub-tables under a super table:
```
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
Both of the above queries only support filter conditions on tags (TAGS) in the WHERE clause. For example:
```
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
......@@ -511,7 +525,7 @@ taos> SELECT TBNAME, location FROM meters;
d1001 | Beijing.Chaoyang |
Query OK, 4 row(s) in set (0.000881s)
taos> SELECT count(tbname) FROM meters WHERE groupId > 2;
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
count(tbname) |
========================
2 |
......@@ -545,7 +559,7 @@ Query OK, 1 row(s) in set (0.001091s)
- For the following examples, table tb1 is created with the statement below:
```mysql
CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50));
CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50));
```
- Query all records of tb1 from the past hour:
......@@ -563,7 +577,7 @@ Query OK, 1 row(s) in set (0.001091s)
- Query the sum of col1 and col2, aliased as complex, with timestamps later than 2018-06-01 08:00:00.000 and col2 greater than 1.2; output only 10 records, starting from the 5th:
```mysql
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' and col2 > 1.2 LIMIT 10 OFFSET 5;
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
- Query the records of the past 10 minutes where col2 is greater than 3.14, and output the result to the file `/home/testoutpu.csv`.
......@@ -590,13 +604,13 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
Example:
```mysql
taos> SELECT COUNT(*), COUNT(VOLTAGE) FROM meters;
taos> SELECT COUNT(*), COUNT(voltage) FROM meters;
count(*) | count(voltage) |
================================================
9 | 9 |
Query OK, 1 row(s) in set (0.004475s)
taos> SELECT COUNT(*), COUNT(VOLTAGE) FROM d1001;
taos> SELECT COUNT(*), COUNT(voltage) FROM d1001;
count(*) | count(voltage) |
================================================
3 | 3 |
......@@ -620,7 +634,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
11.466666751 | 220.444444444 | 0.293333333 |
Query OK, 1 row(s) in set (0.004135s)
taos> SELECT AVG(current), AVG(voltage), AVG(phase) from d1001;
taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001;
avg(current) | avg(voltage) | avg(phase) |
====================================================================================
11.733333588 | 219.333333333 | 0.316666673 |
......@@ -648,13 +662,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Example:
```mysql
taos> SELECT SUM(current), SUM(voltage), SUM(phase) from meters;
taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters;
sum(current) | sum(voltage) | sum(phase) |
================================================================================
103.200000763 | 1984 | 2.640000001 |
Query OK, 1 row(s) in set (0.001702s)
taos> SELECT SUM(current), SUM(voltage), SUM(phase) from d1001;
taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001;
sum(current) | sum(voltage) | sum(phase) |
================================================================================
35.200000763 | 658 | 0.950000018 |
......@@ -753,7 +767,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Description: returns the first non-NULL value written to a column of a table/super table.
Return data type: same as the field it is applied to.
Applicable fields: all fields.
Note: 1) to return the first (smallest timestamp) non-NULL value of each column, use FIRST(*); 2) if all values of a column in the result set are NULL, the returned result for that column is also NULL; 3) if every column in the result set is entirely NULL, no result is returned.
Note: 1) to return the first (smallest timestamp) non-NULL value of each column, use FIRST(\*); 2) if all values of a column in the result set are NULL, the returned result for that column is also NULL; 3) if every column in the result set is entirely NULL, no result is returned.
Example:
```mysql
......@@ -777,7 +791,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
Description: returns the last non-NULL value written to a column of a table/super table.
Return data type: same as the field it is applied to.
Applicable fields: all fields.
Note: 1) to return the last (largest timestamp) non-NULL value of each column, use LAST(*); 2) if all values of a column in the result set are NULL, the returned result for that column is also NULL; if every column in the result set is entirely NULL, no result is returned.
Note: 1) to return the last (largest timestamp) non-NULL value of each column, use LAST(\*); 2) if all values of a column in the result set are NULL, the returned result for that column is also NULL; if every column in the result set is entirely NULL, no result is returned.
Example:
```mysql
......@@ -1004,15 +1018,15 @@ SELECT function_list FROM stb_name
**Example:** the table creation statement for the smart meters is as follows:
```mysql
CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
For the data collected by the smart meters, compute over the past 24 hours, in 10-minute windows, the average, maximum, and median of the current, as well as the fitted line of the current trend over time. If a window has no computed value, fill it with the previous non-NULL value.
The query statement used is as follows:
```mysql
SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
WHERE TS>=NOW-1d
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
WHERE ts>=NOW-1d
INTERVAL(10m)
FILL(PREV);
```
......
......@@ -6,6 +6,8 @@
Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is done automatically, minimizing the operations workload. This chapter describes cluster management operations in detail.
For cluster setup, please refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1961.html">video tutorial</a>.
## Preparation
**Step 0**: Plan the FQDN of every physical node in the cluster, add the planned FQDN to each physical node's /etc/hostname, and modify each physical node's /etc/hosts to map the IP of every physical node in the cluster to its FQDN. [If DNS is deployed, contact your network administrator to configure the corresponding records on the DNS server.]
......@@ -227,4 +229,3 @@ SHOW MNODES;
TDengine provides an executable program, tarbitrator, which can be run on any Linux server. Click [Download Packages](https://www.taosdata.com/cn/all-downloads/) and, in the TDengine Arbitrator Linux section, select and install the appropriate version. The program needs almost no system resources; it only requires a network connection. Its command-line option `-p` specifies the port on which it serves externally; the default is 6042. When configuring each taosd instance, set the arbitrator parameter in the configuration file taos.cfg to the arbitrator's End Point. If this parameter is configured, the system automatically connects to the configured arbitrator when the number of replicas is even; if the number of replicas is odd, the system does not establish the connection even if an arbitrator is configured.
For cluster setup, please refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1961.html">video tutorial</a>.
name: tdengine
base: core18
version: '2.0.12.0'
version: '2.0.13.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.12.0
- usr/lib/libtaos.so.2.0.13.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
......
......@@ -421,7 +421,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// check if it is a sub-query of super table query first, if true, enter another routine
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
......
......@@ -75,11 +75,11 @@ static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SC
static int32_t convertFunctionId(int32_t optr, int16_t* functionId);
static uint8_t convertOptr(SStrToken *pToken);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery);
static bool validateIpAddress(const char* ip, size_t size);
static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd);
......@@ -1475,7 +1475,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery) {
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery) {
assert(pSelection != NULL && pCmd != NULL);
const char* msg2 = "functions can not be mixed up";
......@@ -1531,7 +1531,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
addPrimaryTsColIntoResult(pQueryInfo);
}
if (!functionCompatibleCheck(pQueryInfo, joinQuery)) {
if (!functionCompatibleCheck(pQueryInfo, joinQuery, intervalQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
......@@ -2810,7 +2810,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return false;
}
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery) {
int32_t startIdx = 0;
size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo);
......@@ -2826,6 +2826,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId];
if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) {
return false;
}
// diff function cannot be executed with other function
// arithmetic function can be executed with other arithmetic functions
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
......@@ -2850,7 +2854,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
}
}
if (functionId == TSDB_FUNC_LAST_ROW && joinQuery) {
if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) {
return false;
}
}
......@@ -4594,8 +4598,8 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) {
const char* msg0 = "only support order by primary timestamp";
const char* msg1 = "invalid column name";
const char* msg2 = "only support order by primary timestamp or queried column";
const char* msg3 = "only support order by primary timestamp or first tag in groupby clause";
const char* msg2 = "only support order by primary timestamp or first tag in groupby clause allowed";
const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
......@@ -5275,8 +5279,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
const char* msg0 = "soffset/offset can not be less than 0";
const char* msg1 = "slimit/soffset only available for STable query";
const char* msg2 = "functions mixed up in table query";
const char* msg3 = "slimit/soffset can not apply to projection query";
const char* msg2 = "slimit/soffset can not apply to projection query";
// handle the limit offset value, validate the limit
pQueryInfo->limit = pQuerySql->limit;
......@@ -5301,7 +5304,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query
if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// for projection query on super table, all queries are subqueries
......@@ -5359,24 +5362,6 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
size_t size = taosArrayGetSize(pQueryInfo->exprList);
bool hasTags = false;
bool hasOtherFunc = false;
// filter the query functions operating on "tbname" column that are not supported by normal columns.
for (int32_t i = 0; i < size; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
hasTags = true;
} else {
hasOtherFunc = true;
}
}
if (hasTags && hasOtherFunc) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
return TSDB_CODE_SUCCESS;
......@@ -5848,14 +5833,43 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
return TSDB_CODE_SUCCESS;
}
static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
bool tagProjection = false;
bool tableCounting = false;
int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TAGPRJ) {
tagProjection = true;
continue;
}
if (functionId == TSDB_FUNC_COUNT) {
assert(pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX);
tableCounting = true;
}
}
return (tableCounting && tagProjection)? -1:0;
}
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "functions/columns not allowed in group by query";
const char* msg2 = "projection query on columns not allowed";
const char* msg3 = "group by not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";
// only retrieve tags, group by is not supportted
if (tscQueryTags(pQueryInfo)) {
if (doTagFunctionCheck(pQueryInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
} else {
......@@ -6278,10 +6292,12 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
const char* msg1 = "invalid table name";
const char* msg2 = "functions not allowed in CQ";
const char* msg3 = "fill only available for interval query";
const char* msg4 = "fill option not supported in stream computing";
const char* msg5 = "sql too long"; // todo ADD support
const char* msg6 = "from missing in subclause";
const char* msg7 = "time interval is required";
SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
......@@ -6291,10 +6307,10 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// if sql specifies db, use it, otherwise use default db
SStrToken* pzTableName = &(pCreateTable->name);
SStrToken* pName = &(pCreateTable->name);
SQuerySQL* pQuerySql = pCreateTable->pSelect;
if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) {
if (tscValidateName(pName) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
......@@ -6320,7 +6336,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false) != TSDB_CODE_SUCCESS) {
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -6333,15 +6349,19 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
// set interval value
if (parseIntervalClause(pSql, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->interval.interval > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
if ((pQueryInfo->interval.interval > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (!tscIsProjectionQuery(pQueryInfo) && pQueryInfo->interval.interval == 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// set the created table[stream] name
code = tscSetTableFullName(pTableMetaInfo, pzTableName, pSql);
code = tscSetTableFullName(pTableMetaInfo, pName, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
......@@ -6565,7 +6585,9 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
int32_t joinQuery = (pQuerySql->from != NULL && taosArrayGetSize(pQuerySql->from) > 2);
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
int32_t intervalQuery = !(pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0);
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery, intervalQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......
......@@ -65,44 +65,51 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
return retryDelta;
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
SSqlStream *pStream = (SSqlStream *)pMsg->ahandle;
SSqlObj * pSql = pStream->pSql;
static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
SSqlStream *pStream = (SSqlStream *)param;
assert(pStream->pSql == tres && code == TSDB_CODE_SUCCESS);
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
pSql->param = pStream;
SSqlObj* pSql = (SSqlObj*) tres;
pSql->fp = doLaunchQuery;
pSql->fetchFp = doLaunchQuery;
pSql->res.completed = false;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int code = tscGetTableMeta(pSql, pTableMetaInfo);
pSql->res.code = code;
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == 0 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, 0);
pSql->res.code = code;
}
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
} else {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
tscDebug("%p stream:%p start stream query on:%s", pSql, pStream, pTableMetaInfo->name);
tscDoQuery(pStream->pSql);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, pTableMetaInfo->name);
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
tscDoQuery(pSql);
tscIncStreamExecutionCount(pStream);
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
pSql->res.code = code;
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
}
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
SSqlStream *pStream = (SSqlStream *)pMsg->ahandle;
doLaunchQuery(pStream, pStream->pSql, 0);
}
static void tscProcessStreamTimer(void *handle, void *tmrId) {
SSqlStream *pStream = (SSqlStream *)handle;
if (pStream == NULL) return;
if (pStream->pTimer != tmrId) return;
if (pStream == NULL || pStream->pTimer != tmrId) {
return;
}
pStream->pTimer = NULL;
pStream->numOfRes = 0; // reset the numOfRes.
......@@ -392,11 +399,16 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
tscSetRetryTimer(pStream, pSql, timer);
}
static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (!pStream->isProject && pQueryInfo->interval.interval == 0) {
sprintf(pSql->cmd.payload, "the interval value is 0");
return -1;
}
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
......@@ -436,6 +448,8 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->interval.sliding = 0;
}
return TSDB_CODE_SUCCESS;
}
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
......@@ -485,34 +499,19 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
}
static void setErrorInfo(SSqlObj* pSql, int32_t code, char* info) {
if (pSql == NULL) {
return;
}
SSqlCmd* pCmd = &pSql->cmd;
pSql->res.code = code;
if (info != NULL) {
strncpy(pCmd->payload, info, pCmd->payloadLen);
}
}
static void tscCreateStream(void *param, TAOS_RES *res, int code) {
SSqlStream* pStream = (SSqlStream*)param;
SSqlObj* pSql = pStream->pSql;
SSqlCmd* pCmd = &pSql->cmd;
if (code != TSDB_CODE_SUCCESS) {
setErrorInfo(pSql, code, pCmd->payload);
tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, pSql->sqlstr, pCmd->payload, code);
pSql->res.code = code;
tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code));
pStream->fp(pStream->param, NULL, NULL);
return;
}
registerSqlObj(pSql);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
......@@ -523,13 +522,22 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
pStream->ctime = taosGetTimestamp(pStream->precision);
pStream->etime = pQueryInfo->window.ekey;
tscAddIntoStreamList(pStream);
if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream);
pStream->fp(pStream->param, NULL, NULL);
return;
}
tscSetSlidingWindowInfo(pSql, pStream);
pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime);
int64_t starttime = tscGetLaunchTimestamp(pStream);
pCmd->command = TSDB_SQL_SELECT;
registerSqlObj(pSql);
tscAddIntoStreamList(pStream);
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
......
......@@ -188,8 +188,10 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
tsBufFlush(output2);
tsBufDestroy(pSupporter1->pTSBuf);
pSupporter1->pTSBuf = NULL;
tsBufDestroy(pSupporter2->pTSBuf);
pSupporter2->pTSBuf = NULL;
TSKEY et = taosGetTimestampUs();
tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
......@@ -219,12 +221,9 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
assert (pSupporter->uid != 0);
taosGetTmpfilePath("join-", pSupporter->path);
pSupporter->f = fopen(pSupporter->path, "w");
// todo handle error
if (pSupporter->f == NULL) {
tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
}
// do NOT create file here to reduce crash generated file left issue
pSupporter->f = NULL;
return pSupporter;
}
......@@ -244,12 +243,19 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
tscFieldInfoClear(&pSupporter->fieldsInfo);
if (pSupporter->pTSBuf != NULL) {
tsBufDestroy(pSupporter->pTSBuf);
pSupporter->pTSBuf = NULL;
}
unlink(pSupporter->path);
if (pSupporter->f != NULL) {
fclose(pSupporter->f);
unlink(pSupporter->path);
pSupporter->f = NULL;
}
if (pSupporter->pVgroupTables != NULL) {
taosArrayDestroy(pSupporter->pVgroupTables);
pSupporter->pVgroupTables = NULL;
......@@ -526,6 +532,8 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
freeJoinSubqueryObj(pSqlObj);
}
tscDestroyJoinSupporter(pSupporter);
}
// update the query time range according to the join results on timestamp
......@@ -921,6 +929,22 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
}
if (numOfRows > 0) { // write the compressed timestamp to disk file
if(pSupporter->f == NULL) {
pSupporter->f = fopen(pSupporter->path, "w");
if (pSupporter->f == NULL) {
tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
return;
}
}
fwrite(pRes->data, (size_t)pRes->numOfRows, 1, pSupporter->f);
fclose(pSupporter->f);
pSupporter->f = NULL;
......@@ -930,6 +954,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
tscAsyncResultOnError(pParentSql);
return;
......@@ -2094,6 +2121,13 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo
}
void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
// the param may be null, since it may be done by other query threads. and the asyncOnError may enter in this
// function while kill query by a user.
if (param == NULL) {
assert(code != TSDB_CODE_SUCCESS);
return;
}
SRetrieveSupport *trsupport = (SRetrieveSupport *) param;
SSqlObj* pParentSql = trsupport->pParentSql;
......
......@@ -458,12 +458,13 @@ void tscFreeRegisteredSqlObj(void *pSql) {
assert(RID_VALID(p->self));
tscFreeSqlObj(p);
taosReleaseRef(tscRefId, pTscObj->rid);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1);
int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1);
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
tscFreeSqlObj(p);
taosReleaseRef(tscRefId, pTscObj->rid);
}
void tscFreeSqlObj(SSqlObj* pSql) {
......
......@@ -19,136 +19,137 @@ using System.Runtime.InteropServices;
namespace TDengineDriver
{
enum TDengineDataType {
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
enum TDengineDataType
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
}
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
[DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
[DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
[DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}
\ No newline at end of file
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}
......@@ -56,6 +56,12 @@
<scope>test</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.47</version>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
......@@ -73,7 +79,14 @@
<version>1.2.58</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.49</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
......
......@@ -19,68 +19,71 @@ import java.util.Map;
public abstract class TSDBConstants {
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final long JNI_NULL_POINTER = 0L;
public static final long JNI_NULL_POINTER = 0L;
public static final int JNI_SUCCESS = 0;
public static final int JNI_TDENGINE_ERROR = -1;
public static final int JNI_CONNECTION_NULL = -2;
public static final int JNI_RESULT_SET_NULL = -3;
public static final int JNI_NUM_OF_FIELDS_0 = -4;
public static final int JNI_SQL_NULL = -5;
public static final int JNI_FETCH_END = -6;
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
public static final int TSDB_DATA_TYPE_INT = 4;
public static final int TSDB_DATA_TYPE_BIGINT = 5;
public static final int TSDB_DATA_TYPE_FLOAT = 6;
public static final int TSDB_DATA_TYPE_DOUBLE = 7;
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
public static String WrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static final int JNI_SUCCESS = 0;
public static final int JNI_TDENGINE_ERROR = -1;
public static final int JNI_CONNECTION_NULL = -2;
public static final int JNI_RESULT_SET_NULL = -3;
public static final int JNI_NUM_OF_FIELDS_0 = -4;
public static final int JNI_SQL_NULL = -5;
public static final int JNI_FETCH_END = -6;
public static String FixErrMsg(int code) {
switch (code) {
case JNI_TDENGINE_ERROR:
return WrapErrMsg("internal error of database!");
case JNI_CONNECTION_NULL:
return WrapErrMsg("invalid tdengine connection!");
case JNI_RESULT_SET_NULL:
return WrapErrMsg("invalid resultset pointer!");
case JNI_NUM_OF_FIELDS_0:
return WrapErrMsg("invalid num of fields!");
case JNI_SQL_NULL:
return WrapErrMsg("can't execute empty sql!");
case JNI_FETCH_END:
return WrapErrMsg("fetch to the end of resultset");
default:
break;
}
return WrapErrMsg("unkown error!");
}
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
public static final int TSDB_DATA_TYPE_INT = 4;
public static final int TSDB_DATA_TYPE_BIGINT = 5;
public static final int TSDB_DATA_TYPE_FLOAT = 6;
public static final int TSDB_DATA_TYPE_DOUBLE = 7;
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
static {
DATATYPE_MAP = new HashMap<Integer, String>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
DATATYPE_MAP.put(4, "INT");
DATATYPE_MAP.put(5, "BIGINT");
DATATYPE_MAP.put(6, "FLOAT");
DATATYPE_MAP.put(7, "DOUBLE");
DATATYPE_MAP.put(8, "BINARY");
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
// nchar field's max length
public static final int maxFieldSize = 16 * 1024;
public static String WrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static String FixErrMsg(int code) {
switch (code) {
case JNI_TDENGINE_ERROR:
return WrapErrMsg("internal error of database!");
case JNI_CONNECTION_NULL:
return WrapErrMsg("invalid tdengine connection!");
case JNI_RESULT_SET_NULL:
return WrapErrMsg("invalid resultset pointer!");
case JNI_NUM_OF_FIELDS_0:
return WrapErrMsg("invalid num of fields!");
case JNI_SQL_NULL:
return WrapErrMsg("can't execute empty sql!");
case JNI_FETCH_END:
return WrapErrMsg("fetch to the end of resultset");
default:
break;
}
return WrapErrMsg("unkown error!");
}
static {
DATATYPE_MAP = new HashMap<Integer, String>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
DATATYPE_MAP.put(4, "INT");
DATATYPE_MAP.put(5, "BIGINT");
DATATYPE_MAP.put(6, "FLOAT");
DATATYPE_MAP.put(7, "DOUBLE");
DATATYPE_MAP.put(8, "BINARY");
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
}
package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.TSDBDriver;
import java.sql.*;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
public class RestfulConnection implements Connection {
private static final String CONNECTION_IS_CLOSED = "connection is closed.";
private static final String AUTO_COMMIT_IS_TRUE = "auto commit is true";
private final String host;
private final int port;
private final Properties props;
private final String database;
private volatile String database;
private final String url;
/******************************************************/
private boolean isClosed;
private DatabaseMetaData metadata;
private Map<String, Class<?>> typeMap;
private Properties clientInfoProps = new Properties();
public RestfulConnection(String host, String port, Properties props, String database, String url) {
this.host = host;
......@@ -21,280 +31,424 @@ public class RestfulConnection implements Connection {
this.props = props;
this.database = database;
this.url = url;
this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this);
}
@Override
public Statement createStatement() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed."));
throw new SQLException(CONNECTION_IS_CLOSED);
return new RestfulStatement(this, database);
}
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//TODO: prepareStatement
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public String nativeSQL(String sql) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//nothing did
return sql;
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (!autoCommit)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean getAutoCommit() throws SQLException {
return false;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return true;
}
@Override
public void commit() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(AUTO_COMMIT_IS_TRUE);
//nothing to do
}
@Override
public void rollback() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(AUTO_COMMIT_IS_TRUE);
//nothing to do
}
@Override
public void close() throws SQLException {
if (isClosed)
return;
//TODO: release all resources
isClosed = true;
}
@Override
public boolean isClosed() throws SQLException {
return false;
return isClosed;
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
//TODO: RestfulDatabaseMetaData is not implemented
return new RestfulDatabaseMetaData();
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return this.metadata;
}
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
// nothing to do
}
@Override
public boolean isReadOnly() throws SQLException {
return false;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return true;
}
@Override
public void setCatalog(String catalog) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
this.database = catalog;
}
}
@Override
public String getCatalog() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return this.database;
}
@Override
public void setTransactionIsolation(int level) throws SQLException {
//transaction is not supported
throw new SQLFeatureNotSupportedException("transactions are not supported");
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
switch (level) {
case Connection.TRANSACTION_NONE:
break;
case Connection.TRANSACTION_READ_UNCOMMITTED:
case Connection.TRANSACTION_READ_COMMITTED:
case Connection.TRANSACTION_REPEATABLE_READ:
case Connection.TRANSACTION_SERIALIZABLE:
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
default:
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
}
/**
*
*/
@Override
public int getTransactionIsolation() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//Connection.TRANSACTION_NONE specifies that transactions are not supported.
return Connection.TRANSACTION_NONE;
}
@Override
public SQLWarning getWarnings() throws SQLException {
//TODO: getWarnings not implemented
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return null;
}
@Override
public void clearWarnings() throws SQLException {
throw new SQLFeatureNotSupportedException("clearWarnings not supported.");
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//nothing to do
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) {
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return createStatement();
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES);
return this.prepareStatement(sql);
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
if (this.typeMap == null) {
this.typeMap = new HashMap<>();
}
return this.typeMap;
}
}
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
this.typeMap = map;
}
}
@Override
public void setHoldability(int holdability) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getHoldability() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
@Override
public Savepoint setSavepoint() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Savepoint setSavepoint(String name) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public void rollback(Savepoint savepoint) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return createStatement(resultSetType, resultSetConcurrency);
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return prepareStatement(sql, resultSetType, resultSetConcurrency);
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Clob createClob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Blob createBlob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public NClob createNClob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public SQLXML createSQLXML() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean isValid(int timeout) throws SQLException {
return false;
if (timeout < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// TODO:
/* The driver shall submit a query on the connection or use some other mechanism that positively verifies
the connection is still valid when this method is called.*/
return !isClosed();
}
@Override
public void setClientInfo(String name, String value) throws SQLClientInfoException {
if (isClosed)
throw new SQLClientInfoException();
clientInfoProps.setProperty(name, value);
}
@Override
public void setClientInfo(Properties properties) throws SQLClientInfoException {
if (isClosed)
throw new SQLClientInfoException();
for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) {
String name = (String) enumer.nextElement();
clientInfoProps.put(name, properties.getProperty(name));
}
}
@Override
public String getClientInfo(String name) throws SQLException {
return null;
if (isClosed)
throw new SQLClientInfoException();
return clientInfoProps.getProperty(name);
}
@Override
public Properties getClientInfo() throws SQLException {
return null;
if (isClosed)
throw new SQLClientInfoException();
return clientInfoProps;
}
@Override
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public void setSchema(String schema) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
this.database = schema;
}
}
@Override
public String getSchema() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return this.database;
}
@Override
public void abort(Executor executor) throws SQLException {
if (executor == null) {
throw new SQLException("Executor can not be null");
}
executor.execute(() -> {
try {
close();
} catch (SQLException e) {
e.printStackTrace();
}
});
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getNetworkTimeout() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return 0;
}
......
......@@ -33,7 +33,7 @@ public class RestfulDriver extends AbstractTaosDriver {
return null;
Properties props = parseURL(url, info);
String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST);
String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null;
......
......@@ -2,13 +2,15 @@ package com.taosdata.jdbc.rs;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.List;
import java.util.ArrayList;
public class RestfulResultSetMetaData implements ResultSetMetaData {
private List<String> fields;
private final String database;
private ArrayList<RestfulResultSet.Field> fields;
public RestfulResultSetMetaData(List<String> fields) {
public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields) {
this.database = database;
this.fields = fields;
}
......@@ -24,6 +26,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public boolean isCaseSensitive(int column) throws SQLException {
//TODO
return false;
}
......@@ -39,7 +42,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int isNullable(int column) throws SQLException {
return 0;
return ResultSetMetaData.columnNullable;
}
@Override
......@@ -54,7 +57,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnLabel(int column) throws SQLException {
return fields.get(column - 1);
return fields.get(column - 1).name;
}
@Override
......@@ -64,7 +67,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getSchemaName(int column) throws SQLException {
return null;
return this.database;
}
@Override
......@@ -84,7 +87,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getCatalogName(int column) throws SQLException {
return null;
return this.database;
}
@Override
......
......@@ -7,20 +7,60 @@ import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;
import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
public class RestfulStatement implements Statement {
private static final String STATEMENT_CLOSED = "Statement already closed.";
private boolean closed;
private String database;
private final RestfulConnection conn;
public RestfulStatement(RestfulConnection c, String database) {
this.conn = c;
private volatile RestfulResultSet resultSet;
private volatile int affectedRows;
private volatile boolean closeOnCompletion;
public RestfulStatement(RestfulConnection conn, String database) {
this.conn = conn;
this.database = database;
}
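// Strips trailing clauses (where/interval/fill/sliding/group by/order by/slimit/limit) from the lowercased SQL
// and returns the table identifiers listed after FROM (aliases dropped), or null when the SQL has no FROM clause.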
private String[] parseTableIdentifier(String sql) {
sql = sql.trim().toLowerCase();
String[] ret = null;
if (sql.contains("where"))
sql = sql.substring(0, sql.indexOf("where"));
if (sql.contains("interval"))
sql = sql.substring(0, sql.indexOf("interval"));
if (sql.contains("fill"))
sql = sql.substring(0, sql.indexOf("fill"));
if (sql.contains("sliding"))
sql = sql.substring(0, sql.indexOf("sliding"));
if (sql.contains("group by"))
sql = sql.substring(0, sql.indexOf("group by"));
if (sql.contains("order by"))
sql = sql.substring(0, sql.indexOf("order by"));
if (sql.contains("slimit"))
sql = sql.substring(0, sql.indexOf("slimit"));
if (sql.contains("limit"))
sql = sql.substring(0, sql.indexOf("limit"));
// parse
if (sql.contains("from")) {
sql = sql.substring(sql.indexOf("from") + 4).trim();
return Arrays.asList(sql.split(",")).stream()
.map(tableIdentifier -> {
tableIdentifier = tableIdentifier.trim();
if (tableIdentifier.contains(" "))
tableIdentifier = tableIdentifier.substring(0, tableIdentifier.indexOf(" "));
return tableIdentifier;
}).collect(Collectors.joining(",")).split(",");
}
return ret;
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed())
......@@ -29,43 +69,33 @@ public class RestfulStatement implements Statement {
throw new SQLException("not a select sql for executeQuery: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
// row data
String result = HttpClientPoolUtil.execute(url, sql);
String fields = "";
List<String> words = Arrays.asList(sql.split(" "));
if (words.get(0).equalsIgnoreCase("select")) {
int index = 0;
if (words.contains("from")) {
index = words.indexOf("from");
}
if (words.contains("FROM")) {
index = words.indexOf("FROM");
}
fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + words.get(index + 1));
}
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
}
String dataStr = jsonObject.getString("data");
if ("use".equalsIgnoreCase(fields.split(" ")[0])) {
return new RestfulResultSet(dataStr, "");
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
JSONObject jsonField = JSON.parseObject(fields);
if (jsonField == null) {
return new RestfulResultSet(dataStr, "");
}
if (jsonField.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonField.getString("desc") + "\n" +
"error code: " + jsonField.getString("code")));
// parse table name from sql
String[] tableIdentifiers = parseTableIdentifier(sql);
if (tableIdentifiers != null) {
List<JSONObject> fieldJsonList = new ArrayList<>();
for (String tableIdentifier : tableIdentifiers) {
// field meta
String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
JSONObject fieldJson = JSON.parseObject(fields);
if (fieldJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
}
fieldJsonList.add(fieldJson);
}
this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
} else {
this.resultSet = new RestfulResultSet(database, this, resultJson);
}
String fieldData = jsonField.getString("data");
return new RestfulResultSet(dataStr, fieldData);
this.affectedRows = 0;
return resultSet;
}
@Override
......@@ -78,77 +108,103 @@ public class RestfulStatement implements Statement {
if (this.database == null)
throw new SQLException("Database not specified or available");
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
// HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + jsonObject.getString("desc") + "\n" + "error code: " + jsonObject.getString("code")));
}
return Integer.parseInt(jsonObject.getString("rows"));
this.resultSet = null;
this.affectedRows = Integer.parseInt(jsonObject.getString("rows"));
return this.affectedRows;
}
@Override
public void close() throws SQLException {
this.closed = true;
synchronized (RestfulStatement.class) {
if (!isClosed())
this.closed = true;
}
}
@Override
public int getMaxFieldSize() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return TSDBConstants.maxFieldSize;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public int getMaxRows() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
if (isClosed())
throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
}
@Override
public int getQueryTimeout() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (seconds < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@Override
public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public SQLWarning getWarnings() throws SQLException {
//TODO: getWarnings not Implemented
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return null;
}
@Override
public void clearWarnings() throws SQLException {
// nothing to do
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
}
@Override
public void setCursorName(String name) throws SQLException {
if (isClosed())
throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
......@@ -159,133 +215,181 @@ public class RestfulStatement implements Statement {
// if a "use" statement was executed, the current Statement's catalog should be set to the new database
if (SqlSyntaxValidator.isUseSql(sql)) {
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
}
if (this.database == null)
throw new SQLException("Database not specified or available");
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
// use database
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
// execute sql
String result = HttpClientPoolUtil.execute(url, sql);
// parse result
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
if (SqlSyntaxValidator.isSelectSql(sql)) {
executeQuery(sql);
} else if (SqlSyntaxValidator.isShowSql(sql) || SqlSyntaxValidator.isDescribeSql(sql)) {
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
if (!SqlSyntaxValidator.isShowDatabaseSql(sql)) {
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
}
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
this.resultSet = new RestfulResultSet(database, this, resultJson);
} else {
executeUpdate(sql);
}
return true;
}
@Override
public ResultSet getResultSet() throws SQLException {
return null;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return resultSet;
}
@Override
public int getUpdateCount() throws SQLException {
return 0;
if (isClosed()) {
throw new SQLException("Invalid method call on a closed statement.");
}
return this.affectedRows;
}
@Override
public boolean getMoreResults() throws SQLException {
return false;
return getMoreResults(CLOSE_CURRENT_RESULT);
}
@Override
public void setFetchDirection(int direction) throws SQLException {
if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
this.resultSet.setFetchDirection(direction);
}
@Override
public int getFetchDirection() throws SQLException {
return 0;
return this.resultSet.getFetchDirection();
}
@Override
public void setFetchSize(int rows) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (rows < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
}
@Override
public int getFetchSize() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public int getResultSetConcurrency() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.resultSet.getConcurrency();
}
@Override
public int getResultSetType() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.resultSet.getType();
}
@Override
public void addBatch(String sql) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
//TODO:
}
@Override
public void clearBatch() throws SQLException {
//TODO:
}
@Override
public int[] executeBatch() throws SQLException {
//TODO:
return new int[0];
}
@Override
public Connection getConnection() throws SQLException {
return null;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.conn;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (resultSet == null)
return false;
// switch (current) {
// case CLOSE_CURRENT_RESULT:
// resultSet.close();
// break;
// case KEEP_CURRENT_RESULT:
// break;
// case CLOSE_ALL_RESULTS:
// resultSet.close();
// break;
// default:
// throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// }
// return next;
return false;
}
@Override
public ResultSet getGeneratedKeys() throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
return 0;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
return 0;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
return 0;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
return false;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
return false;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
return false;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getResultSetHoldability() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.resultSet.getHoldability();
}
@Override
......@@ -295,22 +399,30 @@ public class RestfulStatement implements Statement {
@Override
public void setPoolable(boolean poolable) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
//nothing to do
}
@Override
public boolean isPoolable() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
this.closeOnCompletion = true;
}
@Override
public boolean isCloseOnCompletion() throws SQLException {
return false;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.closeOnCompletion;
}
@Override
......
......@@ -17,6 +17,8 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import java.nio.charset.Charset;
public class HttpClientPoolUtil {
public static PoolingHttpClientConnectionManager cm = null;
......@@ -94,7 +96,9 @@ public class HttpClientPoolUtil {
initPools();
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setEntity(new StringEntity(data));
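// hard-coded HTTP Basic auth header (base64 of the default root:taosdata credentials) and plain-text content type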
method.setHeader("Authorization", "Basic cm9vdDp0YW9zZGF0YQ==");
method.setHeader("Content-Type", "text/plain");
method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
......@@ -105,26 +109,13 @@ public class HttpClientPoolUtil {
if (method != null) {
method.abort();
}
// e.printStackTrace();
// logger.error("execute post request exception, url:" + uri + ", exception:" + e.toString()
// + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception("execute post request exception, url:"
+ uri + ", exception:" + e.toString() +
", cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
// e.printStackTrace();
// logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
// + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception(
"close response exception, url:" + uri +
", exception:" + e.toString()
+ ", cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
}
}
}
......
......@@ -15,14 +15,12 @@
package com.taosdata.jdbc.utils;
import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBJNIConnector;
import java.sql.Connection;
import java.sql.SQLException;
public class SqlSyntaxValidator {
private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use"};
private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use", "import"};
private static final String[] querySQL = {"select"};
private TSDBConnection tsdbConnection;
......@@ -31,22 +29,6 @@ public class SqlSyntaxValidator {
this.tsdbConnection = (TSDBConnection) connection;
}
public boolean validateSqlSyntax(String sql) throws SQLException {
boolean res = false;
if (tsdbConnection == null || tsdbConnection.isClosed()) {
throw new SQLException("invalid connection");
} else {
TSDBJNIConnector jniConnector = tsdbConnection.getConnection();
if (jniConnector == null) {
throw new SQLException("jniConnector is null");
} else {
res = jniConnector.validateCreateTableSql(sql);
}
}
return res;
}
public static boolean isValidForExecuteUpdate(String sql) {
for (String prefix : updateSQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
......@@ -56,18 +38,28 @@ public class SqlSyntaxValidator {
}
public static boolean isUseSql(String sql) {
return sql.trim().toLowerCase().startsWith(updateSQL[8]) || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
return sql.trim().toLowerCase().startsWith("use") || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
}
public static boolean isShowSql(String sql) {
return sql.trim().toLowerCase().startsWith("show");
}
public static boolean isUpdateSql(String sql) {
return sql.trim().toLowerCase().startsWith(updateSQL[1]);
public static boolean isDescribeSql(String sql) {
return sql.trim().toLowerCase().startsWith("describe");
}
public static boolean isInsertSql(String sql) {
return sql.trim().toLowerCase().startsWith(updateSQL[0]);
return sql.trim().toLowerCase().startsWith("insert") || sql.trim().toLowerCase().startsWith("import");
}
public static boolean isSelectSql(String sql) {
return sql.trim().toLowerCase().startsWith(querySQL[0]);
return sql.trim().toLowerCase().startsWith("select");
}
public static boolean isShowDatabaseSql(String sql) {
return sql.trim().toLowerCase().matches("show\\s*databases");
}
}
......@@ -7,7 +7,7 @@ import org.junit.Test;
import java.sql.*;
import java.util.Properties;
public class DatabaseMetaDataTest extends BaseTest {
public class DatabaseMetaDataTest {
static Connection connection = null;
static PreparedStatement statement = null;
static String dbName = "test";
......@@ -23,20 +23,21 @@ public class DatabaseMetaDataTest extends BaseTest {
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
statement = connection.prepareStatement(sql);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
public void testMetaDataTest() throws SQLException {
DatabaseMetaData databaseMetaData = connection.getMetaData();
ResultSet resultSet = databaseMetaData.getTables(dbName, "t*", "t*", new String[]{"t"});
while (resultSet.next()) {
......@@ -180,7 +181,7 @@ public class DatabaseMetaDataTest extends BaseTest {
databaseMetaData.getCatalogs();
// databaseMetaData.getTableTypes();
databaseMetaData.getColumns("", "", "", "");
databaseMetaData.getColumns(dbName, "", tName, "");
databaseMetaData.getColumnPrivileges("", "", "", "");
databaseMetaData.getTablePrivileges("", "", "");
databaseMetaData.getBestRowIdentifier("", "", "", 0, false);
......
......@@ -19,7 +19,7 @@ public class AppMemoryLeakTest {
}
}
@Test
@Test(expected = Exception.class)
public void testCreateTooManyStatement() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
int stmtCnt = 0;
......@@ -30,15 +30,4 @@ public class AppMemoryLeakTest {
}
}
public static void main(String[] args) throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
int stmtCnt = 0;
Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
while (true) {
Statement stmt = conn.createStatement();
System.out.println(++stmtCnt + " : " + stmt);
}
}
}
package com.taosdata.jdbc.cases;
import org.junit.Assert;
import org.junit.Test;
import java.sql.DriverManager;
import java.sql.SQLException;
public class ConnectWrongDatabaseTest {
@Test
public void connect() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata");
} catch (ClassNotFoundException e) {
e.printStackTrace();
} catch (SQLException e) {
System.out.println(e.getMessage());
Assert.assertEquals("TDengine Error: Invalid database name", e.getMessage());
}
}
}
package com.taosdata.jdbc.rs;
import org.junit.*;
import org.junit.runners.MethodSorters;
......
package com.taosdata.jdbc.rs;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
import java.sql.*;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class SQLTest {
private static final String host = "master";
private static Connection connection;
@Test
public void testCase001() {
String sql = "create database if not exists restful_test";
execute(sql);
}
@Test
public void testCase002() {
String sql = "use restful_test";
execute(sql);
}
@Test
public void testCase003() {
String sql = "show databases";
executeWithResult(sql);
}
@Test
public void testCase004() {
String sql = "show tables";
executeWithResult(sql);
}
@Test
public void testCase005() {
String sql = "show stables";
executeWithResult(sql);
}
@Test
public void testCase006() {
String sql = "show dnodes";
executeWithResult(sql);
}
@Test
public void testCase007() {
String sql = "show vgroups";
executeWithResult(sql);
}
@Test
public void testCase008() {
String sql = "drop table if exists restful_test.weather";
execute(sql);
}
@Test
public void testCase009() {
String sql = "create table if not exists restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))";
execute(sql);
}
@Test
public void testCase010() {
String sql = "create table t1 using restful_test.weather tags('北京')";
execute(sql);
}
@Test
public void testCase011() {
String sql = "insert into restful_test.t1 values(now, 22.22)";
executeUpdate(sql);
}
@Test
public void testCase012() {
String sql = "insert into restful_test.t1 values('2020-01-01 00:00:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase013() {
String sql = "insert into restful_test.t1 values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase014() {
String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:03:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase015() {
String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase016() {
String sql = "insert into t1 values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t2 values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)";
executeUpdate(sql);
}
@Test
public void testCase017() {
String sql = "Insert into t3 using weather tags('广东') values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t4 using weather tags('天津') values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)";
executeUpdate(sql);
}
@Test
public void testCase018() {
String sql = "select * from restful_test.t1";
executeQuery(sql);
}
@Test
public void testCase019() {
String sql = "select * from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase020() {
String sql = "select ts, temperature from restful_test.t1";
executeQuery(sql);
}
@Test
public void testCase021() {
String sql = "select ts, temperature from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase022() {
String sql = "select temperature, ts from restful_test.t1";
executeQuery(sql);
}
@Test
public void testCase023() {
String sql = "select temperature, ts from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase024() {
String sql = "import into restful_test.t5 using weather tags('石家庄') values('2020-01-01 00:01:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase025() {
String sql = "import into restful_test.t6 using weather tags('沈阳') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase026() {
String sql = "import into restful_test.t7 using weather tags('长沙') values('2020-01-01 00:01:00.000', 22.22) restful_test.t8 using weather tags('吉林') values('2020-01-01 00:01:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase027() {
String sql = "import into restful_test.t9 using weather tags('武汉') values('2020-01-01 00:01:00.000', 22.22) ,('2020-01-02 00:01:00.000', 22.22) restful_test.t10 using weather tags('哈尔滨') values('2020-01-01 00:01:00.000', 22.22),('2020-01-02 00:01:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase028() {
String sql = "select location, temperature, ts from restful_test.weather where temperature > 1";
executeQuery(sql);
}
@Test
public void testCase029() {
String sql = "select location, temperature, ts from restful_test.weather where temperature < 1";
executeQuery(sql);
}
@Test
public void testCase030() {
String sql = "select location, temperature, ts from restful_test.weather where ts > now";
executeQuery(sql);
}
@Test
public void testCase031() {
String sql = "select location, temperature, ts from restful_test.weather where ts < now";
executeQuery(sql);
}
@Test
public void testCase032() {
String sql = "select count(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase033() {
String sql = "select first(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase034() {
String sql = "select last(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase035() {
String sql = "select last_row(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase036() {
String sql = "select ts, ts as primary_key from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase037() {
String sql = "select database()";
execute("use restful_test");
executeQuery(sql);
}
@Test
public void testCase038() {
String sql = "select client_version()";
executeQuery(sql);
}
@Test
public void testCase039() {
String sql = "select server_status()";
executeQuery(sql);
}
@Test
public void testCase040() {
String sql = "select server_status() as status";
executeQuery(sql);
}
@Test
public void testCase041() {
String sql = "select tbname, location from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase042() {
String sql = "select count(tbname) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase043() {
String sql = "select * from restful_test.weather where ts < now - 1h";
executeQuery(sql);
}
@Test
public void testCase044() {
String sql = "select * from restful_test.weather where ts < now - 1h and location like '%'";
executeQuery(sql);
}
@Test
public void testCase045() {
String sql = "select * from restful_test.weather where ts < now - 1h order by ts";
executeQuery(sql);
}
@Test
public void testCase046() {
String sql = "select last(*) from restful_test.weather where ts < now - 1h group by tbname order by tbname";
executeQuery(sql);
}
@Test
public void testCase047() {
String sql = "select * from restful_test.weather limit 2";
executeQuery(sql);
}
@Test
public void testCase048() {
String sql = "select * from restful_test.weather limit 2 offset 5";
executeQuery(sql);
}
@Test
public void testCase049() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts ";
executeQuery(sql);
}
@Test
public void testCase050() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location";
executeQuery(sql);
}
@Test
public void testCase051() {
String sql = "select * from restful_test.t1 tt, restful_test.t3 yy where tt.ts = yy.ts";
executeQuery(sql);
}
private void executeUpdate(String sql) {
try (Statement statement = connection.createStatement()) {
long start = System.currentTimeMillis();
int affectedRows = statement.executeUpdate(sql);
long end = System.currentTimeMillis();
System.out.println("[ affected rows : " + affectedRows + " ] time cost: " + (end - start) + " ms, execute statement ====> " + sql);
} catch (SQLException e) {
e.printStackTrace();
}
}
private void executeWithResult(String sql) {
try (Statement statement = connection.createStatement()) {
statement.execute(sql);
ResultSet resultSet = statement.getResultSet();
printResult(resultSet);
} catch (SQLException e) {
e.printStackTrace();
}
}
private void execute(String sql) {
try (Statement statement = connection.createStatement()) {
long start = System.currentTimeMillis();
boolean execute = statement.execute(sql);
long end = System.currentTimeMillis();
printSql(sql, execute, (end - start));
} catch (SQLException e) {
System.out.println("ERROR execute SQL ===> " + sql);
e.printStackTrace();
}
}
private static void printSql(String sql, boolean succeed, long cost) {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
private void executeQuery(String sql) {
try (Statement statement = connection.createStatement()) {
long start = System.currentTimeMillis();
ResultSet resultSet = statement.executeQuery(sql);
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR execute SQL ===> " + sql);
e.printStackTrace();
}
}
private static void printResult(ResultSet resultSet) throws SQLException {
ResultSetMetaData metaData = resultSet.getMetaData();
while (resultSet.next()) {
StringBuilder sb = new StringBuilder();
for (int i = 1; i <= metaData.getColumnCount(); i++) {
String columnLabel = metaData.getColumnLabel(i);
String value = resultSet.getString(i);
sb.append(columnLabel + ": " + value + "\t");
}
System.out.println(sb.toString());
}
}
@BeforeClass
public static void before() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
}
@AfterClass
public static void after() throws SQLException {
connection.close();
}
}
......@@ -5,10 +5,6 @@ import org.junit.Test;
public class SqlSyntaxValidatorTest {
@Test
public void validateSqlSyntax() {
}
@Test
public void isSelectSQL() {
Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather"));
......
......@@ -4,6 +4,7 @@
*/
const ref = require('ref');
const os = require('os');
const ffi = require('ffi');
const ArrayType = require('ref-array');
const Struct = require('ref-struct');
......@@ -188,7 +189,13 @@ function CTaosInterface (config = null, pass = false) {
ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
/*Declare a bunch of functions first*/
/* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */
this.libtaos = ffi.Library('libtaos', {
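// pick the native library name by platform: 'taos' on Windows, 'libtaos' elsewhere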
if ('win32' == os.platform()) {
taoslibname = 'taos';
} else {
taoslibname = 'libtaos';
}
this.libtaos = ffi.Library(taoslibname, {
'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ],
'taos_init': [ ref.types.void, [ ] ],
//TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
......
{
"name": "td2.0-connector",
"version": "2.0.4",
"version": "2.0.5",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"scripts": {
......
......@@ -23,8 +23,7 @@ extern "C" {
int32_t dnodeInitModules();
void dnodeCleanupModules();
bool dnodeStartMnode(SMInfos *pMinfos);
void dnodeProcessModuleStatus(uint32_t moduleStatus);
int32_t dnodeStartMnode(SMInfos *pMinfos);
#ifdef __cplusplus
}
......
......@@ -188,8 +188,8 @@ void dnodeReprocessMWriteMsg(void *pMsg) {
++pWrite->pBatchMasterMsg->received;
if (pWrite->pBatchMasterMsg->successed + pWrite->pBatchMasterMsg->received
>= pWrite->pBatchMasterMsg->expected) {
dnodeSendRedirectMsg(&pWrite->rpcMsg, true);
dnodeFreeMWriteMsg(pWrite);
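// redirect and free via the batch master message instead of the individual sub-message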
dnodeSendRedirectMsg(&pWrite->pBatchMasterMsg->rpcMsg, true);
dnodeFreeMWriteMsg(pWrite->pBatchMasterMsg);
}
mnodeDestroySubMsg(pWrite);
......
......@@ -127,14 +127,16 @@ int32_t dnodeInitModules() {
return dnodeStartModules();
}
void dnodeProcessModuleStatus(uint32_t moduleStatus) {
int32_t dnodeProcessModuleStatus(uint32_t moduleStatus) {
int32_t code = 0;
for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) {
bool enableModule = moduleStatus & (1 << module);
if (!tsModule[module].enable && enableModule) {
dInfo("module status:%u is set, start %s module", moduleStatus, tsModule[module].name);
tsModule[module].enable = true;
dnodeSetModuleStatus(module);
(*tsModule[module].startFp)();
code = (*tsModule[module].startFp)();
}
if (tsModule[module].enable && !enableModule) {
......@@ -144,21 +146,29 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) {
(*tsModule[module].stopFp)();
}
}
}
bool dnodeStartMnode(SMInfos *pMinfos) {
SMInfos *pMnodes = pMinfos;
return code;
}
int32_t dnodeStartMnode(SMInfos *pMinfos) {
if (tsModuleStatus & (1 << TSDB_MOD_MNODE)) {
dDebug("mnode module is already started, module status:%d", tsModuleStatus);
return false;
return 0;
}
uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE);
dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus);
dnodeProcessModuleStatus(moduleStatus);
sdbUpdateSync(pMnodes);
int32_t code = dnodeProcessModuleStatus(moduleStatus);
if (code == 0) {
code = sdbUpdateSync(pMinfos);
}
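// if either step failed, clear the mnode bit and roll the module status back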
if (code != 0) {
dError("failed to start mnode module since %s", tstrerror(code));
moduleStatus = tsModuleStatus & ~(1 << TSDB_MOD_MNODE);
dnodeProcessModuleStatus(moduleStatus);
}
return true;
return code;
}
......@@ -60,7 +60,7 @@ int32_t dnodeInitServer() {
rpcInit.label = "DND-S";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessReqMsgFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
......@@ -123,7 +123,7 @@ int32_t dnodeInitClient() {
rpcInit.label = "DND-C";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessRspFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t";
......
......@@ -214,7 +214,5 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) {
dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp);
}
dnodeStartMnode(&pCfg->mnodes);
return TSDB_CODE_SUCCESS;
return dnodeStartMnode(&pCfg->mnodes);
}
......@@ -40,7 +40,7 @@ void dnodeGetClusterId(char *clusterId);
void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr);
bool dnodeStartMnode(SMInfos *pMinfos);
int32_t dnodeStartMnode(SMInfos *pMinfos);
void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg));
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);
......
......@@ -65,7 +65,7 @@ int32_t mnodeStartSystem();
void mnodeCleanupSystem();
void mnodeStopSystem();
void sdbUpdateAsync();
void sdbUpdateSync(void *pMnodes);
int32_t sdbUpdateSync(void *pMnodes);
bool mnodeIsRunning();
int32_t mnodeProcessRead(SMnodeMsg *pMsg);
int32_t mnodeProcessWrite(SMnodeMsg *pMsg);
......
......@@ -86,6 +86,7 @@ void qDestroyQueryInfo(qinfo_t qHandle);
void* qOpenQueryMgmt(int32_t vgId);
void qQueryMgmtNotifyClosed(void* pExecutor);
void qQueryMgmtReOpen(void *pExecutor);
void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, uint64_t qInfo);
void** qAcquireQInfo(void* pMgmt, uint64_t key);
......
......@@ -126,6 +126,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, 0, 0x030B, "Data expir
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, 0, 0x030C, "Invalid query id")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_ID, 0, 0x030D, "Invalid stream id")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, 0, 0x030E, "Invalid connection id")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_IS_RUNNING, 0, 0x0310, "mnode is alreay running")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC, 0, 0x0311, "failed to config sync")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_START_SYNC, 0, 0x0312, "failed to start sync")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CREATE_DIR, 0, 0x0313, "failed to create mnode dir")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_INIT_STEP, 0, 0x0314, "failed to init components")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE, 0, 0x0320, "Object already there")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_ERROR, 0, 0x0321, "Unexpected generic error in sdb")
......
......@@ -311,6 +311,11 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
if (pCfg->replications > mnodeGetDnodesNum()) {
mError("no enough dnode to config replica: %d, #dnodes: %d", pCfg->replications, mnodeGetDnodesNum());
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
if (pCfg->quorum < TSDB_MIN_DB_REPLICA_OPTION || pCfg->quorum > TSDB_MAX_DB_REPLICA_OPTION) {
mError("invalid db option quorum:%d valid range: [%d, %d]", pCfg->quorum, TSDB_MIN_DB_REPLICA_OPTION,
TSDB_MAX_DB_REPLICA_OPTION);
......