Commit 3163eff6 authored by Haojun Liao

Merge branch 'develop' into feature/query

@@ -270,19 +270,20 @@ matrix:
       fi
     - make > /dev/null
-    # - os: osx
-    #   language: c
-    #   compiler: clang
-    #   env: DESC="mac/clang build"
-    #   git:
-    #     - depth: 1
-    #   addons:
-    #     homebrew:
-    #       - cmake
-    #
-    #   script:
-    #     - cd ${TRAVIS_BUILD_DIR}
-    #     - mkdir debug
-    #     - cd debug
-    #     - cmake .. > /dev/null
-    #     - make > /dev/null
+    - os: osx
+      osx_image: xcode11.4
+      language: c
+      compiler: clang
+      env: DESC="mac/clang build"
+      git:
+        - depth: 1
+      addons:
+        homebrew:
+          - cmake
+
+      script:
+        - cd ${TRAVIS_BUILD_DIR}
+        - mkdir debug
+        - cd debug
+        - cmake .. > /dev/null
+        - make > /dev/null
@@ -13,7 +13,7 @@ ENDIF ()
 SET(TD_ACCOUNT FALSE)
 SET(TD_ADMIN FALSE)
 SET(TD_GRANT FALSE)
-SET(TD_MQTT TRUE)
+SET(TD_MQTT FALSE)
 SET(TD_TSDB_PLUGINS FALSE)
 SET(TD_COVER FALSE)
@@ -29,6 +29,11 @@ MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR})
 INCLUDE(cmake/input.inc)
 INCLUDE(cmake/platform.inc)
+
+IF (TD_WINDOWS OR TD_DARWIN)
+  SET(TD_SOMODE_STATIC TRUE)
+ENDIF ()
+
 INCLUDE(cmake/define.inc)
 INCLUDE(cmake/env.inc)
 INCLUDE(cmake/version.inc)
......
@@ -176,5 +176,84 @@ pipeline {
       }
     }
   }
post {
success {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>提交信息:${CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>提交信息:${CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
}
}
@@ -126,44 +126,57 @@ cmake .. -DCPUTYPE=aarch32 && cmake --build .
 If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
 Please specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
-```
+```cmd
 mkdir debug && cd debug
 "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
 cmake .. -G "NMake Makefiles"
 nmake
 ```

-If you use the Visual Studio 2019, please open a command window by executing "cmd.exe".
+If you use Visual Studio 2019 or 2017:
+
+please open a command window by executing "cmd.exe".
 Please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.

-```
+```cmd
 mkdir debug && cd debug
 "c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
 cmake .. -G "NMake Makefiles"
 nmake
 ```

-Or, you can open a command window by clicking the Visual Studio 2019 menu "Tools -> Command Line -> Developer Command Prompt" or "Tools -> Command Line -> Developer PowerShell" and then execute the following commands:
+Or, you can simply open a command window by clicking Windows Start -> the "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >", depending on your Windows architecture, and then execute the following commands:
-```
+```cmd
 mkdir debug && cd debug
 cmake .. -G "NMake Makefiles"
 nmake
 ```
### On Mac OS X platform
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
```shell
mkdir debug && cd debug
cmake .. && cmake --build .
```
 # Quick Run

 To quickly start a TDengine server after building, run the command below in a terminal:
-```cmd
+```bash
 ./build/bin/taosd -c test/cfg
 ```

 In another terminal, use the TDengine shell to connect to the server:
-```
+```bash
 ./build/bin/taos -c test/cfg
 ```

 The option "-c test/cfg" specifies the system configuration file directory.

 # Installing

 After building successfully, TDengine can be installed by:
-```cmd
+```bash
 make install
 ```

 Users can find more information about directories installed on the system in the [directory and files](https://www.taosdata.com/en/documentation/administrator/#Directory-and-Files) section. It should be noted that installing from source code does not configure service management for TDengine.
......
@@ -128,6 +128,8 @@ IF (TD_DARWIN_64)
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
   SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
   SET(RELEASE_FLAGS "-Og")
+  INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
+  INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
 ENDIF ()

 IF (TD_WINDOWS)
@@ -139,6 +141,9 @@ IF (TD_WINDOWS)
   SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
   IF (NOT TD_GODLL)
     SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
+    IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
+      SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
+    ENDIF ()
     SET(DEBUG_FLAGS "/Zi /W3 /GL")
     SET(RELEASE_FLAGS "/W0 /O3 /GL")
   ENDIF ()
......
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
   #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
   #INSTALL(TARGETS shell RUNTIME DESTINATION .)
   IF (TD_MVN_INSTALLED)
-    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.17-dist.jar DESTINATION connector/jdbc)
+    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.18-dist.jar DESTINATION connector/jdbc)
   ENDIF ()
 ELSEIF (TD_DARWIN)
   SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
@@ -12,4 +12,8 @@ ADD_SUBDIRECTORY(MsvcLibX)
 IF (TD_LINUX AND TD_MQTT)
   ADD_SUBDIRECTORY(MQTT-C)
 ENDIF ()
\ No newline at end of file
+
+IF (TD_DARWIN AND TD_MQTT)
+  ADD_SUBDIRECTORY(MQTT-C)
+ENDIF ()
@@ -5,6 +5,10 @@
 #include <stdint.h>
 #include "gzguts.h"

+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+
 #if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__)
 #  define LSEEK _lseeki64
 #else
@@ -240,9 +244,9 @@ local gzFile gz_open(path, fd, mode)
     /* open the file with the appropriate flags (or just use fd) */
     state->fd = fd > -1 ? fd : (
 #ifdef WIDECHAR
-        fd == -2 ? _wopen(path, oflag, 0666) :
+        fd == -2 ? _wopen(path, oflag | O_BINARY, 0666) :
 #endif
-        open((const char *)path, oflag, 0666));
+        open((const char *)path, oflag | O_BINARY, 0666));
     if (state->fd == -1) {
         free(state->path);
         free(state);
......
@@ -213,16 +213,19 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
 If a table was created through a [super table](../super-table/), schema changes can only be performed on the super table, and a schema change on the super table takes effect for all tables created from it. Tables not created from a super table can have their schema modified directly.

 ## Super Table (STable) Management

+Note: versions before 2.0.15 do not support the STABLE keyword and use TABLE instead. That is, for the CREATE, DROP and ALTER statements described below, older versions write TABLE where newer versions write STABLE.
+
 - **Create a super table**

    ```mysql
-    CREATE TABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
+    CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
    ```
    Creates an STable. The syntax is similar to creating a table, but the names and types of the TAGS fields must be specified.

    Notes:

    1) The data type of a TAGS column cannot be timestamp;

    2) A TAGS column name cannot be the same as any other column name;

@@ -233,16 +236,16 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
 - **Drop a super table**

    ```mysql
-    DROP TABLE [IF EXISTS] stb_name;
+    DROP STABLE [IF EXISTS] stb_name;
    ```
    Dropping an STable automatically drops all subtables created from it.

 - **Show all super tables in the current database**

    ```mysql
    SHOW STABLES [LIKE tb_name_wildcar];
    ```
    Lists all STables in the database along with related information: the STable name, creation time, number of columns, number of tags (TAG), and the number of tables created from it.

 - **Get the schema of a super table**

@@ -253,43 +256,43 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
 - **Add a column to a super table**

    ```mysql
-    ALTER TABLE stb_name ADD COLUMN field_name data_type;
+    ALTER STABLE stb_name ADD COLUMN field_name data_type;
    ```

 - **Drop a column from a super table**

    ```mysql
-    ALTER TABLE stb_name DROP COLUMN field_name;
+    ALTER STABLE stb_name DROP COLUMN field_name;
    ```

 ## TAG Management for Super Tables (STable)

 - **Add a tag**

    ```mysql
-    ALTER TABLE stb_name ADD TAG new_tag_name tag_type;
+    ALTER STABLE stb_name ADD TAG new_tag_name tag_type;
    ```
    Adds a new tag to an STable and specifies its type. The total number of tags cannot exceed 128, and their total length cannot exceed 16k characters.

 - **Drop a tag**

    ```mysql
-    ALTER TABLE stb_name DROP TAG tag_name;
+    ALTER STABLE stb_name DROP TAG tag_name;
    ```
    Drops a tag from a super table; after a tag is dropped from the super table, it is automatically removed from all of its subtables.

 - **Rename a tag**

    ```mysql
-    ALTER TABLE stb_name CHANGE TAG old_tag_name new_tag_name;
+    ALTER STABLE stb_name CHANGE TAG old_tag_name new_tag_name;
    ```
    Renames a tag of a super table; after the rename, the tag name is automatically updated in all of its subtables.

 - **Change a subtable's tag value**

    ```mysql
-    ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
+    ALTER STABLE tb_name SET TAG tag_name=new_tag_value;
    ```
    Note: except for updating a tag's value, which is done on a subtable, all other tag operations (adding a tag, dropping a tag, and so on) can only be applied to an STable and not to an individual subtable. After a tag is added to an STable, every table built on that STable automatically gains the new tag, and the default value of every newly added tag is NULL.

 ## Data Insertion

@@ -467,6 +470,17 @@ Query OK, 2 row(s) in set (0.003112s)
 Note: for a regular table, the wildcard * does not include the _tag columns_.

+##### Getting distinct values of a tag column
+
+Starting from version 2.0.15, the distinct keyword can be specified when querying a tag column of a super table, which returns all non-duplicated values of that tag column.
+
+```mysql
+SELECT DISTINCT tag_name FROM stb_name;
+```
+
+Note: currently the distinct keyword only supports deduplication on tag columns of super tables and cannot be used on regular columns.
+
 #### Result set column names

 In the ```SELECT``` clause, if no column names are specified for the result set, the result set column names default to the expression names used in the ```SELECT``` clause. In addition, ```AS``` can be used to rename the columns in the returned result set. For example:
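As a minimal illustration of the aliasing described above (the table and column names `tb1`, `ts` and `current` are placeholders chosen for this sketch, not part of the surrounding diff):

```mysql
-- illustrative table and column names only
SELECT ts AS time, current AS c FROM tb1 LIMIT 2;
```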
......
@@ -12,7 +12,7 @@
 Memory Size = maxVgroupsPerDb * (blocks * cache + 10Mb) + numOfTables * (tagSizePerTable + 0.5Kb)
 ```

-Example: on a 4-core machine, with cache at the default size of 16 MB and blocks at the default value of 6, and assuming 100,000 tables with a total tag length of 256 bytes, the total memory requirement is: 4\*(16\*6+10) + 100000*(0.25+0.5)/1000 = 499M.
+Example: on a 4-core machine, with cache at the default size of 16 MB and blocks at the default value of 6, and assuming 100,000 tables with a total tag length of 256 bytes, the total memory requirement is: 4\*(16\*6+10) + 100000\*(0.25+0.5)/1000 = 499M.

 In practice, a running system usually stores data in different DBs according to the characteristics of the data, which also needs to be considered when planning capacity.
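Working through the arithmetic of the example above (first term in MB, second term converted from KB to MB), purely to make the 499 MB figure explicit:

```
4 vgroups * (16 MB * 6 + 10 MB)           = 4 * 106 MB = 424 MB
100000 tables * (0.25 KB + 0.5 KB) / 1000 = 75 MB
Total                                     = 424 MB + 75 MB = 499 MB
```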
@@ -35,7 +35,7 @@ TDengine相对于通用数据库,有超高的压缩比,在绝大多数场景
 Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
 ```

-Example: 10 million smart meters, each sampled every 15 minutes with 128 bytes collected per sample; the raw data volume for one year is: 10000000\*128\*24\*60/15*365 = 44.8512T. TDengine needs roughly 44.851/5 = 8.97024T of space.
+Example: 10 million smart meters, each sampled every 15 minutes with 128 bytes collected per sample; the raw data volume for one year is: 10000000\*128\*24\*60/15\*365 = 44.8512T. TDengine needs roughly 44.851/5 = 8.97024T of space.

 Users can set the maximum retention period of data on disk via the keep parameter. To further reduce storage cost, TDengine also offers tiered storage: the coldest data can be placed on the cheapest storage media, with no change required on the application side other than a lower read speed.
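A step-by-step breakdown of the storage example above, added only to make the arithmetic explicit:

```
128 bytes * (24*60/15 = 96) samples/day  = 12,288 bytes per meter per day
12,288 bytes * 365 days                  = 4,485,120 bytes per meter per year
4,485,120 bytes * 10,000,000 meters      ≈ 44.85 TB raw; 44.85 / 5 ≈ 8.97 TB after compression
```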
@@ -181,7 +181,7 @@ taos -C 或 taos --dump-config
 Characters entered on the client use the operating system's current default encoding, which is usually UTF-8 on Linux but may be GB18030, GBK, etc. on some Chinese-language systems. In a docker environment the default encoding is POSIX, and on Chinese-language Windows it is CP936. The client must make sure its character set is configured correctly, i.e. to the current encoding of the operating system it runs on, so that data in nchar columns is converted correctly to the UCS4-LE encoding.

-In Linux, a locale name has the form <language>_<region>.<charset>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The charset tells the client how to convert and parse local strings. Linux and Mac OSX determine the system character encoding through the locale setting; because the locale used by Windows is not in the POSIX standard locale format, Windows requires a separate configuration parameter, charset, to specify the character encoding. charset can also be used on Linux to specify the character encoding.
+In Linux, a locale name has the form <language>\_<region>.<charset>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The charset tells the client how to convert and parse local strings. Linux and Mac OSX determine the system character encoding through the locale setting; because the locale used by Windows is not in the POSIX standard locale format, Windows requires a separate configuration parameter, charset, to specify the character encoding. charset can also be used on Linux to specify the character encoding.

 - charset
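As a sketch of how these settings are typically written in taos.cfg (the parameter names come from the text above; the values shown are examples only, not part of this commit):

```
# example values only; pick the locale/charset that matches the client OS
locale  en_US.UTF-8
charset UTF-8
```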
@@ -452,39 +452,39 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
The keyword table is updated: the reserved words DISTINCT, STABLE and STABLES are added, and the five-column layout re-flows accordingly. The updated table is:

| Keyword List |             |              |            |           |
| ------------ | ----------- | ------------ | ---------- | --------- |
| ABLOCKS      | CONNECTION  | GROUP        | MINUS      | SLASH     |
| ABORT        | CONNECTIONS | GT           | MNODES     | SLIDING   |
| ACCOUNT      | COPY        | ID           | MODULES    | SMALLINT  |
| ACCOUNTS     | COUNT       | IF           | NCHAR      | SPREAD    |
| ADD          | CREATE      | IGNORE       | NE         | STABLE    |
| AFTER        | CTIME       | IMMEDIATE    | NONE       | STABLES   |
| ALL          | DATABASE    | IMPORT       | NOT        | STAR      |
| ALTER        | DATABASES   | IN           | NOTNULL    | STATEMENT |
| AND          | DAYS        | INITIALLY    | NOW        | STDDEV    |
| AS           | DEFERRED    | INSERT       | OF         | STREAM    |
| ASC          | DELIMITERS  | INSTEAD      | OFFSET     | STREAMS   |
| ATTACH       | DESC        | INTEGER      | OR         | STRING    |
| AVG          | DESCRIBE    | INTERVAL     | ORDER      | SUM       |
| BEFORE       | DETACH      | INTO         | PASS       | TABLE     |
| BEGIN        | DIFF        | IP           | PERCENTILE | TABLES    |
| BETWEEN      | DISTINCT    | IS           | PLUS       | TAG       |
| BIGINT       | DIVIDE      | ISNULL       | PRAGMA     | TAGS      |
| BINARY       | DNODE       | JOIN         | PREV       | TBLOCKS   |
| BITAND       | DNODES      | KEEP         | PRIVILEGE  | TBNAME    |
| BITNOT       | DOT         | KEY          | QUERIES    | TIMES     |
| BITOR        | DOUBLE      | KILL         | QUERY      | TIMESTAMP |
| BOOL         | DROP        | LAST         | RAISE      | TINYINT   |
| BOTTOM       | EACH        | LE           | REM        | TOP       |
| BY           | END         | LEASTSQUARES | REPLACE    | TRIGGER   |
| CACHE        | EQ          | LIKE         | REPLICA    | UMINUS    |
| CASCADE      | EXISTS      | LIMIT        | RESET      | UPLUS     |
| CHANGE       | EXPLAIN     | LINEAR       | RESTRICT   | USE       |
| CLOG         | FAIL        | LOCAL        | ROW        | USER      |
| CLUSTER      | FILL        | LP           | ROWS       | USERS     |
| COLON        | FIRST       | LSHIFT       | RP         | USING     |
| COLUMN       | FLOAT       | LT           | RSHIFT     | VALUES    |
| COMMA        | FOR         | MATCH        | SCORES     | VARIABLE  |
| COMP         | FROM        | MAX          | SELECT     | VGROUPS   |
| CONCAT       | GE          | METRIC       | SEMI       | VIEW      |
| CONFIGS      | GLOB        | METRICS      | SET        | WAVG      |
| CONFLICT     | GRANTS      | MIN          | SHOW       | WHERE     |
\ No newline at end of file
@@ -344,7 +344,6 @@ dataDir /mnt/disk6/taos 2
 The mounted disks can also be non-local network disks, as long as the system can access them.

 Note: the tiered storage feature is only supported in the Enterprise Edition.
-**Tip: this feature is not yet available**

 ## Data Queries

 TDengine provides a rich set of query processing functions for tables and super tables. Besides regular aggregate queries, it also supports window queries and statistical aggregations for time-series data. Query processing in TDengine is carried out jointly by the client, vnode, and mnode.
......
@@ -2,7 +2,7 @@
 TDengine provides a `taos-jdbcdriver` implementation that follows the JDBC standard (3.0) API specification; it can be searched for and downloaded from the maven central [Sonatype Repository][1].

-The `taos-jdbcdriver` implementation comes in 2 forms: JDBC-JNI and JDBC-RESTful (JDBC-RESTful is supported since taos-jdbcdriver-2.0.17). JDBC-JNI is implemented by calling native methods of the client library libtaos.so (or taos.dll), while JDBC-RESTful internally wraps the RESTful interface.
+The `taos-jdbcdriver` implementation comes in 2 forms: JDBC-JNI and JDBC-RESTful (JDBC-RESTful is supported since taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling native methods of the client library libtaos.so (or taos.dll), while JDBC-RESTful internally wraps the RESTful interface.

 ![tdengine-connector](../assets/tdengine-jdbc-connector.png)
@@ -67,7 +67,7 @@ maven 项目中使用如下 pom.xml 配置即可:
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.17</version>
+  <version>2.0.18</version>
 </dependency>
 ```
@@ -334,16 +334,17 @@ conn.close();
 ```java
 public static void main(String[] args) throws SQLException {
     HikariConfig config = new HikariConfig();
+    // jdbc properties
     config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
     config.setUsername("root");
     config.setPassword("taosdata");
+    // connection pool configurations
-    config.setMinimumIdle(3);           //minimum number of idle connection
+    config.setMinimumIdle(10);          //minimum number of idle connection
     config.setMaximumPoolSize(10);      //maximum number of connection in the pool
-    config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
-    config.setIdleTimeout(60000);       // max idle time for recycle idle connection
-    config.setConnectionTestQuery("describe log.dn"); //validation query
-    config.setValidationTimeout(3000);  //validation query timeout
+    config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
+    config.setMaxLifetime(0);           // maximum life time for each connection
+    config.setIdleTimeout(0);           // max idle time for recycle idle connection
+    config.setConnectionTestQuery("select server_status()"); //validation query

     HikariDataSource ds = new HikariDataSource(config); //create datasource
@@ -375,32 +376,22 @@ conn.close();
 * Usage example:
 ```java
 public static void main(String[] args) throws Exception {
-    Properties properties = new Properties();
-    properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
-    properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
-    properties.put("username","root");
-    properties.put("password","taosdata");
-    properties.put("maxActive","10"); //maximum number of connection in the pool
-    properties.put("initialSize","3");//initial number of connection
-    properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
-    properties.put("minIdle","3");//minimum number of connection in the pool
-    properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
-    properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
-    properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
-    properties.put("validationQuery","describe log.dn"); //validation query
-    properties.put("testWhileIdle","true"); // test connection while idle
-    properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
-    properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
-
-    //create druid datasource
-    DataSource ds = DruidDataSourceFactory.createDataSource(properties);
-    Connection connection = ds.getConnection(); // get connection
+    DruidDataSource dataSource = new DruidDataSource();
+    // jdbc properties
+    dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+    dataSource.setUrl(url);
+    dataSource.setUsername("root");
+    dataSource.setPassword("taosdata");
+    // pool configurations
+    dataSource.setInitialSize(10);
+    dataSource.setMinIdle(10);
+    dataSource.setMaxActive(10);
+    dataSource.setMaxWait(30000);
+    dataSource.setValidationQuery("select server_status()");
+
+    Connection connection = dataSource.getConnection(); // get connection
     Statement statement = connection.createStatement(); // get statement
     //query or insert
     // ...
@@ -427,7 +418,7 @@ Query OK, 1 row(s) in set (0.000141s)
 ## Using with Frameworks

 * For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
-* For using it with Springboot + Mybatis, see [springbootdemo
+* For using it with Springboot + Mybatis, see [springbootdemo][12]
......
@@ -21,7 +21,7 @@ else
   cd ${script_dir}
   script_dir="$(pwd)"
   data_dir="/var/lib/taos"
-  log_dir="~/TDengineLog"
+  log_dir=~/TDengine/log
 fi

 log_link_dir="/usr/local/taos/log"
......
@@ -24,7 +24,7 @@ data_dir="/var/lib/taos"
 if [ "$osType" != "Darwin" ]; then
   log_dir="/var/log/taos"
 else
-  log_dir="~/TDengineLog"
+  log_dir=~/TDengine/log
 fi

 data_link_dir="/usr/local/taos/data"
@@ -178,7 +178,9 @@ function install_bin() {
 function install_lib() {
   # Remove links
   ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
-  ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+  if [ "$osType" != "Darwin" ]; then
+    ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+  fi

   if [ "$osType" != "Darwin" ]; then
     ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
@@ -190,12 +192,14 @@ function install_lib() {
       ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
     fi
   else
-    ${csudo} cp ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
-    ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+    ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+    ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
     ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
   fi

-  ${csudo} ldconfig
+  if [ "$osType" != "Darwin" ]; then
+    ${csudo} ldconfig
+  fi
 }

 function install_header() {
......
@@ -8,6 +8,4 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
 INCLUDE_DIRECTORIES(inc)
 AUX_SOURCE_DIRECTORY(src SRC)

-IF (TD_LINUX)
-  ADD_LIBRARY(balance ${SRC})
-ENDIF ()
+ADD_LIBRARY(balance ${SRC})
@@ -24,7 +24,7 @@ extern "C" {
 int32_t bnInitThread();
 void    bnCleanupThread();
 void    bnNotify();
-void    bnStartTimer(int64_t mseconds);
+void    bnStartTimer(int32_t mseconds);

 #ifdef __cplusplus
 }
......
@@ -30,7 +30,7 @@
 #include "mnodeVgroup.h"

 extern int64_t tsDnodeRid;
-extern int64_t tsSdbRid;
+extern int32_t tsSdbRid;

 static SBnMgmt tsBnMgmt;
 static void    bnMonitorDnodeModule();
......
@@ -271,23 +271,23 @@ static int32_t bnRetrieveScores(SShowObj *pShow, char *data, int32_t rows, void
     cols++;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(float *)pWrite = systemScore;
+    *(float *)pWrite = (float)systemScore;
     cols++;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(float *)pWrite = pDnode->customScore;
+    *(float *)pWrite = (float)pDnode->customScore;
     cols++;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(float *)pWrite = (int32_t)moduleScore;
+    *(float *)pWrite = (float)moduleScore;
     cols++;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(float *)pWrite = (int32_t)vnodeScore;
+    *(float *)pWrite = (float)vnodeScore;
     cols++;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    *(float *)pWrite = (int32_t)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
+    *(float *)pWrite = (float)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
     cols++;

     pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
......
@@ -119,13 +119,13 @@ static void bnProcessTimer(void *handle, void *tmrId) {
   }
 }

-void bnStartTimer(int64_t mseconds) {
+void bnStartTimer(int32_t mseconds) {
   if (tsBnThread.stop) return;

   bool updateSoon = (mseconds != -1);
   if (updateSoon) {
-    mTrace("balance function will be called after %" PRId64 " ms", mseconds);
-    taosTmrReset(bnProcessTimer, mseconds, (void *)mseconds, tsMnodeTmr, &tsBnThread.timer);
+    mTrace("balance function will be called after %d ms", mseconds);
+    taosTmrReset(bnProcessTimer, mseconds, (void *)(int64_t)mseconds, tsMnodeTmr, &tsBnThread.timer);
   } else {
     taosTmrReset(bnProcessTimer, tsStatusInterval * 1000, NULL, tsMnodeTmr, &tsBnThread.timer);
   }
......
@@ -28,6 +28,28 @@ IF (TD_LINUX)
   ADD_SUBDIRECTORY(tests)
ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
#VERSION dylib version
#SOVERSION dylib version
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ADD_SUBDIRECTORY(tests)
 ELSEIF (TD_WINDOWS)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32)
@@ -49,12 +71,12 @@ ELSEIF (TD_DARWIN)
   INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)

   ADD_LIBRARY(taos_static STATIC ${SRC})
-  TARGET_LINK_LIBRARIES(taos_static trpc tutil pthread m)
+  TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m)
   SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")

   # generate dynamic library (*.dylib)
   ADD_LIBRARY(taos SHARED ${SRC})
-  TARGET_LINK_LIBRARIES(taos trpc tutil pthread m)
+  TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m)
   SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
......
@@ -749,7 +749,10 @@ static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
 static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
   char db[TSDB_DB_NAME_LEN] = {0};
+
+  pthread_mutex_lock(&pSql->pTscObj->mutex);
   extractDBName(pSql->pTscObj->db, db);
+  pthread_mutex_unlock(&pSql->pTscObj->mutex);

   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
......
@@ -905,6 +905,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
       return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
     }
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.type != TK_LP) {
return tscInvalidSQLErrMsg(pCmd->payload, NULL, sToken.z);
}
     SKVRowBuilder kvRowBuilder = {0};
     if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
       return TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -1548,12 +1555,13 @@ void tscImportDataFromFile(SSqlObj *pSql) {
   SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
   pCmd->count = 1;

-  FILE *fp = fopen(pCmd->payload, "r");
+  FILE *fp = fopen(pCmd->payload, "rb");
   if (fp == NULL) {
     pSql->res.code = TAOS_SYSTEM_ERROR(errno);
     tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));

     tfree(pSupporter);
+    taos_free_result(pNew);
     tscAsyncResultOnError(pSql);

     return;
   }
......
@@ -13,10 +13,12 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */

+#ifndef __APPLE__
 #define _BSD_SOURCE
 #define _XOPEN_SOURCE 500
 #define _DEFAULT_SOURCE
 #define _GNU_SOURCE
+#endif // __APPLE__

 #include "os.h"
 #include "ttype.h"
@@ -60,7 +62,7 @@ static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
 static char*   getAccountId(SSqlObj* pSql);

 static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
-static char* getCurrentDBName(SSqlObj* pSql);
+static char* cloneCurrentDBName(SSqlObj* pSql);
 static bool hasSpecifyDB(SStrToken* pTableName);
 static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
 static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@@ -921,16 +923,19 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
     }
   } else {  // get current DB name first, and then set it into path
-    char* t = getCurrentDBName(pSql);
+    char* t = cloneCurrentDBName(pSql);
     if (strlen(t) == 0) {
       return TSDB_CODE_TSC_DB_NOT_SELECTED;
     }

     code = tNameFromString(&pTableMetaInfo->name, t, T_NAME_ACCT | T_NAME_DB);
     if (code != 0) {
+      free(t);
       return TSDB_CODE_TSC_DB_NOT_SELECTED;
     }

+    free(t);
+
     if (pTableName->n >= TSDB_TABLE_NAME_LEN) {
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
     }
@@ -1244,8 +1249,12 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
 static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }

-static char* getCurrentDBName(SSqlObj* pSql) {
-  return pSql->pTscObj->db;
+static char* cloneCurrentDBName(SSqlObj* pSql) {
+  pthread_mutex_lock(&pSql->pTscObj->mutex);
+  char *p = strdup(pSql->pTscObj->db);
+  pthread_mutex_unlock(&pSql->pTscObj->mutex);
+
+  return p;
 }

 /* length limitation, strstr cannot be applied */
@@ -4300,6 +4309,77 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
   }
 }
static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
const char *msg1 = "invalid tag operator";
const char* msg2 = "not supported filter condition";
do {
if (p->nodeType != TSQL_NODE_EXPR) {
break;
}
if (!p->_node.pLeft || !p->_node.pRight) {
break;
}
if (IS_ARITHMETIC_OPTR(p->_node.optr)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (!IS_RELATION_OPTR(p->_node.optr)) {
break;
}
tVariant * vVariant = NULL;
int32_t schemaType = -1;
if (p->_node.pLeft->nodeType == TSQL_NODE_VALUE && p->_node.pRight->nodeType == TSQL_NODE_COL) {
if (!p->_node.pRight->pSchema) {
break;
}
vVariant = p->_node.pLeft->pVal;
schemaType = p->_node.pRight->pSchema->type;
} else if (p->_node.pLeft->nodeType == TSQL_NODE_COL && p->_node.pRight->nodeType == TSQL_NODE_VALUE) {
if (!p->_node.pLeft->pSchema) {
break;
}
vVariant = p->_node.pRight->pVal;
schemaType = p->_node.pLeft->pSchema->type;
} else {
break;
}
if (schemaType >= TSDB_DATA_TYPE_TINYINT && schemaType <= TSDB_DATA_TYPE_BIGINT) {
schemaType = TSDB_DATA_TYPE_BIGINT;
} else if (schemaType == TSDB_DATA_TYPE_FLOAT || schemaType == TSDB_DATA_TYPE_DOUBLE) {
schemaType = TSDB_DATA_TYPE_DOUBLE;
}
int32_t retVal = TSDB_CODE_SUCCESS;
if (schemaType == TSDB_DATA_TYPE_BINARY) {
char *tmp = calloc(1, vVariant->nLen + TSDB_NCHAR_SIZE);
retVal = tVariantDump(vVariant, tmp, schemaType, false);
free(tmp);
} else if (schemaType == TSDB_DATA_TYPE_NCHAR) {
// pRight->val.nLen + 1 is larger than the actual nchar string length
char *tmp = calloc(1, (vVariant->nLen + 1) * TSDB_NCHAR_SIZE);
retVal = tVariantDump(vVariant, tmp, schemaType, false);
free(tmp);
} else {
double tmp;
retVal = tVariantDump(vVariant, (char*)&tmp, schemaType, false);
}
if (retVal != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}while (0);
return TSDB_CODE_SUCCESS;
}
 static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) {
   int32_t ret = TSDB_CODE_SUCCESS;
@@ -4342,6 +4422,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
     tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &bw);
     doCompactQueryExpr(pExpr);

+    if (ret == TSDB_CODE_SUCCESS) {
+      ret = validateTagCondExpr(pCmd, p);
+    }
+
     tSqlExprDestroy(p1);
     tExprTreeDestroy(p, NULL);
@@ -4349,6 +4433,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
     if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0 && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
       return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "filter on tag not supported for normal table");
     }
+
+    if (ret) {
+      break;
+    }
   }

   pCondExpr->pTagCond = NULL;
......
@@ -1250,8 +1250,10 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);

   if (tNameIsEmpty(&pTableMetaInfo->name)) {
-    tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
+    pthread_mutex_lock(&pObj->mutex);
+    tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
+    pthread_mutex_unlock(&pObj->mutex);
   } else {
     tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
   }
@@ -1611,9 +1613,14 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   // TODO refactor full_name
   char *db;  // ugly code to move the space
+
+  pthread_mutex_lock(&pObj->mutex);
   db = strstr(pObj->db, TS_PATH_DELIMITER);
   db = (db == NULL) ? pObj->db : db + 1;
   tstrncpy(pConnect->db, db, sizeof(pConnect->db));
+  pthread_mutex_unlock(&pObj->mutex);
+
   tstrncpy(pConnect->clientVersion, version, sizeof(pConnect->clientVersion));
   tstrncpy(pConnect->msgVersion, "", sizeof(pConnect->msgVersion));
@@ -2131,10 +2138,13 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
   SConnectRsp *pConnect = (SConnectRsp *)pRes->pRsp;
   tstrncpy(pObj->acctId, pConnect->acctId, sizeof(pObj->acctId));  // copy acctId from response

+  pthread_mutex_lock(&pObj->mutex);
   int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);

   assert(len <= sizeof(pObj->db));
   tstrncpy(pObj->db, temp, sizeof(pObj->db));
+  pthread_mutex_unlock(&pObj->mutex);

   if (pConnect->epSet.numOfEps > 0) {
     tscEpSetHtons(&pConnect->epSet);
@@ -2161,11 +2171,18 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
 int tscProcessUseDbRsp(SSqlObj *pSql) {
   STscObj *       pObj = pSql->pTscObj;
   STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
-  return tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
+
+  pthread_mutex_lock(&pObj->mutex);
+  int ret = tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
+  pthread_mutex_unlock(&pObj->mutex);
+
+  return ret;
 }

 int tscProcessDropDbRsp(SSqlObj *pSql) {
-  pSql->pTscObj->db[0] = 0;
+  //TODO LOCK DB WHEN MODIFY IT
+  //pSql->pTscObj->db[0] = 0;
   taosHashEmpty(tscTableMetaInfo);
   return 0;
 }
......
@@ -295,6 +295,10 @@ void taos_close(TAOS *taos) {
       tscDebug("%p HB is freed", pHb);

       taosReleaseRef(tscObjRef, pHb->self);
+#ifdef __APPLE__
+      // to satisfy later tsem_destroy in taos_free_result
+      tsem_init(&pHb->rspSem, 0, 0);
+#endif // __APPLE__
       taos_free_result(pHb);
     }
   }
......
@@ -313,7 +313,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) {
   char buf[TSDB_MAX_SQL_LEN];
   sprintf(buf, "%s/subscribe/%s", tsDataDir, pSub->topic);

-  FILE* fp = fopen(buf, "r");
+  FILE* fp = fopen(buf, "rb");
   if (fp == NULL) {
     tscDebug("subscription progress file does not exist: %s", pSub->topic);
     return 1;
@@ -368,7 +368,7 @@ void tscSaveSubscriptionProgress(void* sub) {
   }

   sprintf(path, "%s/subscribe/%s", tsDataDir, pSub->topic);
-  FILE* fp = fopen(path, "w+");
+  FILE* fp = fopen(path, "wb+");
   if (fp == NULL) {
     tscError("failed to create progress file for subscription: %s", pSub->topic);
     return;
......
@@ -1018,7 +1018,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
   if (numOfRows > 0) {  // write the compressed timestamp to disk file
     if(pSupporter->f == NULL) {
-      pSupporter->f = fopen(pSupporter->path, "w");
+      pSupporter->f = fopen(pSupporter->path, "wb");

       if (pSupporter->f == NULL) {
         tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
@@ -1066,7 +1066,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
       // continue to retrieve ts-comp data from vnode
       if (!pRes->completed) {
         taosGetTmpfilePath("ts-join", pSupporter->path);
-        pSupporter->f = fopen(pSupporter->path, "w");
+        pSupporter->f = fopen(pSupporter->path, "wb");
         pRes->row = pRes->numOfRows;

         taos_fetch_rows_a(tres, tsCompRetrieveCallback, param);
@@ -1092,7 +1092,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
     taosGetTmpfilePath("ts-join", pSupporter->path);

     // TODO check for failure
-    pSupporter->f = fopen(pSupporter->path, "w");
+    pSupporter->f = fopen(pSupporter->path, "wb");
     pRes->row = pRes->numOfRows;

     // set the callback function
@@ -1991,7 +1991,22 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
  * current query failed, and the retry count is less than the available
  * count, retry query clear previous retrieved data, then launch a new sub query
  */
-static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, int32_t code) {
+static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) {
SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport));
if (trsupport == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(trsupport, oriTrs, sizeof(*trsupport));
const uint32_t nBufferSize = (1u << 16u); // 64KB
trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
if (trsupport->localBuffer == NULL) {
tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, strerror(errno));
tfree(trsupport);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
   SSqlObj *pParentSql = trsupport->pParentSql;
   int32_t  subqueryIndex = trsupport->subqueryIndex;
......
@@ -1942,6 +1942,10 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
   }

   if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
+#ifdef __APPLE__
+    // to satisfy later tsem_destroy in taos_free_result
+    tsem_init(&pNew->rspSem, 0, 0);
+#endif // __APPLE__
     tscFreeSqlObj(pNew);
     return NULL;
   }
@@ -2123,7 +2127,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
                            pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
   } else {  // transfer the ownership of pTableMeta to the newly create sql object.
     STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
+    if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
+      terrno = TSDB_CODE_TSC_APP_ERROR;
+      goto _error;
+    }
     STableMeta*  pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
     SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
     pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
@@ -2508,7 +2516,11 @@ bool tscSetSqlOwner(SSqlObj* pSql) {
   SSqlRes* pRes = &pSql->res;

   // set the sql object owner
+#ifdef __APPLE__
+  pthread_t threadId = (pthread_t)taosGetSelfPthreadId();
+#else // __APPLE__
   uint64_t threadId = taosGetSelfPthreadId();
+#endif // __APPLE__
   if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) {
     pRes->code = TSDB_CODE_QRY_IN_EXEC;
     return false;
......
@@ -27,23 +27,23 @@
 extern "C" {
 #endif

 #define STR_TO_VARSTR(x, str)                       \
   do {                                              \
-    VarDataLenT __len = strlen(str);                \
+    VarDataLenT __len = (VarDataLenT)strlen(str);   \
     *(VarDataLenT *)(x) = __len;                    \
     memcpy(varDataVal(x), (str), __len);            \
   } while (0);

 #define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs)                         \
   do {                                                                    \
     char *_e = stpncpy(varDataVal(x), (str), (_maxs)-VARSTR_HEADER_SIZE); \
     varDataSetLen(x, (_e - (x)-VARSTR_HEADER_SIZE));                      \
   } while (0)

 #define STR_WITH_SIZE_TO_VARSTR(x, str, _size)      \
   do {                                              \
-    *(VarDataLenT *)(x) = (_size);                  \
+    *(VarDataLenT *)(x) = (VarDataLenT)(_size);     \
     memcpy(varDataVal(x), (str), (_size));          \
   } while (0);

 // ----------------- TSDB COLUMN DEFINITION
@@ -156,7 +156,7 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
  * +----------+----------+---------------------------------+---------------------------------+
  * | len      | sversion | First part                      | Second part                     |
  * +----------+----------+---------------------------------+---------------------------------+
  *
  * NOTE: timestamp in this row structure is TKEY instead of TSKEY
  */
 typedef void *SDataRow;
......
@@ -88,8 +88,8 @@ extern int32_t tsMinRowsInFileBlock;
 extern int32_t tsMaxRowsInFileBlock;
 extern int16_t tsCommitTime;  // seconds
 extern int32_t tsTimePrecision;
-extern int16_t tsCompression;
-extern int16_t tsWAL;
+extern int8_t  tsCompression;
+extern int8_t  tsWAL;
 extern int32_t tsFsyncPeriod;
 extern int32_t tsReplications;
 extern int32_t tsQuorum;
......
@@ -121,8 +121,8 @@ int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
-int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
-int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
+int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
+int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
@@ -137,7 +137,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
-int32_t tsOfflineThreshold = 86400 * 100; // seconds 10days
+int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1;
@@ -550,7 +550,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 3;
-cfg.maxValue = 7200000;
+cfg.maxValue = 86400 * 365;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
taosInitConfigOption(cfg);
@@ -758,7 +758,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "comp";
cfg.ptr = &tsCompression;
-cfg.valType = TAOS_CFG_VTYPE_INT16;
+cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_COMP_LEVEL;
cfg.maxValue = TSDB_MAX_COMP_LEVEL;
@@ -768,7 +768,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "walLevel";
cfg.ptr = &tsWAL;
-cfg.valType = TAOS_CFG_VTYPE_INT16;
+cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_WAL_LEVEL;
cfg.maxValue = TSDB_MAX_WAL_LEVEL;
...
@@ -430,7 +430,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
}
errno = 0;
-if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType)) {
+if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType) || (pVariant->nType == TSDB_DATA_TYPE_BOOL)) {
*result = pVariant->i64;
} else if (IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
*result = pVariant->u64;
@@ -775,7 +775,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
return -1;
}
} else {
-wcsncpy((wchar_t *)p, pVariant->wpz, pVariant->nLen);
+memcpy(p, pVariant->wpz, pVariant->nLen);
newlen = pVariant->nLen;
}
@@ -867,4 +867,4 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
}
return 0;
}
\ No newline at end of file
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.17-dist.jar ${LIBRARY_OUTPUT_PATH}
+COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.18-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
...
@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.17</version>
+<version>2.0.18</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
...
@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.17</version>
+<version>2.0.18</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@@ -126,7 +126,7 @@
<include>**/*Test.java</include>
</includes>
<excludes>
-<exclude>**/BatchInsertTest.java</exclude>
+<exclude>**/AppMemoryLeakTest.java</exclude>
<exclude>**/FailOverTest.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
...
@@ -19,9 +19,12 @@ import java.util.Map;
public abstract class TSDBConstants {
public static final String STATEMENT_CLOSED = "Statement already closed.";
public static final String DEFAULT_PORT = "6200"; public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables"; public static final String INVALID_VARIABLES = "invalid variables";
public static final String RESULT_SET_IS_CLOSED = "resultSet is closed.";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final long JNI_NULL_POINTER = 0L;
@@ -74,7 +77,7 @@ public abstract class TSDBConstants {
}
static {
-DATATYPE_MAP = new HashMap<Integer, String>();
+DATATYPE_MAP = new HashMap<>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
...
@@ -100,7 +100,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
* order to process those supported SQLs.
*/
private void preprocessSql() {
/***** For processing some of Spark SQLs*****/
// should replace it first
this.rawSql = this.rawSql.replaceAll("or (.*) is null", "");
@@ -149,7 +148,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
rawSql = rawSql.replace(matcher.group(1), tableFullName);
}
/***** for inner queries *****/
}
/**
@@ -196,7 +194,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
-setObject(parameterIndex, new String("NULL"));
+setObject(parameterIndex, "NULL");
}
@Override
...
@@ -52,12 +52,18 @@ public class TSDBStatement implements Statement {
this.isClosed = false;
}
+@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
-throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
+try {
+return iface.cast(this);
+} catch (ClassCastException cce) {
+throw new SQLException("Unable to unwrap to " + iface.toString());
+}
}
+@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
-throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
+return iface.isInstance(this);
}
public ResultSet executeQuery(String sql) throws SQLException {
@@ -130,10 +136,15 @@ public class TSDBStatement implements Statement {
}
public void setMaxFieldSize(int max) throws SQLException {
+if (isClosed())
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getMaxRows() throws SQLException {
+if (isClosed())
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
// always set maxRows to zero, meaning unlimitted rows in a resultSet
return 0;
}
...
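For reference, the unwrap()/isWrapperFor() change above can be exercised from application code roughly as below. This is a hedged sketch and not part of the commit: the connection URL, user, and password are placeholders for a locally running TDengine instance.
// --- usage sketch (not part of this commit) ---
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class UnwrapExample {
    public static void main(String[] args) throws ClassNotFoundException, SQLException {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        // Placeholder connection parameters; adjust host/credentials for a real instance.
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // With the patched driver, these calls succeed for the concrete statement class
            // instead of throwing the "NOT supported currently" SQLException.
            if (stmt.isWrapperFor(com.taosdata.jdbc.TSDBStatement.class)) {
                com.taosdata.jdbc.TSDBStatement raw = stmt.unwrap(com.taosdata.jdbc.TSDBStatement.class);
                System.out.println("Unwrapped to " + raw.getClass().getName());
            }
        }
    }
}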
package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBConstants;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
public class RestfulResultSetMetaData implements ResultSetMetaData {
private final String database;
private ArrayList<RestfulResultSet.Field> fields;
private final RestfulResultSet resultSet;
-public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields) {
+public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields, RestfulResultSet resultSet) {
this.database = database;
this.fields = fields;
this.resultSet = resultSet;
}
@Override
@@ -26,13 +31,12 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public boolean isCaseSensitive(int column) throws SQLException {
//TODO
return false;
}
@Override
public boolean isSearchable(int column) throws SQLException {
-return false;
+return true;
}
@Override
@@ -42,17 +46,30 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int isNullable(int column) throws SQLException {
if (column == 1)
return ResultSetMetaData.columnNoNulls;
return ResultSetMetaData.columnNullable;
}
@Override
public boolean isSigned(int column) throws SQLException {
-return false;
+String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "TINYINT":
case "SMALLINT":
case "INT":
case "BIGINT":
case "FLOAT":
case "DOUBLE":
return true;
default:
return false;
}
}
@Override
public int getColumnDisplaySize(int column) throws SQLException {
-return 0;
+return this.fields.get(column - 1).length;
}
@Override
@@ -62,27 +79,46 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnName(int column) throws SQLException {
-return null;
+return fields.get(column - 1).name;
}
@Override
public String getSchemaName(int column) throws SQLException {
-return this.database;
+return "";
}
@Override
public int getPrecision(int column) throws SQLException {
-return 0;
+String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
case "BINARY":
case "NCHAR":
return this.fields.get(column - 1).length;
default:
return 0;
}
}
@Override
public int getScale(int column) throws SQLException {
-return 0;
+String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
default:
return 0;
}
}
@Override
public String getTableName(int column) throws SQLException {
-return null;
+return "";
}
@Override
@@ -92,17 +128,41 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int getColumnType(int column) throws SQLException {
-return 0;
+String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "BOOL":
return java.sql.Types.BOOLEAN;
case "TINYINT":
return java.sql.Types.TINYINT;
case "SMALLINT":
return java.sql.Types.SMALLINT;
case "INT":
return java.sql.Types.INTEGER;
case "BIGINT":
return java.sql.Types.BIGINT;
case "FLOAT":
return java.sql.Types.FLOAT;
case "DOUBLE":
return java.sql.Types.DOUBLE;
case "BINARY":
return java.sql.Types.BINARY;
case "TIMESTAMP":
return java.sql.Types.TIMESTAMP;
case "NCHAR":
return java.sql.Types.NCHAR;
}
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@Override
public String getColumnTypeName(int column) throws SQLException {
-return null;
+String type = fields.get(column - 1).type;
return type.toUpperCase();
}
@Override
public boolean isReadOnly(int column) throws SQLException {
-return false;
+return true;
}
@Override
@@ -117,16 +177,43 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnClassName(int column) throws SQLException {
-return null;
+String type = this.fields.get(column - 1).type;
String columnClassName = "";
switch (type) {
case "BOOL":
return Boolean.class.getName();
case "TINYINT":
case "SMALLINT":
return Short.class.getName();
case "INT":
return Integer.class.getName();
case "BIGINT":
return Long.class.getName();
case "FLOAT":
return Float.class.getName();
case "DOUBLE":
return Double.class.getName();
case "TIMESTAMP":
return Timestamp.class.getName();
case "BINARY":
case "NCHAR":
return String.class.getName();
}
return columnClassName;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
-return null;
+try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
-return false;
+return iface.isInstance(this);
}
}
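A hedged usage sketch of the richer result-set metadata follows. It assumes the RESTful driver is registered as com.taosdata.jdbc.rs.RestfulDriver and accepts jdbc:TAOS-RS:// URLs on the default REST port 6041; the database and table names are placeholders, not part of the diff.
// --- usage sketch (not part of this commit) ---
import java.sql.*;

public class MetaDataExample {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");   // assumed driver class name
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/test", "root", "taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from meters limit 1")) {
            ResultSetMetaData meta = rs.getMetaData();
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                // getColumnType() now maps TDengine types to java.sql.Types constants,
                // and getColumnClassName()/isSigned() return meaningful values.
                System.out.printf("%s: sqlType=%d name=%s class=%s signed=%b%n",
                        meta.getColumnName(i),
                        meta.getColumnType(i),
                        meta.getColumnTypeName(i),
                        meta.getColumnClassName(i),
                        meta.isSigned(i));
            }
        }
    }
}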
@@ -14,7 +14,6 @@ import java.util.stream.Collectors;
public class RestfulStatement implements Statement {
-private static final String STATEMENT_CLOSED = "Statement already closed.";
private boolean closed;
private String database;
private final RestfulConnection conn;
@@ -65,37 +64,18 @@ public class RestfulStatement implements Statement {
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed())
throw new SQLException("statement already closed");
-if (!SqlSyntaxValidator.isSelectSql(sql))
-throw new SQLException("not a select sql for executeQuery: " + sql);
+if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
+throw new SQLException("not a valid sql for executeQuery: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
-// row data
-String result = HttpClientPoolUtil.execute(url, sql);
-JSONObject resultJson = JSON.parseObject(result);
-if (resultJson.getString("status").equals("error")) {
-throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
-}
-// parse table name from sql
-String[] tableIdentifiers = parseTableIdentifier(sql);
-if (tableIdentifiers != null) {
-List<JSONObject> fieldJsonList = new ArrayList<>();
-for (String tableIdentifier : tableIdentifiers) {
-// field meta
-String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
-JSONObject fieldJson = JSON.parseObject(fields);
-if (fieldJson.getString("status").equals("error")) {
-throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
-}
-fieldJsonList.add(fieldJson);
-}
-this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
-} else {
-this.resultSet = new RestfulResultSet(database, this, resultJson);
-}
-this.affectedRows = 0;
-return resultSet;
+if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
+return executeOneQuery(url, sql);
+}
+if (this.database == null || this.database.isEmpty())
+throw new SQLException("Database not specified or available");
+HttpClientPoolUtil.execute(url, "use " + this.database);
+return executeOneQuery(url, sql);
}
@Override
@@ -105,19 +85,15 @@ public class RestfulStatement implements Statement {
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw new SQLException("not a valid sql for executeUpdate: " + sql);
-if (this.database == null)
-throw new SQLException("Database not specified or available");
-final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
-// HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
-String result = HttpClientPoolUtil.execute(url, sql);
-JSONObject jsonObject = JSON.parseObject(result);
-if (jsonObject.getString("status").equals("error")) {
-throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + jsonObject.getString("desc") + "\n" + "error code: " + jsonObject.getString("code")));
-}
-this.resultSet = null;
-this.affectedRows = Integer.parseInt(jsonObject.getString("rows"));
-return this.affectedRows;
+final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
+if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
+return executeOneUpdate(url, sql);
+}
+if (this.database == null || this.database.isEmpty())
+throw new SQLException("Database not specified or available");
+HttpClientPoolUtil.execute(url, "use " + this.database);
+return executeOneUpdate(url, sql);
}
@Override
@@ -131,14 +107,14 @@ public class RestfulStatement implements Statement {
@Override
public int getMaxFieldSize() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return TSDBConstants.maxFieldSize;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
@@ -147,14 +123,14 @@ public class RestfulStatement implements Statement {
@Override
public int getMaxRows() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
@@ -163,20 +139,20 @@ public class RestfulStatement implements Statement {
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
if (isClosed())
-throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
}
@Override
public int getQueryTimeout() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (seconds < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@@ -189,7 +165,7 @@ public class RestfulStatement implements Statement {
@Override
public SQLWarning getWarnings() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return null;
}
@@ -197,53 +173,93 @@ public class RestfulStatement implements Statement {
public void clearWarnings() throws SQLException {
// nothing to do
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
}
@Override
public void setCursorName(String name) throws SQLException {
if (isClosed())
-throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql) throws SQLException {
-if (isClosed()) {
-throw new SQLException("Invalid method call on a closed statement.");
-}
-//如果执行了use操作应该将当前Statement的catalog设置为新的database
-if (SqlSyntaxValidator.isUseSql(sql)) {
-this.database = sql.trim().replace("use", "").trim();
-this.conn.setCatalog(this.database);
-}
-if (this.database == null)
-throw new SQLException("Database not specified or available");
-if (SqlSyntaxValidator.isSelectSql(sql)) {
-executeQuery(sql);
-} else if (SqlSyntaxValidator.isShowSql(sql) || SqlSyntaxValidator.isDescribeSql(sql)) {
-final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
-if (!SqlSyntaxValidator.isShowDatabaseSql(sql)) {
-HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
-}
-String result = HttpClientPoolUtil.execute(url, sql);
-JSONObject resultJson = JSON.parseObject(result);
-if (resultJson.getString("status").equals("error")) {
-throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
-}
-this.resultSet = new RestfulResultSet(database, this, resultJson);
-} else {
-executeUpdate(sql);
-}
-return true;
+if (isClosed())
+throw new SQLException("Invalid method call on a closed statement.");
+if (!SqlSyntaxValidator.isValidForExecute(sql))
+throw new SQLException("not a valid sql for execute: " + sql);
+//如果执行了use操作应该将当前Statement的catalog设置为新的database
+final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
+if (SqlSyntaxValidator.isUseSql(sql)) {
+HttpClientPoolUtil.execute(url, sql);
+this.database = sql.trim().replace("use", "").trim();
+this.conn.setCatalog(this.database);
+} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
+executeOneQuery(url, sql);
+} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
+executeOneUpdate(url, sql);
+} else {
+if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) {
+executeQuery(sql);
+} else {
+executeUpdate(sql);
+}
+}
+return true;
}
+private ResultSet executeOneQuery(String url, String sql) throws SQLException {
+if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
+throw new SQLException("not a select sql for executeQuery: " + sql);
+// row data
+String result = HttpClientPoolUtil.execute(url, sql);
+JSONObject resultJson = JSON.parseObject(result);
+if (resultJson.getString("status").equals("error")) {
+throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
+}
+// parse table name from sql
+String[] tableIdentifiers = parseTableIdentifier(sql);
+if (tableIdentifiers != null) {
+List<JSONObject> fieldJsonList = new ArrayList<>();
+for (String tableIdentifier : tableIdentifiers) {
+// field meta
+String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
+JSONObject fieldJson = JSON.parseObject(fields);
+if (fieldJson.getString("status").equals("error")) {
+throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
+}
+fieldJsonList.add(fieldJson);
+}
+this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
+} else {
+this.resultSet = new RestfulResultSet(database, this, resultJson);
+}
+this.affectedRows = 0;
+return resultSet;
+}
+private int executeOneUpdate(String url, String sql) throws SQLException {
+if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
+throw new SQLException("not a valid sql for executeUpdate: " + sql);
+String result = HttpClientPoolUtil.execute(url, sql);
+JSONObject jsonObject = JSON.parseObject(result);
+if (jsonObject.getString("status").equals("error")) {
+throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + jsonObject.getString("desc") + "\n" + "error code: " + jsonObject.getString("code")));
+}
+this.resultSet = null;
+this.affectedRows = Integer.parseInt(jsonObject.getString("rows"));
+return this.affectedRows;
+}
@Override
public ResultSet getResultSet() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return resultSet;
}
@@ -275,7 +291,7 @@ public class RestfulStatement implements Statement {
@Override
public void setFetchSize(int rows) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (rows < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
@@ -284,28 +300,28 @@ public class RestfulStatement implements Statement {
@Override
public int getFetchSize() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return 0;
}
@Override
public int getResultSetConcurrency() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.resultSet.getConcurrency();
}
@Override
public int getResultSetType() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.resultSet.getType();
}
@Override
public void addBatch(String sql) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
//TODO:
}
@@ -323,14 +339,14 @@ public class RestfulStatement implements Statement {
@Override
public Connection getConnection() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.conn;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (resultSet == null)
return false;
@@ -388,7 +404,7 @@ public class RestfulStatement implements Statement {
@Override
public int getResultSetHoldability() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.resultSet.getHoldability();
}
@@ -400,28 +416,28 @@ public class RestfulStatement implements Statement {
@Override
public void setPoolable(boolean poolable) throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
//nothing to do
}
@Override
public boolean isPoolable() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
this.closeOnCompletion = true;
}
@Override
public boolean isCloseOnCompletion() throws SQLException {
if (isClosed())
-throw new SQLException(STATEMENT_CLOSED);
+throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.closeOnCompletion;
}
...
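A rough sketch of how the reworked execute()/executeQuery() routing behaves from the caller's side. This is hedged and not part of the commit: host, port, database, and table are placeholders, and the RESTful driver class name is an assumption.
// --- usage sketch (not part of this commit) ---
import java.sql.*;

public class RestfulExecuteExample {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");   // assumed driver class name
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/", "root", "taosdata");
             Statement stmt = conn.createStatement()) {
            // "show databases" is database-unspecified, so the patched statement can run it
            // without a current database and without issuing "use <db>" first.
            try (ResultSet rs = stmt.executeQuery("show databases")) {
                while (rs.next())
                    System.out.println(rs.getString(1));
            }
            // A "use" statement updates the statement's catalog; later statements
            // are prefixed with "use <db>" against the REST endpoint.
            stmt.execute("use test");
            int rows = stmt.executeUpdate("insert into t0 values (now, 1, 1)");   // placeholder table
            System.out.println("affected rows: " + rows);
        }
    }
}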
@@ -20,8 +20,11 @@ import java.sql.Connection;
public class SqlSyntaxValidator {
-private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use", "import"};
-private static final String[] querySQL = {"select"};
+private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe"};
+private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"};
private static final String[] querySQL = {"select", "show", "describe"};
private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"};
private TSDBConnection tsdbConnection;
@@ -37,8 +40,38 @@ public class SqlSyntaxValidator {
return false;
}
public static boolean isValidForExecuteQuery(String sql) {
for (String prefix : querySQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
return true;
}
return false;
}
public static boolean isValidForExecute(String sql) {
for (String prefix : SQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
return true;
}
return false;
}
public static boolean isDatabaseUnspecifiedQuery(String sql) {
for (String databaseObj : databaseUnspecifiedShow) {
if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*"))
return true;
}
return false;
}
public static boolean isDatabaseUnspecifiedUpdate(String sql) {
sql = sql.trim().toLowerCase();
return sql.matches("create\\s+database.*") || sql.startsWith("set") || sql.matches("drop\\s+database.*");
}
public static boolean isUseSql(String sql) {
-return sql.trim().toLowerCase().startsWith("use") || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
+return sql.trim().toLowerCase().startsWith("use");
// || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
}
public static boolean isShowSql(String sql) {
@@ -58,8 +91,9 @@ public class SqlSyntaxValidator {
return sql.trim().toLowerCase().startsWith("select");
}
public static boolean isShowDatabaseSql(String sql) {
return sql.trim().toLowerCase().matches("show\\s*databases");
}
}
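The validator above classifies statements purely by prefix matching and a few regexes. The standalone sketch below mirrors that idea for illustration only; it is not the driver class itself, and the names here are invented for the example.
// --- standalone sketch (not part of this commit) ---
import java.util.Arrays;

public class SqlClassifierSketch {
    private static final String[] QUERY_PREFIXES  = {"select", "show", "describe"};
    private static final String[] UPDATE_PREFIXES = {"insert", "import", "create", "use", "alter", "drop", "set"};

    static boolean startsWithAny(String sql, String[] prefixes) {
        String s = sql.trim().toLowerCase();
        return Arrays.stream(prefixes).anyMatch(s::startsWith);
    }

    static boolean isDatabaseUnspecifiedQuery(String sql) {
        // "show databases", "show dnodes", "show mnodes", "show variables" need no current database.
        return sql.trim().toLowerCase().matches("show\\s+(databases|dnodes|mnodes|variables).*");
    }

    public static void main(String[] args) {
        String[] samples = {"SHOW DATABASES", "select * from meters", "create database db", "use db"};
        for (String sql : samples) {
            System.out.printf("%-22s query=%b update=%b dbUnspecified=%b%n", sql,
                    startsWithAny(sql, QUERY_PREFIXES),
                    startsWithAny(sql, UPDATE_PREFIXES),
                    isDatabaseUnspecifiedQuery(sql));
        }
    }
}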
package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TDNodes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public abstract class BaseTest {
private static boolean testCluster = false;
private static TDNodes nodes = new TDNodes();
@BeforeClass
public static void setupEnv() {
try {
if (nodes.getTDNode(1).getTaosdPid() != null) {
System.out.println("Kill taosd before running JDBC test");
nodes.getTDNode(1).setRunning(1);
nodes.stop(1);
}
nodes.setTestCluster(testCluster);
nodes.deploy(1);
nodes.start(1);
} catch (Exception e) {
e.printStackTrace();
}
}
@AfterClass
public static void cleanUpEnv() {
nodes.stop(1);
}
}
\ No newline at end of file
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class BatchInsertTest {
private Connection connection;
private static String dbName = "test";
private static String stbName = "meters";
private static String host = "127.0.0.1";
private static int numOfTables = 30;
private static int numOfRecordsPerTable = 1000;
private static long ts = 1496732686000l;
private static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName);
stmt.execute("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
stmt.execute(createTableSql);
for (int i = 0; i < numOfTables; i++) {
String loc = i % 2 == 0 ? "beijing" : "shanghai";
String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
stmt.execute(createSubTalbesSql);
}
stmt.close();
}
@Test
public void testBatchInsert() throws SQLException {
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(() -> {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
}
});
}
executorService.shutdown();
try {
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from meters");
int num = 0;
while (rs.next()) {
num++;
}
assertEquals(num, numOfTables * numOfRecordsPerTable);
rs.close();
}
@After
public void close() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
\ No newline at end of file
@@ -6,76 +6,67 @@ import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
-public class QueryDataTest extends BaseTest {
+public class QueryDataTest {
-static Connection connection = null;
-static Statement statement = null;
+static Connection connection;
+static Statement statement;
static String dbName = "test";
static String stbName = "meters";
-static String host = "localhost";
+static String host = "127.0.0.1";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000l;
final static String tablePrefix = "t";
@Before
-public void createDatabase() throws SQLException {
+public void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
-} catch (ClassNotFoundException e) {
+Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql);
} catch (ClassNotFoundException | SQLException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))";
statement.executeUpdate(createTableSql);
}
@Test
public void testQueryBinaryData() throws SQLException{
String insertSql = "insert into " + stbName + " values(now, 'taosda')";
System.out.println(insertSql);
@Test
public void testQueryBinaryData() throws SQLException {
String insertSql = "insert into " + stbName + " values(now, 'taosdata')";
System.out.println(insertSql);
statement.executeUpdate(insertSql);
String querySql = "select * from " + stbName;
ResultSet rs = statement.executeQuery(querySql);
-while(rs.next()) {
-String name = rs.getString(2) + "001";
+while (rs.next()) {
+String name = rs.getString(2);
System.out.println("name = " + name);
-assertEquals(name, "taosda001");
+assertEquals("taosdata", name);
}
rs.close();
}
@After
-public void close() throws Exception {
-statement.close();
-connection.close();
-Thread.sleep(10);
+public void close() {
+try {
+if (statement != null)
+statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
\ No newline at end of file
@@ -29,6 +29,7 @@ public class StableTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName); statement.execute("create database if not exists " + dbName);
statement.execute("use " + dbName); statement.execute("use " + dbName);
statement.close(); statement.close();
......
@@ -10,36 +10,37 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
-public class SubscribeTest extends BaseTest {
+public class SubscribeTest {
-Connection connection = null;
-Statement statement = null;
+Connection connection;
+Statement statement;
String dbName = "test";
String tName = "t0";
String host = "localhost";
String topic = "test";
@Before
-public void createDatabase() throws SQLException {
+public void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
-} catch (ClassNotFoundException e) {
-return;
-}
-Properties properties = new Properties();
+Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
for (int i = 0; i < 2; i++) {
ts += i;
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
statement.executeUpdate(sql);
}
+} catch (ClassNotFoundException | SQLException e) {
+return;
+}
}
@@ -79,10 +80,16 @@ public class SubscribeTest extends BaseTest {
}
@After
-public void close() throws Exception {
-statement.executeQuery("drop database " + dbName);
-statement.close();
-connection.close();
-Thread.sleep(10);
+public void close() {
+try {
+statement.executeQuery("drop database " + dbName);
+if (statement != null)
+statement.close();
+if (connection != null)
+connection.close();
+} catch (SQLException e) {
+e.printStackTrace();
+}
}
}
\ No newline at end of file
@@ -8,6 +8,7 @@ import java.sql.*;
public class AuthenticationTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
private static final String user = "root"; private static final String user = "root";
private static final String password = "123456"; private static final String password = "123456";
private Connection conn; private Connection conn;
......