Commit 356dfa30 authored by markswang

Merge branch 'develop' of https://github.com/taosdata/TDengine into fix/TD-5534

@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
   #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
   #INSTALL(TARGETS shell RUNTIME DESTINATION .)
   IF (TD_MVN_INSTALLED)
-    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.33-dist.jar DESTINATION connector/jdbc)
+    INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc)
   ENDIF ()
 ELSEIF (TD_DARWIN)
   SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
@@ -179,17 +179,15 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
 | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **UnionTech UOS** | **NeoKylin** | **LINX V60/V80** | **Huawei EulerOS** |
 | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
 | X64 | ● | ● | | ○ | ● | ● | ● |
-| Raspberry Pi ARM32 | | ● | ● | | | | |
 | Loongson MIPS64 | | | ● | | | | |
 | Kunpeng ARM64 | | ○ | ○ | | ● | | |
 | SWCPU Alpha64 | | | ○ | ● | | | |
 | FT ARM64 | | ○ Ubuntu Kylin | | | | | |
 | Hygon X64 | ● | ● | ● | ○ | ● | ● | |
-| Rockchip ARM64/32 | | | ○ | | | | |
-| Allwinner ARM64/32 | | | ○ | | | | |
-| Actions ARM64/32 | | | ○ | | | | |
-| TI ARM32 | | | ○ | | | | |
+| Rockchip ARM64 | | | ○ | | | | |
+| Allwinner ARM64 | | | ○ | | | | |
+| Actions ARM64 | | | ○ | | | | |
 | Huawei Cloud ARM64 | | | | | | | ● |
 Note: ● means verified by official tests; ○ means verified by unofficial tests.
......
@@ -715,7 +715,7 @@ Query OK, 1 row(s) in set (0.001091s)
 2. To apply range filters on several fields at once, the keyword AND must be used to join the conditions; filtering conditions on different columns joined by OR are not supported yet.
 3. For filtering on a single field, only one time condition per statement is supported; for other (regular) columns or tag columns, the `OR` keyword can be used to combine filtering conditions, e.g. `((value > 20 AND value < 30) OR (value < 12))`.
 4. Since version 2.0.17.0, filtering supports the BETWEEN AND syntax; for example, `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the condition "1.5 ≤ col2 ≤ 3.25".
-5. Since version 2.1.4.0, filtering supports the IN operator, e.g. `WHERE city IN ('Beijing', 'Shanghai')`. Note: the BOOL type may be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE types are subject to floating-point precision, and a value in the set matches a row only if the two are equal within the precision range.<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
+5. Since version 2.1.4.0, filtering supports the IN operator, e.g. `WHERE city IN ('Beijing', 'Shanghai')`. Note: the BOOL type may be written as `{true, false}` or `{0, 1}`, but not as integers other than 0 and 1; FLOAT and DOUBLE types are subject to floating-point precision, and a value in the set matches a row only if the two are equal within the precision range; the TIMESTAMP type is supported on non-primary-key columns.<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
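A minimal sketch, for illustration only, combining the two operators above (the table name `meters` and its columns are hypothetical, not part of this commit):

```mysql
-- BETWEEN AND (since 2.0.17.0) combined with IN (since 2.1.4.0)
SELECT * FROM meters
  WHERE col2 BETWEEN 1.5 AND 3.25
    AND city IN ('Beijing', 'Shanghai');
```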
<!--
<a class="anchor" id="having"></a>
@@ -1338,6 +1338,7 @@ SELECT function_list FROM stb_name
 - Query filtering, aggregation, and other operations are executed with each divided window as an independent unit. Aggregation queries currently support three ways of dividing windows (see the sketch after this list):
 1. Time window: the width of the aggregation window is set with the INTERVAL keyword, with a minimum interval of 10 milliseconds (10a); an offset is also supported (the offset must be smaller than the interval), i.e. the offset of the window division relative to "UTC moment 0". The SLIDING clause sets the forward increment of the aggregation window, i.e. the duration by which each window slides forward. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
    * Since version 2.1.5.0, the minimum interval allowed in the INTERVAL clause is reduced to 1 microsecond (1u); if the time precision of the queried DATABASE is set to milliseconds, the minimum allowed interval is 1 millisecond (1a).
+   * **Note:** when the INTERVAL clause is used, except in very special cases, the timezone parameter in the taos.cfg of both the client and the server must be set to the same value, to avoid the severe performance impact of time-processing functions repeatedly converting across time zones.
 2. State window: an integer or boolean value identifies the state of the device when a record was produced; consecutive records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is given as the parameter of the STATE_WINDOW clause.
 3. Session window: the column holding the timestamps is given by the ts_col parameter of the SESSION clause; whether adjacent records belong to the same session is decided by the difference of their timestamps: if the difference is within tol_val, the records belong to the same window; if it exceeds tol_val, the next window opens automatically.
 - The WHERE clause can specify the start and end time of the query as well as other filtering conditions.
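As an illustration only, a minimal sketch of the three window clauses, assuming a hypothetical table `meters(ts, current, voltage, status)`:

```mysql
-- time window: 10-minute windows, sliding forward every 5 minutes
SELECT AVG(current), MAX(voltage) FROM meters
  WHERE ts >= '2021-07-01 00:00:00' AND ts < '2021-07-02 00:00:00'
  INTERVAL(10m) SLIDING(5m);

-- state window: one window per run of identical status values
SELECT COUNT(*), FIRST(ts) FROM meters STATE_WINDOW(status);

-- session window: a new window opens when adjacent records are more than 30s apart
SELECT COUNT(*), FIRST(ts) FROM meters SESSION(ts, 30s);
```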
......
@@ -11,7 +11,7 @@ One of the modules of TDengine is the time-series database. However, in addition
 - **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
 - **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
 - **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
-- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C + +/C#/Go/Node.js, and similar to MySQL with zero learning cost.
+- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.
 With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
......
@@ -188,16 +188,14 @@ List of platforms supported by TDengine server
 | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
 | ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ |
 | X64 | ● | ● | | ○ | ● | ● |
-| Raspberry ARM32 | | ● | ● | | | |
 | Loongson MIPS64 | | | ● | | | |
 | Kunpeng ARM64 | | ○ | ○ | | ● | |
 | SWCPU Alpha64 | | | ○ | ● | | |
 | FT ARM64 | | ○Ubuntu Kylin | | | | |
 | Hygon X64 | ● | ● | ● | ○ | ● | ● |
-| Rockchip ARM64/32 | | | ○ | | | |
-| Allwinner ARM64/32 | | | ○ | | | |
-| Actions ARM64/32 | | | ○ | | | |
-| TI ARM32 | | | ○ | | | |
+| Rockchip ARM64 | | | ○ | | | |
+| Allwinner ARM64 | | | ○ | | | |
+| Actions ARM64 | | | ○ | | | |
 Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
......
@@ -172,7 +172,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
 **Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnodes via replication, thus ensuring that a single replica of data is copied onto multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating a DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be achieved without expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, which assigns it a system-unique ID, the VGroup ID. If two virtual nodes have the same VGroup ID, they belong to the same group and their data is backed up between them. The number of virtual nodes in a virtual node group can be changed dynamically, allowing as few as one, i.e. no data replication. The VGroup ID never changes; even if a virtual node group is deleted, its ID is not reused.
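As a sketch only (the database name is illustrative), the replica count described above is set when the database is created:

```mysql
-- create a database whose vnode groups keep three replicas of the data
CREATE DATABASE power REPLICA 3;
```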
-**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of the C/C + + language, which is embedded in the JDBC, C#, Python, Go, and Node.js connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also needs to perform the final level of aggregation, sorting, filtering, and other operations. For the JDBC, C/C + +, C#, Python, Go, and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of the TDengine cluster.
+**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of the C/C++ language, which is embedded in the JDBC, C#, Python, Go, and Node.js connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also needs to perform the final level of aggregation, sorting, filtering, and other operations. For the JDBC, C/C++, C#, Python, Go, and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of the TDengine cluster.
 ### Node Communication
......
@@ -4,7 +4,7 @@ TDengine supports multiple interfaces to write data, including SQL, Prometheus,
 ## <a class="anchor" id="sql"></a> SQL Writing
-Applications insert data by executing SQL insert statements through C/C + +, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
+Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
 ```mysql
 INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
......
@@ -2,7 +2,7 @@
 ## <a class="anchor" id="queries"></a> Main Query Features
-TDengine uses SQL as the query language. Applications can send SQL statements through C/C + +, Java, Go, Python connectors, and users can manually execute SQL ad-hoc queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
+TDengine uses SQL as the query language. Applications can send SQL statements through C/C++, Java, Go, Python connectors, and users can manually execute SQL ad-hoc queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
 - Single-column and multi-column data query
 - Multiple filters for tags and numeric values: >, <, =, <>, like, etc.
......
@@ -22,7 +22,7 @@ Note: ● stands for that has been verified by official tests; ○ stands for th
 Note:
 - To access the TDengine database through connectors (except RESTful) on a system without the TDengine server software, it is necessary to install the corresponding version of the client installation package so that the application driver (the file is named libtaos.so on Linux and taos.dll on Windows) is installed on the system; otherwise, an error that the corresponding library file cannot be found will occur.
-- All APIs that execute SQL statements, such as `taos_query`, `taos_query_a`, `taos_subscribe` in the C/C + + Connector, and the APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
+- All APIs that execute SQL statements, such as `taos_query`, `taos_query_a`, `taos_subscribe` in the C/C++ Connector, and the APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
 - Users upgrading to TDengine 2.0.8.0 must update the JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or above.
 - No matter which programming-language connector is selected, for TDengine version 2.0 and above it is recommended that each thread of a database application establish an independent connection, or build a thread-based connection pool, to avoid interference between threads through the connection's "USE statement" state variable (query and write operations over a connection are thread-safe).
@@ -152,7 +152,7 @@ Under cmd, enter the c:\tdengine directory and directly execute taos.exe, and y
 | **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
 | **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **In development** |
-The C/C + + API is similar to MySQL's C API. When an application uses it, it needs to include the TDengine header file taos.h (after installation, it is located in /usr/local/taos/include):
+The C/C++ API is similar to MySQL's C API. When an application uses it, it needs to include the TDengine header file taos.h (after installation, it is located in /usr/local/taos/include):
 ```C
 #include <taos.h>
@@ -923,7 +923,7 @@ Manually install the following tools:
 If the steps above cannot be performed successfully, you can refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules).
-If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C + + compilers and libraries for ARM64" and "Visual C + + ATL for ARM64".
+If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
 #### Sample
......
@@ -240,7 +240,7 @@ Client configuration parameters:
 ```sql
 SELECT count(*) FROM table_name WHERE TS<1554984068000;
 ```
-In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15: 52: 01.123 +08:00, or ISO-8601 format timestamp strings 2013-04-12T15: 52: 01.123 +0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.
+In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as timestamp strings in RFC3339 format, 2013-04-12T15:52:01.123+08:00, or ISO-8601 format timestamp strings, 2013-04-12T15:52:01.123+0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.
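A sketch of the same filter written with a timezone-qualified timestamp string (the table name is carried over from the example above):

```sql
-- an RFC3339 timestamp with an explicit offset is interpreted
-- independently of the server's local time zone
SELECT count(*) FROM table_name WHERE TS < '2013-04-12T15:52:01.123+08:00';
```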
When starting taos, you can also specify an end point for an instance of taosd from the command line; otherwise it is read from taos.cfg.
......
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
   ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
     POST_BUILD
     COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.33-dist.jar ${LIBRARY_OUTPUT_PATH}
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH}
     COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
     COMMENT "build jdbc driver")
   ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
......
@@ -5,7 +5,7 @@
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.33</version>
+  <version>2.0.34</version>
   <packaging>jar</packaging>
   <name>JDBCDriver</name>
......
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.33</version>
+  <version>2.0.34</version>
   <packaging>jar</packaging>
   <name>JDBCDriver</name>
   <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
......
@@ -130,7 +130,7 @@ public abstract class TSDBConstants {
             case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
                 return Types.NCHAR;
         }
-        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
     }
     public static String taosType2JdbcTypeName(int taosType) throws SQLException {
@@ -160,7 +160,7 @@ public abstract class TSDBConstants {
             case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
                 return "NCHAR";
             default:
-                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
+                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
         }
     }
......
@@ -31,8 +31,8 @@ public class TSDBError {
         TSDBErrorMap.put(TSDBErrorNumbers.ERROR_URL_NOT_SET, "url is not set");
         TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql");
         TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
-        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine");
-        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION, "unknown timestamp precision");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
+        TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
         TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
......
@@ -25,8 +25,10 @@ public class TSDBErrorNumbers {
     public static final int ERROR_URL_NOT_SET = 0x2312;    // url is not set
     public static final int ERROR_INVALID_SQL = 0x2313;    // invalid sql
     public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314;    // numeric value out of range
-    public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315;    //unknown taos type in tdengine
-    public static final int ERROR_UNKNOWN_TIMESTAMP_PERCISION = 0x2316;    // unknown timestamp precision
+    public static final int ERROR_UNKNOWN_TAOS_TYPE = 0x2315;    //unknown taos type in tdengine
+    public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316;    // unknown timestamp precision
+    public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
+    public static final int ERROR_RESTFul_Client_IOException = 0x2318;
     public static final int ERROR_UNKNOWN = 0x2350;    //unknown error
@@ -62,8 +64,11 @@ public class TSDBErrorNumbers {
         errorNumbers.add(ERROR_URL_NOT_SET);
         errorNumbers.add(ERROR_INVALID_SQL);
         errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
-        errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE);
-        errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PERCISION);
+        errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
+        errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
+        errorNumbers.add(ERROR_RESTFul_Client_IOException);
+        errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
         errorNumbers.add(ERROR_SUBSCRIBE_FAILED);
         errorNumbers.add(ERROR_UNSUPPORTED_ENCODING);
......
@@ -213,7 +213,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
                 long nanoAdjustment = Integer.parseInt(value.substring(20));
                 return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
             }
-            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PERCISION);
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION);
         }
     }
 }
......
 package com.taosdata.jdbc.utils;
+import com.taosdata.jdbc.TSDBError;
+import com.taosdata.jdbc.TSDBErrorNumbers;
 import org.apache.http.HeaderElement;
 import org.apache.http.HeaderElementIterator;
 import org.apache.http.HttpEntity;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.HttpRequestRetryHandler;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.methods.*;
 import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.http.conn.ConnectionKeepAliveStrategy;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
 import org.apache.http.message.BasicHeaderElementIterator;
@@ -17,35 +20,24 @@ import org.apache.http.protocol.HTTP;
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.util.EntityUtils;
+import javax.net.ssl.SSLException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.UnknownHostException;
 import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
 public class HttpClientPoolUtil {
     private static final String DEFAULT_CONTENT_TYPE = "application/json";
-    private static final int DEFAULT_MAX_TOTAL = 200;
-    private static final int DEFAULT_MAX_PER_ROUTE = 20;
     private static final int DEFAULT_TIME_OUT = 15000;
+    private static final int DEFAULT_MAX_PER_ROUTE = 32;
+    private static final int DEFAULT_MAX_TOTAL = 1000;
     private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
+    private static final int DEFAULT_MAX_RETRY_COUNT = 5;
-    private static CloseableHttpClient httpClient;
-    private static synchronized void initPools() {
-        if (httpClient == null) {
-            PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
-            connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
-            connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
-            httpClient = HttpClients.custom()
-                    .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
-                    .setConnectionManager(connectionManager)
-                    .setRetryHandler(new DefaultHttpRequestRetryHandler(3, true))
-                    .build();
-        }
-    }
     private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
         HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
-        int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
         while (it.hasNext()) {
             HeaderElement headerElement = it.nextElement();
             String param = headerElement.getName();
@@ -53,34 +45,73 @@ public class HttpClientPoolUtil {
             if (value != null && param.equalsIgnoreCase("timeout")) {
                 try {
                     return Long.parseLong(value) * 1000;
-                } catch (Exception e) {
-                    new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace();
+                } catch (NumberFormatException ignore) {
                 }
             }
         }
-        return keepTime;
+        return DEFAULT_HTTP_KEEP_TIME * 1000;
+    };
+    private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> {
+        if (executionCount >= DEFAULT_MAX_RETRY_COUNT)
+            // do not retry if over max retry count
+            return false;
+        if (exception instanceof InterruptedIOException)
+            // timeout
+            return false;
+        if (exception instanceof UnknownHostException)
+            // unknown host
+            return false;
+        if (exception instanceof SSLException)
+            // SSL handshake exception
+            return false;
+        return true;
     };
-    /**
-     * execute an HTTP POST request
-     * uses Content-Type:application/json, Accept:application/json by default
-     *
-     * @param uri  request URL
-     * @param data request payload
-     * @return responseBody
-     */
-    public static String execute(String uri, String data, String token) {
-        long startTime = System.currentTimeMillis();
+    private static CloseableHttpClient httpClient;
+    static {
+        PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
+        connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
+        connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
+        httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build();
+    }
+    /*** execute GET request ***/
+    public static String execute(String uri) throws SQLException {
         HttpEntity httpEntity = null;
-        HttpEntityEnclosingRequestBase method = null;
         String responseBody = "";
         try {
-            if (httpClient == null) {
-                initPools();
-            }
-            method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
-            method.setHeader("Content-Type", "text/plain");
-            method.setHeader("Connection", "keep-alive");
+            HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
+            HttpContext context = HttpClientContext.create();
+            CloseableHttpResponse httpResponse = httpClient.execute(method, context);
+            httpEntity = httpResponse.getEntity();
+            if (httpEntity != null) {
+                responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
+            }
+        } catch (ClientProtocolException e) {
+            e.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
+        } catch (IOException exception) {
+            exception.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
+        } finally {
+            if (httpEntity != null) {
+                EntityUtils.consumeQuietly(httpEntity);
+            }
+        }
+        return responseBody;
+    }
+    /*** execute POST request ***/
+    public static String execute(String uri, String data, String token) throws SQLException {
+        HttpEntity httpEntity = null;
+        String responseBody = "";
+        try {
+            HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
+            method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
+            method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE);
             method.setHeader("Authorization", "Taosd " + token);
             method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
@@ -88,46 +119,31 @@ public class HttpClientPoolUtil {
             CloseableHttpResponse httpResponse = httpClient.execute(method, context);
             httpEntity = httpResponse.getEntity();
             if (httpEntity != null) {
-                responseBody = EntityUtils.toString(httpEntity, "UTF-8");
+                responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
             }
-        } catch (Exception e) {
-            if (method != null) {
-                method.abort();
-            }
-            new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
+        } catch (ClientProtocolException e) {
+            e.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
+        } catch (IOException exception) {
+            exception.printStackTrace();
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
         } finally {
             if (httpEntity != null) {
-                try {
-                    EntityUtils.consumeQuietly(httpEntity);
-                } catch (Exception e) {
-                    new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
-                }
+                EntityUtils.consumeQuietly(httpEntity);
             }
         }
         return responseBody;
     }
-    /**
-     * create a request
-     *
-     * @param uri         request URL
-     * @param methodName  request method type
-     * @param contentType content type
-     * @param timeout     timeout
-     * @return HttpRequestBase
-     * @author lisc
-     */
-    private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
-        if (httpClient == null) {
-            initPools();
-        }
+    /*** create http request ***/
+    private static HttpRequestBase getRequest(String uri, String methodName) {
         HttpRequestBase method;
-        if (timeout <= 0) {
-            timeout = DEFAULT_TIME_OUT;
-        }
-        RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000)
-                .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000)
-                .setExpectContinueEnabled(false).build();
+        RequestConfig requestConfig = RequestConfig.custom()
+                .setSocketTimeout(DEFAULT_TIME_OUT * 1000)
+                .setConnectTimeout(DEFAULT_TIME_OUT * 1000)
+                .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000)
+                .setExpectContinueEnabled(false)
+                .build();
         if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
             method = new HttpPut(uri);
         } else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) {
@@ -137,52 +153,10 @@ public class HttpClientPoolUtil {
         } else {
             method = new HttpPost(uri);
         }
-        if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) {
-            contentType = DEFAULT_CONTENT_TYPE;
-        }
-        method.addHeader("Content-Type", contentType);
-        method.addHeader("Accept", contentType);
+        method.addHeader(HTTP.CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
+        method.addHeader("Accept", DEFAULT_CONTENT_TYPE);
         method.setConfig(requestConfig);
         return method;
     }
-    /**
-     * execute a GET request
-     *
-     * @param uri URL
-     * @return responseBody
-     */
-    public static String execute(String uri) {
-        long startTime = System.currentTimeMillis();
-        HttpEntity httpEntity = null;
-        HttpRequestBase method = null;
-        String responseBody = "";
-        try {
-            if (httpClient == null) {
-                initPools();
-            }
-            method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
-            HttpContext context = HttpClientContext.create();
-            CloseableHttpResponse httpResponse = httpClient.execute(method, context);
-            httpEntity = httpResponse.getEntity();
-            if (httpEntity != null) {
-                responseBody = EntityUtils.toString(httpEntity, "UTF-8");
-            }
-        } catch (Exception e) {
-            if (method != null) {
-                method.abort();
-            }
-            e.printStackTrace();
-        } finally {
-            if (httpEntity != null) {
-                try {
-                    EntityUtils.consumeQuietly(httpEntity);
-                } catch (Exception e) {
-                    new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
-                }
-            }
-        }
-        return responseBody;
-    }
 }
\ No newline at end of file
@@ -648,7 +648,7 @@ static FILE * g_fpOfInsertResult = NULL;
     fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
 #define errorPrint(fmt, ...) \
-    do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
+    do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0)
 // for strncpy buffer overflow
 #define min(a, b) (((a) < (b)) ? (a) : (b))
@@ -5154,8 +5154,10 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
         debugPrint("%s() LN%d, stmt=%p",
                 __func__, __LINE__, pThreadInfo->stmt);
         if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
-            errorPrint("%s() LN%d, failied to execute insert statement\n",
-                    __func__, __LINE__);
+            errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
+            fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
             exit(-1);
         }
         affectedRows = k;
@@ -5718,7 +5720,7 @@ static int32_t prepareStmtWithoutStb(
     int ret = taos_stmt_set_tbname(stmt, tableName);
     if (ret != 0) {
         errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
-                tableName, ret, taos_errstr(NULL));
+                tableName, ret, taos_stmt_errstr(stmt));
         return ret;
     }
@@ -5771,9 +5773,17 @@ static int32_t prepareStmtWithoutStb(
                 return -1;
             }
         }
-        taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+        if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
+            errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            break;
+        }
         // if msg > 3MB, break
-        taos_stmt_add_batch(stmt);
+        if (0 != taos_stmt_add_batch(stmt)) {
+            errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            break;
+        }
         k++;
         recordFrom ++;
@@ -5949,14 +5959,19 @@ static int32_t prepareStbStmt(
         tmfree(tagsValBuf);
         tmfree(tagsArray);
+        if (0 != ret) {
+            errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            return -1;
+        }
     } else {
         ret = taos_stmt_set_tbname(stmt, tableName);
-    }
-    if (ret != 0) {
-        errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
-                tableName, ret, taos_errstr(NULL));
-        return ret;
+        if (0 != ret) {
+            errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            return -1;
+        }
     }
     char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
@@ -5974,9 +5989,19 @@ static int32_t prepareStbStmt(
             free(bindArray);
             return -1;
         }
-        taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+        ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+        if (0 != ret) {
+            errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            return -1;
+        }
         // if msg > 3MB, break
-        taos_stmt_add_batch(stmt);
+        ret = taos_stmt_add_batch(stmt);
+        if (0 != ret) {
+            errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            return -1;
+        }
         k++;
         recordFrom ++;
@@ -6925,7 +6950,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
             int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0);
             if (ret != 0){
                 errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
-                        ret, taos_errstr(NULL));
+                        ret, taos_stmt_errstr(pThreadInfo->stmt));
                 free(pids);
                 free(infos);
                 exit(-1);
......
@@ -60,7 +60,7 @@ typedef struct {
     fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
 #define errorPrint(fmt, ...) \
-    do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0)
+    do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0)
 // -------------------------- SHOW DATABASE INTERFACE-----------------------
@@ -234,9 +234,9 @@ static struct argp_option options[] = {
     {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
     {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
 #if TSDB_SUPPORT_NANOSECOND == 1
-    {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms, us, and ns. Default is ms.", 6},
+    {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6},
 #else
-    {"precision", 'C', "PRECISION", 0, "Epoch precision. Valid value is one of ms and us. Default is ms.", 6},
+    {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6},
 #endif
     {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
     {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
@@ -547,8 +547,8 @@ static void parse_precision_first(
             free(tmp);
             exit(-1);
         }
-        strncpy(g_args.precision, tmp,
-                min(DB_PRECISION_LEN - 1, strlen(tmp)));
+        tstrncpy(g_args.precision, tmp,
+                min(DB_PRECISION_LEN, strlen(tmp) + 1));
         free(tmp);
     }
 }
@@ -599,6 +599,7 @@ static void parse_timestamp(
                 return;
             }
         } else {
+            tstrncpy(arguments->precision, "n/a", strlen("n/a") + 1);
             tmpEpoch = atoll(tmp);
         }
@@ -794,12 +795,14 @@ static int taosGetTableRecordInfo(
     while ((row = taos_fetch_row(result)) != NULL) {
         isSet = true;
         pTableRecordInfo->isMetric = false;
-        strncpy(pTableRecordInfo->tableRecord.name,
+        tstrncpy(pTableRecordInfo->tableRecord.name,
                 (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
-                fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
-        strncpy(pTableRecordInfo->tableRecord.metric,
+                min(TSDB_TABLE_NAME_LEN,
+                    fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
+        tstrncpy(pTableRecordInfo->tableRecord.metric,
                 (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
-                fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
+                min(TSDB_TABLE_NAME_LEN,
+                    fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
         break;
     }
@@ -1080,8 +1083,8 @@ _dump_db_point:
             goto _exit_failure;
         }
-        strncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
-                fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+        tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+                min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes) + 1);
         if (g_args.with_property) {
             g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
             g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
@@ -1089,8 +1092,8 @@ _dump_db_point:
             g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
             g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-            strncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
-                    fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+            tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+                    min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes) + 1);
             //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
             //g_dbInfos[count]->daysToKeep1;
             //g_dbInfos[count]->daysToKeep2;
@@ -1103,8 +1106,8 @@ _dump_db_point:
             g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
             g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-            strncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
-                    fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+            tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+                    min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes) + 1);
             //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
             g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
         }
@@ -1255,17 +1258,19 @@ static int taosGetTableDes(
     tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
     while ((row = taos_fetch_row(res)) != NULL) {
-        strncpy(tableDes->cols[count].field,
+        tstrncpy(tableDes->cols[count].field,
                 (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
-                fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
-        strncpy(tableDes->cols[count].type,
+                min(TSDB_COL_NAME_LEN + 1,
+                    fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
+        tstrncpy(tableDes->cols[count].type,
                 (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
-                min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
+                min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
         tableDes->cols[count].length =
             *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
-        strncpy(tableDes->cols[count].note,
+        tstrncpy(tableDes->cols[count].note,
                 (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
-                fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
+                min(COL_NOTE_LEN,
+                    fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
         count++;
     }
@@ -1700,8 +1705,9 @@ static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE
     while ((row = taos_fetch_row(res)) != NULL) {
         memset(&tableRecord, 0, sizeof(STableRecord));
-        strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
-                fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+        tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+                min(TSDB_TABLE_NAME_LEN,
+                    fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
         taosWrite(fd, &tableRecord, sizeof(STableRecord));
     }
@@ -1775,9 +1781,11 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
     while ((row = taos_fetch_row(res)) != NULL) {
         memset(&tableRecord, 0, sizeof(STableRecord));
         tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
-                fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
+                min(TSDB_TABLE_NAME_LEN,
+                    fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1);
         tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
-                min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
+                min(TSDB_TABLE_NAME_LEN,
+                    fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1);
         taosWrite(fd, &tableRecord, sizeof(STableRecord));
@@ -2165,7 +2173,7 @@ static int taosCheckParam(struct arguments *arguments) {
     if (g_args.arg_list_len == 0) {
         if ((!g_args.all_databases) && (!g_args.isDumpIn)) {
-            fprintf(stderr, "taosdump requires parameters\n");
+            errorPrint("%s", "taosdump requires parameters for database and operation\n");
             return -1;
         }
     }
......
...
@@ -195,6 +195,10 @@ python3 ./test.py -f perfbenchmark/bug3433.py
 #python3 ./test.py -f perfbenchmark/bug3589.py
 python3 ./test.py -f perfbenchmark/taosdemoInsert.py
 
+#taosdemo
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+
 #query
 python3 ./test.py -f query/filter.py
 python3 ./test.py -f query/filterCombo.py
...
@@ -354,8 +358,7 @@ python3 ./test.py -f alter/alter_debugFlag.py
 python3 ./test.py -f query/queryBetweenAnd.py
 python3 ./test.py -f tag_lite/alter_tag.py
 
-python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
-python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
 python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
 python3 ./test.py -f tag_lite/drop_auto_create.py
 python3 test.py -f insert/insert_before_use_db.py
......
...
@@ -160,7 +160,7 @@ class TDTestCase:
         tdSql.execute("drop database if exists db")
         os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json -y " % binPath)
         tdSql.error("select * from db.stb0")
-        tdSql.execute("drop database if exists db")
+        # tdSql.execute("drop database if exists db")
         # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json -y " % binPath)
         # tdSql.query("select count(*) from db.stb0")
         # tdSql.checkData(0, 0, 10000)
...
@@ -247,7 +247,7 @@ class TDTestCase:
         # # insert: sample json
         # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath)
         # tdSql.execute("use dbtest123")
-        # tdSql.query("select col2 from stb0")
+        # tdSql.query("select c2 from stb0")
         # tdSql.checkData(0, 0, 2147483647)
         # tdSql.query("select * from stb1 where t1=-127")
         # tdSql.checkRows(20)
......
...
@@ -13,7 +13,9 @@ all: $(TARGET)
 exe:
 	gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS)
 	gcc $(CFLAGS) ./stmtBatchTest.c -o $(ROOT)stmtBatchTest $(LFLAGS)
+	gcc $(CFLAGS) ./stmtTest.c -o $(ROOT)stmtTest $(LFLAGS)
 
 clean:
 	rm $(ROOT)batchprepare
 	rm $(ROOT)stmtBatchTest
+	rm $(ROOT)stmtTest
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
#include <sys/time.h>
#include <pthread.h>
#include <unistd.h>
#define PRINT_ERROR printf("\033[31m");
#define PRINT_SUCCESS printf("\033[32m");
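
/* Run a single SQL statement; print the outcome and abort the whole test on failure. */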
void execute_simple_sql(void *taos, char *sql) {
    TAOS_RES *result = taos_query(taos, sql);
    if (result == NULL || taos_errno(result) != 0) {
        PRINT_ERROR
        printf("failed to %s, Reason: %s\n", sql, taos_errstr(result));
        taos_free_result(result);
        exit(EXIT_FAILURE);
    }
    taos_free_result(result);
    PRINT_SUCCESS
    printf("Successfully %s\n", sql);
}
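
/* Fetch every row of table t<id> and compare the row count against the expected value. */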
void check_result(TAOS *taos, int id, int expected) {
    char sql[256] = {0};
    sprintf(sql, "select * from t%d", id);
    TAOS_RES *result = taos_query(taos, sql);
    if (result == NULL || taos_errno(result) != 0) {
        PRINT_ERROR
        printf("failed to %s, Reason: %s\n", sql, taos_errstr(result));
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully execute %s\n", sql);
    int rows = 0;
    TAOS_ROW row;
    while ((row = taos_fetch_row(result))) {
        rows++;
    }
    if (rows == expected) {
        PRINT_SUCCESS
        printf("table t%d's %d rows are fetched as expected\n", id, rows);
    } else {
        PRINT_ERROR
        printf("table t%d's %d rows are fetched but %d expected\n", id, rows, expected);
    }
    taos_free_result(result);
}
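
/* End-to-end stmt test: create a super table and ten sub-tables, insert one
 * row into each sub-table through the parameter-binding API, then verify. */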
int main(int argc, char *argv[]) {
    void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
    if (taos == NULL) {
        PRINT_ERROR
        printf("TDengine error: failed to connect\n");
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully connected to TDengine\n");

    execute_simple_sql(taos, "drop database if exists test");
    execute_simple_sql(taos, "create database test");
    execute_simple_sql(taos, "use test");
    execute_simple_sql(taos, "create table super(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 smallint, c7 tinyint, c8 bool, c9 nchar(8), c10 timestamp) tags (t1 int, t2 bigint, t3 float, t4 double, t5 binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8))");

    char *sql = calloc(1, 1024*1024);
    int sqlLen = 0;
    sqlLen = sprintf(sql, "create table");
    for (int i = 0; i < 10; i++) {
        sqlLen += sprintf(sql + sqlLen, " t%d using super tags (%d, 2147483648, 0.1, 0.000000001, 'abcdefgh', 32767, 127, 1, '一二三四五六七八')", i, i);
    }
    execute_simple_sql(taos, sql);

    int code = taos_load_table_info(taos, "t0,t1,t2,t3,t4,t5,t6,t7,t8,t9");
    if (code != 0) {
        PRINT_ERROR
        printf("failed to load table info: 0x%08x\n", code);
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully load table info\n");

    TAOS_STMT *stmt = taos_stmt_init(taos);
    if (stmt == NULL) {
        PRINT_ERROR
        printf("TDengine error: failed to init taos_stmt\n");
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully init taos_stmt\n");
    uintptr_t c10len = 0;
    struct {
        int64_t c1;
        int32_t c2;
        int64_t c3;
        float c4;
        double c5;
        unsigned char c6[8];
        int16_t c7;
        int8_t c8;
        int8_t c9;
        char c10[32];
    } v = {0};
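
    /* One TAOS_BIND per column, in super-table column order. Fixed-size types
     * point length at their own buffer_length; the NCHAR column uses c10len,
     * which is updated for each row. */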
    TAOS_BIND params[11];
    params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
    params[0].buffer_length = sizeof(v.c1);
    params[0].buffer = &v.c1;
    params[0].length = &params[0].buffer_length;
    params[0].is_null = NULL;

    params[1].buffer_type = TSDB_DATA_TYPE_INT;
    params[1].buffer_length = sizeof(v.c2);
    params[1].buffer = &v.c2;
    params[1].length = &params[1].buffer_length;
    params[1].is_null = NULL;

    params[2].buffer_type = TSDB_DATA_TYPE_BIGINT;
    params[2].buffer_length = sizeof(v.c3);
    params[2].buffer = &v.c3;
    params[2].length = &params[2].buffer_length;
    params[2].is_null = NULL;

    params[3].buffer_type = TSDB_DATA_TYPE_FLOAT;
    params[3].buffer_length = sizeof(v.c4);
    params[3].buffer = &v.c4;
    params[3].length = &params[3].buffer_length;
    params[3].is_null = NULL;

    params[4].buffer_type = TSDB_DATA_TYPE_DOUBLE;
    params[4].buffer_length = sizeof(v.c5);
    params[4].buffer = &v.c5;
    params[4].length = &params[4].buffer_length;
    params[4].is_null = NULL;

    params[5].buffer_type = TSDB_DATA_TYPE_BINARY;
    params[5].buffer_length = sizeof(v.c6);
    params[5].buffer = &v.c6;
    params[5].length = &params[5].buffer_length;
    params[5].is_null = NULL;

    params[6].buffer_type = TSDB_DATA_TYPE_SMALLINT;
    params[6].buffer_length = sizeof(v.c7);
    params[6].buffer = &v.c7;
    params[6].length = &params[6].buffer_length;
    params[6].is_null = NULL;

    params[7].buffer_type = TSDB_DATA_TYPE_TINYINT;
    params[7].buffer_length = sizeof(v.c8);
    params[7].buffer = &v.c8;
    params[7].length = &params[7].buffer_length;
    params[7].is_null = NULL;

    params[8].buffer_type = TSDB_DATA_TYPE_BOOL;
    params[8].buffer_length = sizeof(v.c9);
    params[8].buffer = &v.c9;
    params[8].length = &params[8].buffer_length;
    params[8].is_null = NULL;

    params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
    params[9].buffer_length = sizeof(v.c10);
    params[9].buffer = &v.c10;
    params[9].length = &c10len;
    params[9].is_null = NULL;

    params[10].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
    params[10].buffer_length = sizeof(v.c1);
    params[10].buffer = &v.c1;
    params[10].length = &params[10].buffer_length;
    params[10].is_null = NULL;
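
    /* The table name is a placeholder as well, so one prepared statement
     * serves all ten sub-tables. */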
    char *stmt_sql = "insert into ? values (?,?,?,?,?,?,?,?,?,?,?)";
    code = taos_stmt_prepare(stmt, stmt_sql, 0);
    if (code != 0) {
        PRINT_ERROR
        printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully execute taos_stmt_prepare\n");
    for (int i = 0; i < 10; i++) {
        char buf[32];
        sprintf(buf, "t%d", i);
        if (i == 0) {
            code = taos_stmt_set_tbname(stmt, buf);
            if (code != 0) {
                PRINT_ERROR
                printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code);
                exit(EXIT_FAILURE);
            }
            PRINT_SUCCESS
            printf("Successfully execute taos_stmt_set_tbname\n");
        } else {
            code = taos_stmt_set_sub_tbname(stmt, buf);
            if (code != 0) {
                PRINT_ERROR
                printf("failed to execute taos_stmt_set_sub_tbname. code:0x%x\n", code);
                exit(EXIT_FAILURE);
            }
            PRINT_SUCCESS
            printf("Successfully execute taos_stmt_set_sub_tbname\n");
        }
        v.c1 = (int64_t)1591060628000;
        v.c2 = (int32_t)2147483647;
        v.c3 = (int64_t)2147483648;
        v.c4 = (float)0.1;
        v.c5 = (double)0.000000001;
        for (int j = 0; j < sizeof(v.c6); j++) {
            v.c6[j] = (char)('a');
        }
        v.c7 = 32767;
        v.c8 = 127;
        v.c9 = 1;
        strcpy(v.c10, "一二三四五六七八");
        c10len = strlen(v.c10);
        taos_stmt_bind_param(stmt, params);
        taos_stmt_add_batch(stmt);
    }
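
    /* A single execute flushes the whole batch: one row per sub-table. */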
    if (taos_stmt_execute(stmt) != 0) {
        PRINT_ERROR
        printf("failed to execute insert statement.\n");
        exit(EXIT_FAILURE);
    }
    PRINT_SUCCESS
    printf("Successfully execute insert statement.\n");
    taos_stmt_close(stmt);

    for (int i = 0; i < 10; i++) {
        check_result(taos, i, 1);
    }
    taos_close(taos);  /* release the connection before exiting */
    return 0;
}