Unverified commit 3a247522, authored by wade zhang, committed by GitHub

Merge branch 'develop' into taos-support-patch-2

......@@ -23,4 +23,4 @@
branch = develop
[submodule "examples/rust"]
path = examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
\ No newline at end of file
url = https://github.com/songtianyi/tdengine-rust-bindings.git
TDenginelogo.png (binary image changed: 18.4 KB → 9.1 KB)
......@@ -9,11 +9,12 @@ ELSEIF (TD_WINDOWS)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/examples DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/packaging/cfg/taos.cfg DESTINATION cfg)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taoserror.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
......
This diff is collapsed.
......@@ -56,15 +56,15 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(
## <a class="anchor" id="version"></a>TAOS-JDBCDriver versions and the supported TDengine and JDK versions
| taos-jdbcdriver version | TDengine 2.0.x.x version | TDengine 2.2.x.x version | TDengine 2.4.x.x version | JDK version |
| ----------------------- | ------------------------ | ------------------------ | ------------------------ | ----------- |
| 2.0.37                  | X                        | X                        | 2.4.0.6 and above        | 1.8.x       |
| 2.0.36                  | X                        | 2.2.2.11 and above       | 2.4.0.0 - 2.4.0.5        | 1.8.x       |
| 2.0.35                  | X                        | 2.2.2.11 and above       | 2.3.0.0 - 2.4.0.5        | 1.8.x       |
| 2.0.33 - 2.0.34         | 2.0.3.0 and above        | 2.2.0.0 and above        | 2.4.0.0 - 2.4.0.5        | 1.8.x       |
| 2.0.31 - 2.0.32         | 2.1.3.0 - 2.1.7.7        | X                        | X                        | 1.8.x       |
| 2.0.22 - 2.0.30         | 2.0.18.0 - 2.1.2.1       | X                        | X                        | 1.8.x       |
| 2.0.12 - 2.0.21         | 2.0.8.0 - 2.0.17.4       | X                        | X                        | 1.8.x       |
| 2.0.4 - 2.0.11          | 2.0.0.0 - 2.0.7.3        | X                        | X                        | 1.8.x       |
## TDengine DataType and Java DataType
......@@ -72,18 +72,18 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(
TDengine currently supports timestamp, numeric, character, and Boolean types, which map to Java types as follows:
| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) |
| ----------------- | ---------------------------------- | ----------------------------------- |
| TIMESTAMP         | java.lang.Long                     | java.sql.Timestamp                  |
| INT               | java.lang.Integer                  | java.lang.Integer                   |
| BIGINT            | java.lang.Long                     | java.lang.Long                      |
| FLOAT             | java.lang.Float                    | java.lang.Float                     |
| DOUBLE            | java.lang.Double                   | java.lang.Double                    |
| SMALLINT          | java.lang.Short                    | java.lang.Short                     |
| TINYINT           | java.lang.Byte                     | java.lang.Byte                      |
| BOOL              | java.lang.Boolean                  | java.lang.Boolean                   |
| BINARY            | java.lang.String                   | byte array                          |
| NCHAR             | java.lang.String                   | java.lang.String                    |
| JSON              | -                                  | java.lang.String                    |
Note: the JSON type is supported only in tags.
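For illustration, a minimal sketch of writing and reading a JSON tag through the connector. The database, table, and tag names are hypothetical, and a TDengine version with JSON tag support (2.4.x per the table above) is assumed; as the table shows, the JSON value is read back as java.lang.String:
```java
import java.sql.*;

public class JsonTagSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("create database if not exists jdb");
            // JSON is allowed only as a tag type, not as a data column type
            stmt.executeUpdate("create table if not exists jdb.jmeters (ts timestamp, current float) tags (info json)");
            stmt.executeUpdate("insert into jdb.jm1 using jdb.jmeters tags('{\"groupid\": 2}') values (now, 10.3)");
            try (ResultSet rs = stmt.executeQuery("select ts, current, info from jdb.jm1")) {
                while (rs.next()) {
                    // the JSON tag comes back as java.lang.String
                    System.out.println(rs.getTimestamp(1) + " " + rs.getFloat(2) + " " + rs.getString(3));
                }
            }
        }
    }
}
```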
......@@ -177,7 +177,7 @@ The configuration parameters in the URL are as follows:
* timezone: the time zone used by the client. Default: the system's current time zone.
* batchfetch: effective only with JDBC-JNI. true: fetch result sets in batches when executing queries; false: fetch result sets row by row. Default: false.
* timestampFormat: effective only with JDBC-RESTful. 'TIMESTAMP': timestamp fields in the result set are long values; 'UTC': timestamp fields are strings in UTC time format; 'STRING': timestamp fields are strings in local time format. Default: 'STRING'.
* batchErrorIgnore: true: if one SQL statement fails during a Statement's executeBatch, continue executing the statements that follow; false: do not execute any statement after the failed one. Default: false. (A connection URL combining these parameters is sketched below.)
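As an illustrative sketch only (host, port, database, and credentials are placeholders), these parameters are appended to the connection URL as ordinary query parameters:
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class UrlParameterSketch {
    public static void main(String[] args) throws SQLException {
        // JDBC-JNI URL: batchfetch and batchErrorIgnore take effect here
        String jniUrl = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata"
                + "&batchfetch=true&batchErrorIgnore=true&timezone=UTC-8";
        // JDBC-RESTful URL: timestampFormat takes effect here
        String restUrl = "jdbc:TAOS-RS://127.0.0.1:6041/test?user=root&password=taosdata"
                + "&timestampFormat=TIMESTAMP";
        try (Connection conn = DriverManager.getConnection(jniUrl)) {
            // use the connection ...
        }
    }
}
```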
#### Obtaining a connection with a specified URL and Properties
......@@ -345,6 +345,7 @@ The JDBC connector may report three types of error codes: errors from the JDBC driver itself (
* setString and setNString both require the user to declare, in the size parameter, the width of the corresponding column in the table definition
Sample code:
```java
public class ParameterBindingDemo {
......@@ -572,6 +573,7 @@ public class ParameterBindingDemo {
```
The methods for setting TAGS values are:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
......@@ -587,6 +589,7 @@ public void setTagNString(int index, String value)
```
The methods for setting the values of VALUES data columns are:
```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
......@@ -600,14 +603,56 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
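Putting the TAGS and VALUES methods together, the following is a minimal sketch of one bind-and-write round trip. It assumes the weather STable from the examples above (ts timestamp, temperature float, one nchar tag) and the driver's com.taosdata.jdbc.TSDBPreparedStatement extension used by the full ParameterBindingDemo:
```java
import com.taosdata.jdbc.TSDBPreparedStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;

public class BindSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            TSDBPreparedStatement pstmt = conn
                    .prepareStatement("insert into ? using weather tags(?) values(?, ?)")
                    .unwrap(TSDBPreparedStatement.class);
            pstmt.setTableName("t1");          // target sub-table
            pstmt.setTagNString(0, "beijing"); // tag indexes start at 0

            ArrayList<Long> tsList = new ArrayList<>();
            ArrayList<Float> tempList = new ArrayList<>();
            long now = System.currentTimeMillis();
            for (int i = 0; i < 10; i++) {
                tsList.add(now + i);
                tempList.add(20.0f + i);
            }
            pstmt.setTimestamp(0, tsList); // column 0: timestamps
            pstmt.setFloat(1, tempList);   // column 1: temperature values

            pstmt.columnDataAddBatch();
            pstmt.columnDataExecuteBatch();
            pstmt.columnDataCloseBatch();
            pstmt.close();
        }
    }
}
```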
### <a class="anchor" id="schemaless_java"></a>Schemaless Writing
Starting from version 2.2.0.0, TDengine supports schemaless writing. Schemaless writing is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For details, see [Schemaless Writing](https://www.taosdata.com/docs/cn/v2.0/insert#schemaless).
Note:
* The JDBC-RESTful implementation does not provide schemaless writing
* The following sample code is based on taos-jdbcdriver-2.0.36
Sample code:
```java
public class SchemalessInsertTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
try (Connection connection = DriverManager.getConnection(url)) {
init(connection);
SchemalessWriter writer = new SchemalessWriter(connection);
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
}
}
private static void init(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
stmt.executeUpdate("drop database if exists test_schemaless");
stmt.executeUpdate("create database if not exists test_schemaless");
stmt.executeUpdate("use test_schemaless");
}
}
}
```
### <a class="anchor" id="set-client-configuration"></a>Setting client parameters
Starting from TDengine-2.3.5.0, the JDBC driver supports setting TDengine client parameters on an application's first connection. In JDBC-JNI mode, the driver supports setting client parameters in two ways: through the jdbcUrl and through Properties.
Note:
* JDBC-RESTful does not support setting client parameters.
* Client parameters set in an application are process-level; to update them, the application must be restarted. This is because client parameters are global and take effect only the first time they are set by the application.
* The following sample code is based on taos-jdbcdriver-2.0.36 (a minimal Properties-based sketch follows this list).
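A minimal Properties-based sketch (the property-key constants are defined on com.taosdata.jdbc.TSDBDriver; the values shown are illustrative):
```java
import com.taosdata.jdbc.TSDBDriver;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

public class ClientParameterByProperties {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        // client parameters are process-level and take effect on the first connection only
        try (Connection conn = DriverManager.getConnection(url, props)) {
            // use the connection ...
        }
    }
}
```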
Sample code:
```java
public class ClientParameterSetting {
private static final String host = "127.0.0.1";
......
......@@ -4,7 +4,7 @@
TAOS SQL is the main tool for users to write data into and query data from TDengine. To help users get started quickly, TAOS SQL provides, to a certain extent, a style and usage similar to standard SQL. Strictly speaking, TAOS SQL is not and does not attempt to provide standard SQL syntax. In addition, since TDengine does not provide deletion for its time-series structured data, TAOS SQL provides no data deletion features.
TAOS SQL does not support abbreviations of keywords; for example, DESCRIBE cannot be abbreviated as DESC.
TAOS SQL currently supports only the abbreviation of the DESCRIBE keyword: DESCRIBE can be abbreviated as DESC.
The SQL syntax in this chapter follows these conventions:
......@@ -135,7 +135,7 @@ CREATE DATABASE db_name PRECISION 'ns';
```mysql
ALTER DATABASE db_name BLOCKS 100;
```
The BLOCKS parameter is the number of cache-sized memory blocks in each VNODE (TSDB), so the memory a VNODE uses is roughly (cache * blocks). Value range: [3, 1000].
The BLOCKS parameter is the number of cache-sized memory blocks in each VNODE (TSDB), so the memory a VNODE uses is roughly (cache * blocks); for example, with the default cache of 16 MB and 6 blocks, a VNODE uses about 96 MB. Value range: [3, 10000].
```mysql
ALTER DATABASE db_name CACHELAST 0;
......
......@@ -2,17 +2,17 @@
## <a class="anchor" id="intro"></a> About TDengine
TDengine is a high-performance, scalable time-series database with SQL support. Its code including cluster feature is open source under GNU AGPL v3.0. Besides the database, it provides caching, stream processing, data data subscription and other functionalities to reduce the complexity and cost of development and operation. TDengine differentiates itself from other TSDBs with the following advanatages.
TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides caching, stream processing, data subscription and other functionalities to reduce the complexity and cost of development and operation. TDengine differentiates itself from other TSDBs with the following advantages.
- **High Peroformance**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine.
- **High Performance**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine.
- **Scalable**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
- **SQL Support**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to handle time-series data better, and supporting convenient and flexible schemaless data ingestion.
- **All in One**: TDengine has built-in caching, stream processing and data subscription functions, it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler and easy to maintain.
- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain.
- **Seamless Integration**: Without a single line of code, TDengine provide seamless integration with third-party tools such as Telegraf, Grafana, EMQ X, Prometheus, StatsD, collectd, etc. More will be integrated.
- **Seamless Integration**: Without a single line of code, TDengine provides seamless, configurable integration with third-party tools such as Telegraf, Grafana, EMQ X, Prometheus, StatsD, collectd, etc. More third-party tools are being integrated.
- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools.
......@@ -20,7 +20,7 @@ TDengine is a high-performance, scalable time-series database with SQL support.
- **Interactive Console**: TDengine provides convenient console access to the database to run ad hoc queries, maintain the database, or manage the cluster without any programming.
TDengine can be widely applied to Internet of Things (IoT), Connected Vehicles, Industrial IoT, DevOps, energy, finance and many other scenarios. With TDengine, the total cost of ownership of time-series data platforms can be greatly reduced. However, since it makes full use of the characteristics of time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
With TDengine, the total cost of ownership of typical IoT, Connected Vehicles, Industrial Internet, Energy, Financial, DevOps and other Big Data applications can be greatly reduced. Note that because TDengine makes full use of the characteristics of IoT time-series data and is highly optimized for it, TDengine cannot be used as a general purpose database engine to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
![TDengine Technology Ecosystem](../images/eco_system.png)
<center>Figure 1. TDengine Ecosystem</center>
......@@ -28,45 +28,45 @@ TDengine can be widely applied to Internet of Things (IoT), Connected Vehicles,
## <a class="anchor" id="scenes"></a>Overall Scenarios of TDengine
As a time-series data platform, the typical application scenarios of TDengine are mainly presented in the IoT, connected vehicles, DevOps and Industrial Internet categories, with users having a certain amount of data. The following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.
As an IoT time-series Big Data platform, TDengine is optimal for application scenarios with the requirements described below. Therefore the following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.
### Characteristics and Requirements of Data Sources
From the perspective of data sources, designers can analyze the applicability of TDengine in target application systems as following.
From the perspective of data sources, designers can analyze the applicability of TDengine in target application systems as follows.
| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
| A huge amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is occasionally or continuously huge | | | √ | TDengine's performance is much higher than other similar products. It can continuously process a large amount of input data in the same hardware environment, and provide a performance evaluation tool that can easily run in the user environment. |
| A huge amount of data sources | | | √ | TDengine is designed to include optimizations specifically for a huge amount of data sources, such as data writing and querying, which is especially suitable for efficiently processing massive (tens of millions or more) data sources. |
| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.|
| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
### System Architecture Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions, and no need to integrate any additional third-party products. |
| Require fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability functions such as fault tolerance and disaster recovery. |
| Standardization specifications | | | √ | TDengine uses standard SQL language to provide main functions and follow standardization specifications. |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
| Standardization support | | | √ | TDengine supports standard SQL and also provides extensions specifically to analyze time-series data. |
### System Function Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require completed data processing algorithms built-in | | √ | | TDengine implements various general data processing algorithms, but has not properly handled all requirements of different industries, so special types of processing shall be processed at the application level. |
| Require a huge amount of crosstab queries | | √ | | This type of processing should be handled more by relational database systems, or TDengine and relational database systems should fit together to implement system functions. |
| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.|
| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
### System Performance Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require larger total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server-cooperating. |
| Require high-speed data processing | | | √ | TDengine’s storage and data processing are designed to be optimized for IoT, can generally improve the processing speed by multiple times than other similar products. |
| Require fast processing of fine-grained data | | | √ | TDengine has achieved the same level of performance with relational and NoSQL data processing systems. |
| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.|
| Extremely fast processing of fine-grained data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counseling services. |
| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
......@@ -9,8 +9,8 @@ Continuous query of TDengine adopts time-driven mode, which can be defined direc
The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the following ways:
- Unlike stream computing, which feeds back calculated results in real time, a continuous query starts calculation only after the time window closes. For example, if the time period is 1 day, the results of that day will only be generated after 23:59:59.
- If a history record is written to a time interval that has already been calculated, the continuous query will not re-calculate it and will not push the new results to the user again.
- The TDengine server does not cache or save the client's status, nor does it provide Exactly-Once semantics. If the application crashes, the continuous query will be pulled up again, and the starting time must be provided by the application.
### How to use continuous query
......@@ -83,7 +83,7 @@ taos_consume
taos_unsubscribe
```
Please refer to the [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/) for the documentation of these APIs. The following is still a smart meter scenario as an example to introduce their specific usage (please refer to the previous section "Continuous Query" for the structure of STables and sub-tables). The complete sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c).
Please refer to the [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/) for the documentation of these APIs. The following is still a smart meter scenario as an example to introduce their specific usage (please refer to the previous section "Continuous Query" for the structure of STables and sub-tables). The complete sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c).
If we want to be notified and do some process when the current of a smart meter exceeds a certain limit (e.g. 10A), there are two methods: one is to query each sub-table separately, record the timestamp of the last piece of data after each query, and then only query all data after this timestamp:
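For orientation, the first method amounts to a simple polling loop. The sketch below expresses it in Java via JDBC rather than this section's C API; the connection URL, table name d1001, and the one-second poll interval are illustrative:
```java
import java.sql.*;

public class PollingSketch {
    public static void main(String[] args) throws SQLException, InterruptedException {
        String url = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            long lastTs = 0; // timestamp of the last record seen, epoch milliseconds
            while (true) {
                String sql = "select ts, current from d1001 where ts > " + lastTs + " and current > 10";
                try (Statement stmt = conn.createStatement();
                     ResultSet rs = stmt.executeQuery(sql)) {
                    while (rs.next()) {
                        lastTs = rs.getTimestamp(1).getTime();
                        System.out.println("over-limit record at ts=" + lastTs);
                    }
                }
                Thread.sleep(1000); // poll once per second
            }
        }
    }
}
```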
......@@ -210,8 +210,8 @@ After introducing the code, let's take a look at the actual running effect. For
You can compile and start the sample program by executing the following command in the directory where the sample code is located:
```shell
$ make
$ ./subscribe -sql='select * from meters where current > 10;'
make
./subscribe -sql='select * from meters where current > 10;'
```
After the sample program starts, open another terminal window, start the TDengine shell, and insert a record with a current of 12A into **D1001**:
......@@ -299,8 +299,8 @@ public class SubscribeDemo {
try {
if (null != subscribe)
subscribe.close(true); // Close the subscription
if (connection != null)
    connection.close();
} catch (SQLException throwables) {
throwables.printStackTrace();
}
......@@ -312,7 +312,7 @@ public class SubscribeDemo {
Run the sample program. First, it consumes all the historical data that meets the query conditions:
```shell
# java -jar subscribe.jar
ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
......@@ -357,4 +357,4 @@ This SQL statement will obtain the last recorded voltage value of all smart mete
Alarm monitoring is a common requirement in TDengine scenarios. Conceptually, it requires the program to filter, from the most recent data, records that meet certain conditions, and to calculate a result from them according to a defined formula. When the result meets certain conditions and lasts for a certain period of time, the user is notified in some form.
In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).
\ No newline at end of file
In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).
......@@ -54,25 +54,23 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(
## JDBC driver version and supported TDengine and JDK versions
| taos-jdbcdriver | TDengine | JDK |
| --------------- |--------------------|--------|
| 2.0.36 | 2.4.0 and above | 1.8.x |
| 2.0.35 | 2.3.0 and above | 1.8.x |
| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x |
| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
| taos-jdbcdriver version | TDengine 2.0.x.x version | TDengine 2.2.x.x version | TDengine 2.4.x.x version | JDK version |
|---------------------| ----------------------| ----------------------| ----------------------| -------- |
| 2.0.37              | X                    | X                    | 2.4.0.6 and above  | 1.8.x |
| 2.0.36              | X                    | 2.2.2.11 and above   | 2.4.0.0 - 2.4.0.5  | 1.8.x |
| 2.0.35              | X                    | 2.2.2.11 and above   | 2.3.0.0 - 2.4.0.5  | 1.8.x |
| 2.0.33 - 2.0.34     | 2.0.3.0 and above    | 2.2.0.0 and above    | 2.4.0.0 - 2.4.0.5  | 1.8.x |
| 2.0.31 - 2.0.32 | 2.1.3.0 - 2.1.7.7 | X | X | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.1 | X | X | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.4 | X | X | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.3 | X | X | 1.8.x |
## DataType in TDengine and Java connector
TDengine supports the following data types, which map to Java types as follows:
| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) |
| ----------------- | ---------------------------------- | ----------------------------------- |
| TIMESTAMP | java.lang.Long | java.sql.Timestamp |
| INT | java.lang.Integer | java.lang.Integer |
| BIGINT | java.lang.Long | java.lang.Long |
......@@ -314,7 +312,8 @@ The Java connector may report three types of error codes: JDBC Driver (error cod
### Write data through parameter binding
Starting with version 2.1.2.0, TDengine's JDBC-JNI implementation significantly improves support for data write (INSERT) scenarios with Parameter-Binding. When writing data in this way, you can avoid the resource consumption of SQL parsing, which can significantly improve write performance in many cases.
Note:
**Note**:
* JDBC-RESTful implementations do not provide parameter binding
* The following sample code is based on taos-jdbcdriver-2.0.36
* use setString to bind BINARY data, and use setNString to bind NCHAR data
......@@ -322,6 +321,7 @@ Note:
Sample Code:
```java
public class ParameterBindingDemo {
......@@ -578,15 +578,57 @@ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
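As a sketch of the size requirement in the note above, the following assumes a STable with a BINARY(64) column (the schema and names are illustrative, using the driver's TSDBPreparedStatement extension from ParameterBindingDemo); setString is given the column width defined in the table:
```java
import com.taosdata.jdbc.TSDBPreparedStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;

public class BinaryBindSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            // assumed schema: create table stb (ts timestamp, note binary(64)) tags (t int)
            TSDBPreparedStatement ps = conn
                    .prepareStatement("insert into ? using stb tags(?) values(?, ?)")
                    .unwrap(TSDBPreparedStatement.class);
            ps.setTableName("stb_0");
            ps.setTagInt(0, 1);

            ArrayList<Long> ts = new ArrayList<>();
            ArrayList<String> notes = new ArrayList<>();
            long now = System.currentTimeMillis();
            for (int i = 0; i < 5; i++) {
                ts.add(now + i);
                notes.add("note-" + i);
            }
            ps.setTimestamp(0, ts);
            ps.setString(1, notes, 64); // size = the BINARY column width from the table definition
            ps.columnDataAddBatch();
            ps.columnDataExecuteBatch();
            ps.columnDataCloseBatch();
        }
    }
}
```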
### Data Writing via Schemaless
Starting with version 2.2.0.0, TDengine supports schemaless writing. The schemaless writing protocol is compatible with InfluxDB's Line Protocol and OpenTSDB's telnet and JSON format protocols. Please see [Schemaless Writing](https://www.taosdata.com/docs/en/v2.0/insert#schemaless)
**Note**:
* JDBC-RESTful implementations do not provide schemaless writing
* The following sample code is based on taos-jdbcdriver-2.0.36
Sample Code:
```java
public class SchemalessInsertTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
try (Connection connection = DriverManager.getConnection(url)) {
init(connection);
SchemalessWriter writer = new SchemalessWriter(connection);
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
}
}
private static void init(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
stmt.executeUpdate("drop database if exists test_schemaless");
stmt.executeUpdate("create database if not exists test_schemaless");
stmt.executeUpdate("use test_schemaless");
}
}
}
```
### Set client configuration in JDBC
Starting with TDengine-2.3.5.0, the JDBC driver supports setting TDengine client parameters on the first connection of a Java application. In JDBC-JNI mode, the driver supports setting client parameters either in the jdbcUrl or via Properties.
Note:
**Note**:
* JDBC-RESTful does not support setting client parameters.
* The client parameters set in the Java application are process-level. To update the client parameters, the application needs to be restarted. This is because these client parameters are global and take effect only the first time they are set by the application.
* The following sample code is based on taos-jdbcdriver-2.0.36.
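A minimal sketch of the jdbcUrl way (the parameter names follow the TDengine client configuration file; the values here are illustrative):
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ClientParameterByUrl {
    public static void main(String[] args) throws SQLException {
        // client parameters appended to the JDBC-JNI URL; they apply process-wide
        // from the first connection, so restart the application to change them
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"
                + "&debugFlag=135&asyncLog=0";
        try (Connection conn = DriverManager.getConnection(url)) {
            // use the connection ...
        }
    }
}
```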
Sample Code:
```java
public class ClientParameterSetting {
private static final String host = "127.0.0.1";
......
########################################################
# #
# jh_iot Configuration #
# Any questions, please email jhkj@njsteel.com.cn #
# #
########################################################
# first fully qualified domain name (FQDN) for jh_iot system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/jh_taos
# data file's directory
# dataDir /var/lib/jh_taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for jh_iot system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 3
# enable/disable backing up the vnode directory when removing a vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1
# if disk free space is less than this value, server service exit directly within startup process
# minimalDataDirGB 0.1
# one mnode is counted as this number of vnodes when allocating resources
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TIMER
# tmrDebugFlag 131
# debug flag for jh_iot client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query
# cqDebugFlag 131
# enable/disable recording the SQL in client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in retrieve blocking model, only 50% of query threads will be used for query processing in dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
[Unit]
Description=jh_iot server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/jh_taosd
ExecStartPre=/usr/local/jh_taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[Unit]
Description=KingHistorian server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/khserver
ExecStartPre=/usr/local/kinghistorian/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
########################################################
# #
# KingHistorian Configuration #
# Any questions, please email support@wellintech.com #
# #
########################################################
# first fully qualified domain name (FQDN) for KingHistorian system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/kinghistorian
# data file's directory
# dataDir /var/lib/kinghistorian
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for KingHistorian system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 3
# enable/disable backing up the vnode directory when removing a vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1
# if disk free space is less than this value, khserver service exit directly within startup process
# minimalDataDirGB 0.1
# one mnode is counted as this number of vnodes when allocating resources
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TIMER
# tmrDebugFlag 131
# debug flag for KingHistorian client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query
# cqDebugFlag 131
# enable/disable recording the SQL in kinghistorian client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in retrieve blocking model, only 50% of query threads will be used for query processing in dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
########################################################
# #
# PowerDB Configuration #
# Any questions, please email support@taosdata.com #
# #
########################################################
# first fully qualified domain name (FQDN) for PowerDB system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/power
# data file's directory
# dataDir /var/lib/power
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for PowerDB system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 3
# enable/disable backing up the vnode directory when removing a vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 0.1
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 0.1
# if disk free space is less than this value, powerd service exit directly within startup process
# minimalDataDirGB 0.1
# one mnode is counted as this number of vnodes when allocating resources
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TAOS TIMER
# tmrDebugFlag 131
# debug flag for TDengine client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query
# cqDebugFlag 131
# enable/disable recording the SQL in power client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in retrieve blocking model, only 50% of query threads will be used for query processing in dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
[Unit]
Description=Power server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/powerd
ExecStartPre=/usr/local/power/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
########################################################
# #
# ProDB Configuration #
# Any questions, please email support@hanatech.com.cn #
# #
########################################################
# first fully qualified domain name (FQDN) for ProDB system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/ProDB
# data file's directory
# dataDir /var/lib/ProDB
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for ProDB system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 3
# enable/disable backing up the vnode directory when removing a vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delay for launching a stream computation, as a ratio of the time window: from 0.1 (default, 10% of the whole window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the interval in milliseconds at which fsync is executed; if set to 0, fsync is called immediately on every write
# fsync 3000
# number of replications, for cluster only
# replica 1
# compression of RPC messages, options:
#   -1 (no compression)
#    0 (all messages compressed)
#  > 0 (RPC message bodies larger than this value will be compressed)
# compressMsgSize -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the available disk space of the log folder is less than this value (GB)
# minimalLogDirGB 0.1
# stop writing temporary files when the available disk space of the tmp folder is less than this value (GB)
# minimalTmpDirGB 0.1
# if free disk space is less than this value (GB), the prodbs service exits directly during startup
# minimalDataDirGB 0.1
# the number of vnodes one mnode is counted as consuming
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debugging purposes only.
# debugFlag: 8-bit mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE-WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
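The documented values decompose as bit sums; assuming the mask bits are FILE=128, SCREEN=64, TRACE=8, DEBUG=4, WARN=2, ERROR=1 (inferred from the mask layout above, not from the source), a quick check in the shell:
echo $((128 + 2 + 1))               # 131: file + warning + error
echo $((128 + 4 + 2 + 1))           # 135: file + debug + warning + error
echo $((128 + 8 + 4 + 2 + 1))       # 143: file + trace + debug + warning + error
echo $((128 + 64 + 4 + 2 + 1))      # 199: screen and file + debug + warning + error
echo $((128 + 64 + 8 + 4 + 2 + 1))  # 207: screen and file + trace + debug + warning + error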
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TAOS TIMER
# tmrDebugFlag 131
# debug flag for ProDB client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continuous query
# cqDebugFlag 131
# enable/disable recording the SQL in prodb client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in retrieve blocking model, only 50% of the query threads will be used for query processing in a dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
[Unit]
Description=ProDB server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/prodbs
ExecStartPre=/usr/local/ProDB/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
This diff has been collapsed.
[Unit]
Description=TQ server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/tqd
ExecStartPre=/usr/local/tq/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
......@@ -13,6 +13,7 @@ set -e
# set parameters by default value
cpuType=""
cpuTypeAlias=""
version=""
passWord=""
pkgFile=""
......@@ -88,8 +89,18 @@ cp -f ${comunityArchiveDir}/${pkgFile} .
echo "dirName=${dirName}"
if [[ "${cpuType}" == "x64" ]] || [[ "${cpuType}" == "amd64" ]]; then
cpuTypeAlias="amd64"
elif [[ "${cpuType}" == "aarch64" ]]; then
cpuTypeAlias="arm64"
elif [[ "${cpuType}" == "aarch32" ]]; then
cpuTypeAlias="armhf"
else
echo "Unknown cpuType: ${cpuType}"
exit 1
fi
docker build --rm -f "Dockerfile" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuType}
docker build --rm -f "Dockerfile" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${dockername}:${version}
......
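The fix above maps the script's cpuType argument onto Docker's platform aliases before passing it as a build arg. A hypothetical equivalent that derives the alias from the build host instead (illustrative only; the real script takes cpuType as a parameter):
case "$(uname -m)" in
  x86_64)          cpuTypeAlias="amd64" ;;  # x64 / amd64
  aarch64)         cpuTypeAlias="arm64" ;;  # 64-bit ARM
  armv7l | armv6l) cpuTypeAlias="armhf" ;;  # 32-bit ARM (aarch32)
  *) echo "Unknown machine type: $(uname -m)"; exit 1 ;;
esac
echo "using --build-arg cpuType=${cpuTypeAlias}"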
......@@ -248,7 +248,7 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
if [[ "$verMode" != "cluster" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
......@@ -304,14 +304,12 @@ if [ "$osType" != "Darwin" ]; then
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
if [ "$verMode" == "cluster" ]; then
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp}
# ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
# ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
fi
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
# only make client for Darwin
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
fi
......@@ -201,6 +201,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -s ${install_main_dir}/bin/taosBenchmark ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -s ${install_main_dir}/bin/taosBenchmark ${bin_link_dir}/taosBenchmark || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
......
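Each added line follows the '[ -x file ] && ln -s ... || :' pattern: the trailing '|| :' forces a zero exit status so that a missing optional binary does not abort an installer running under set -e. A minimal sketch of the idiom, with hypothetical paths:
#!/bin/bash
set -e  # abort on any command failure
# without the trailing "|| :", a missing optional tool would kill the installer here
[ -x /opt/demo/bin/tool ] && ln -sf /opt/demo/bin/tool /usr/local/bin/tool || :
echo "installation continues even when /opt/demo/bin/tool is absent"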
......@@ -13,6 +13,7 @@ osType=$5
verMode=$6
verType=$7
pagMode=$8
dbName=$9
productName="TDengine"
clientName="taos"
......@@ -62,7 +63,7 @@ else
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
if [ "$dbName" != "taos" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
cfg_dir="${top_dir}/packaging/cfg"
......
This diff has been collapsed.
......@@ -2601,14 +2601,14 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrToken* pToken, bool multiCols) {
if (pItem->aliasName != NULL) {
tstrncpy(name, pItem->aliasName, TSDB_COL_NAME_LEN);
} else if (multiCols) {
} else {
char uname[TSDB_COL_NAME_LEN] = {0};
int32_t len = MIN(pToken->n + 1, TSDB_COL_NAME_LEN);
tstrncpy(uname, pToken->z, len);
if (tsKeepOriginalColumnName) { // keep the original column name
tstrncpy(name, uname, TSDB_COL_NAME_LEN);
} else {
} else if (multiCols) {
if (!TSDB_FUNC_IS_SCALAR(functionId)) {
int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1;
char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0};
......@@ -2623,10 +2623,10 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT
tstrncpy(name, tmp, TSDB_COL_NAME_LEN);
}
} else { // use the user-input result column name
len = MIN(pItem->pNode->exprToken.n + 1, TSDB_COL_NAME_LEN);
tstrncpy(name, pItem->pNode->exprToken.z, len);
}
} else { // use the user-input result column name
int32_t len = MIN(pItem->pNode->exprToken.n + 1, TSDB_COL_NAME_LEN);
tstrncpy(name, pItem->pNode->exprToken.z, len);
}
}
......
......@@ -65,8 +65,7 @@ public class RestfulDriver extends AbstractDriver {
}
String loginUrl;
String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD);
// if (Boolean.parseBoolean(batchLoad)) {
if (false) {
if (Boolean.parseBoolean(batchLoad)) {
loginUrl = "ws://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST)
+ ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/ws";
WSClient client;
......
......@@ -25,7 +25,7 @@ public abstract class AbstractWSResultSet extends AbstractResultSet {
protected final RequestFactory factory;
protected final long queryId;
protected boolean isClosed;
protected volatile boolean isClosed;
// meta
protected final ResultSetMetaData metaData;
protected final List<RestfulResultSet.Field> fields = new ArrayList<>();
......@@ -105,10 +105,14 @@ public abstract class AbstractWSResultSet extends AbstractResultSet {
@Override
public void close() throws SQLException {
this.isClosed = true;
if (result != null && !result.isEmpty() && !isCompleted) {
FetchReq fetchReq = new FetchReq(queryId, queryId);
transport.sendWithoutRep(new Request(Action.FREE_RESULT.getAction(), fetchReq));
synchronized (this) {
if (!this.isClosed) {
this.isClosed = true;
if (result != null && !result.isEmpty() && !isCompleted) {
FetchReq fetchReq = new FetchReq(queryId, queryId);
transport.sendWithoutRep(new Request(Action.FREE_RESULT.getAction(), fetchReq));
}
}
}
}
......
......@@ -95,7 +95,6 @@ public class WSClient extends WebSocketClient implements AutoCloseable {
long id = bytes.getLong();
ResponseFuture remove = inFlightRequest.remove(Action.FETCH_BLOCK.getAction(), id);
if (null != remove) {
// FetchBlockResp fetchBlockResp = new FetchBlockResp(id, bytes.slice());
FetchBlockResp fetchBlockResp = new FetchBlockResp(id, bytes);
remove.getFuture().complete(fetchBlockResp);
}
......
......@@ -53,8 +53,12 @@ public class WSStatement extends AbstractStatement {
@Override
public void close() throws SQLException {
if (!isClosed())
if (!isClosed()) {
this.closed = true;
if (resultSet != null && !resultSet.isClosed()) {
resultSet.close();
}
}
}
@Override
......
......@@ -15,6 +15,7 @@ public class RestfulDatabaseMetaDataTest {
private static final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
private static Connection connection;
private static RestfulDatabaseMetaData metaData;
private static final String dbName = "test";
@Test
public void unwrap() throws SQLException {
......@@ -1092,9 +1093,9 @@ public class RestfulDatabaseMetaDataTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection(url, properties);
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists log");
stmt.execute("create database if not exists log precision 'us'");
stmt.execute("use log");
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName + " precision 'us'");
stmt.execute("use " + dbName);
stmt.execute("create table `dn` (ts TIMESTAMP,cpu_taosd FLOAT,cpu_system FLOAT,cpu_cores INT,mem_taosd FLOAT,mem_system FLOAT,mem_total INT,disk_used FLOAT,disk_total INT,band_speed FLOAT,io_read FLOAT,io_write FLOAT,req_http INT,req_select INT,req_insert INT) TAGS (dnodeid INT,fqdn BINARY(128))");
stmt.execute("insert into dn1 using dn tags(1,'a') (ts) values(now)");
......
......@@ -32,9 +32,7 @@ public class WSQueryTest {
public void queryBlock() throws InterruptedException {
CountDownLatch latch = new CountDownLatch(1000);
IntStream.range(1, 10000).limit(1000).parallel().forEach(x -> {
try {
Statement statement = connection.createStatement();
try (Statement statement = connection.createStatement()) {
statement.execute("insert into " + databaseName + "." + tableName + " values(now+100s, 100)");
ResultSet resultSet = statement.executeQuery("select * from " + databaseName + "." + tableName);
......
Subproject commit c19ecfe96ae6f53d4b4a0e8341656680f5bd9d3b
Subproject commit 3b0c7cdd8861ba25fab476859cf0968b362d1bae
......@@ -3058,12 +3058,18 @@ static void col_project_function(SQLFunctionCtx *pCtx) {
char *pData = GET_INPUT_DATA_LIST(pCtx);
if (pCtx->order == TSDB_ORDER_ASC) {
// ASC
int32_t numOfRows = (pCtx->param[0].i64 == 1)? 1:pCtx->size;
memcpy(pCtx->pOutput, pData, (size_t) numOfRows * pCtx->inputBytes);
} else {
// DESC
for(int32_t i = 0; i < pCtx->size; ++i) {
memcpy(pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes,
pCtx->inputBytes);
char* dst = pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes;
char* src = pData + i * pCtx->inputBytes;
if (IS_VAR_DATA_TYPE(pCtx->inputType))
varDataCopy(dst, src);
else
memcpy(dst, src, pCtx->inputBytes);
}
}
}
......
This diff has been collapsed.
Subproject commit c7d924f44e0cbf9b65afcfe434d2940811e7963e
Subproject commit e19c1f87e65b74ac56b5b0d2a0fbe4d5df44a280