Commit 2ee7f19f authored by sangshuduo

Merge branch '3.0' into docs/sangshuduo/update-readme

@@ -15,7 +15,7 @@
 [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
-简体中文 | [English](README.md) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
+简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
 # TDengine 简介
......
@@ -123,8 +123,8 @@ ELSE ()
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 ELSE ()
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 ENDIF ()
 # disable all assert
......
@@ -77,6 +77,12 @@ ELSEIF (TD_DARWIN_64)
 ENDIF ()
 ENDIF ()
+option(
+    BUILD_GEOS
+    "If build geos on Windows"
+    ON
+)
 option(
     BUILD_SHARED_LIBS
     ""
......
@@ -57,8 +57,6 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
 SET(TD_DARWIN TRUE)
 SET(OSTYPE "macOS")
 ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
-INCLUDE_DIRECTORIES(/usr/local/include)
-LINK_DIRECTORIES(/usr/local/lib)
 IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
 MESSAGE("Current system arch is arm64")
......
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
 SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-SET(TD_VER_NUMBER "3.0.4.3")
+SET(TD_VER_NUMBER "3.0.5.0")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
......
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
 GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-GIT_TAG 283b50d
+GIT_TAG 3.0
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
......
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
 GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-GIT_TAG 4378702
+GIT_TAG 3.0
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
......
@@ -231,11 +231,16 @@ if(${BUILD_WITH_ROCKSDB})
 if(${TD_LINUX})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
 endif(${TD_LINUX})
+MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
 if(${TD_DARWIN})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
 endif(${TD_DARWIN})
+if (${TD_DARWIN_ARM64})
+set(HAS_ARMV8_CRC true)
+endif(${TD_DARWIN_ARM64})
 if (${TD_WINDOWS})
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
 endif(${TD_WINDOWS})
@@ -248,7 +253,7 @@ if(${BUILD_WITH_ROCKSDB})
 endif(${TD_DARWIN})
 if(${TD_WINDOWS})
-option(WITH_JNI "" ON)
+option(WITH_JNI "" OFF)
 endif(${TD_WINDOWS})
 if(${TD_WINDOWS})
@@ -260,7 +265,7 @@ if(${BUILD_WITH_ROCKSDB})
 option(WITH_FALLOCATE "" OFF)
 option(WITH_JEMALLOC "" OFF)
 option(WITH_GFLAGS "" OFF)
-option(PORTABLE "" ON)
+option(PORTABLE "" OFF)
 option(WITH_LIBURING "" OFF)
 option(FAIL_ON_WARNINGS OFF)
@@ -268,8 +273,11 @@ if(${BUILD_WITH_ROCKSDB})
 option(WITH_BENCHMARK_TOOLS "" OFF)
 option(WITH_TOOLS "" OFF)
 option(WITH_LIBURING "" OFF)
+IF (TD_LINUX)
+option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
+ELSE()
 option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
+ENDIF()
 add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
 target_include_directories(
 rocksdb
......
This diff has been collapsed.
@@ -45,7 +45,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
 :::note
 - Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
-- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
+- The length of BINARY can be up to 16,374(data column is 65,517 and tag column is 16,382 since version 3.0.5.0) bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
 - Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
 :::
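To make the BINARY rules in the note above concrete, a minimal sketch follows; the table and column names here are made up for illustration and are not part of the change.

```sql
-- Sketch only: a BINARY(20) column holds up to 20 single-byte characters,
-- and a literal single quote inside the string is escaped with a backslash.
CREATE TABLE sensor_log (ts TIMESTAMP, msg BINARY(20));
INSERT INTO sensor_log VALUES (NOW, 'it\'s ok');
```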
......
@@ -45,7 +45,7 @@ table_option: {
 1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
 2. The maximum length of the table name is 192 bytes.
-3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
+3. The maximum length of each row is 48k(64k since version 3.0.5.0) bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
 4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
 5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
 6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
......
@@ -55,7 +55,7 @@ window_clause: {
 | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
 interp_clause:
-RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
+RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
 partition_by_clause:
 PARTITION BY expr [, expr] ...
......
@@ -889,9 +889,10 @@ ignore_null_values: {
 - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
 - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
 - `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
 - The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
 - Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
+- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
 - `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
 - Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
 - Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
@@ -902,7 +903,7 @@ ignore_null_values: {
 - We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
 ```sql
 SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
 ```
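For the single-timestamp `RANGE` form and the `_isfilled` pseudocolumn described above, a query could look like the following sketch; it reuses the test.meters table from the example and is illustrative only.

```sql
-- Sketch only: interpolate a single point in time; EVERY can be omitted here,
-- and _isfilled marks whether each row was generated by interpolation.
SELECT _irowts, _isfilled, INTERP(current)
FROM test.meters
PARTITION BY TBNAME
RANGE('2017-07-22 12:00:00')
FILL(LINEAR)
```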
 ### LAST
@@ -1008,8 +1009,7 @@ SAMPLE(expr, k)
 **More explanations**:
-This function cannot be used in expression calculation.
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
+- This function cannot be used in expression calculation.
 ### TAIL
@@ -1088,7 +1088,6 @@ CSUM(expr)
 - Arithmetic operation can't be performed on the result of `csum` function
 - Can only be used with aggregate functions This function can be used with supertables and standard tables.
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
 ### DERIVATIVE
@@ -1112,7 +1111,6 @@ ignore_negative: {
 **More explanation**:
-- It can be used together with `PARTITION BY tbname` against a STable.
 - It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
 ### DIFF
@@ -1175,7 +1173,6 @@ MAVG(expr, k)
 - Arithmetic operation can't be performed on the result of `MAVG`.
 - Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
 ### STATECOUNT
@@ -1201,7 +1198,6 @@ STATECOUNT(expr, oper, val)
 **More explanations**:
-- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline]
 - Can't be used with window operation, like interval/state_window/session_window
@@ -1229,7 +1225,6 @@ STATEDURATION(expr, oper, val, unit)
 **More explanations**:
-- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline]
 - Can't be used with window operation, like interval/state_window/session_window
@@ -1247,7 +1242,6 @@ TWA(expr)
 **Applicable table types**: standard tables and supertables
-- Must be used together with `PARTITION BY tbname` to force the result into each single timeline.
 ## System Information Functions
......
@@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),
 - Maximum length of database name is 64 bytes
 - Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
-- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of each data row is 48K(64K since version 3.0.5.0) bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
 - The maximum length of a column name is 64 bytes.
 - Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
 - The maximum length of a tag name is 64 bytes
......
@@ -959,6 +959,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`
 ```java
 Properties config = new Properties();
+config.setProperty("bootstrap.servers", "localhost:6030");
 config.setProperty("enable.auto.commit", "true");
 config.setProperty("group.id", "group1");
 config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
@@ -966,12 +967,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
 TaosConsumer consumer = new TaosConsumer<>(config);
 ```
+- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used.
 - enable.auto.commit: Specifies whether to commit automatically.
 - group.id: consumer: Specifies the group that the consumer is in.
 - value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
 - td.connect.type: Specifies the type connect with TDengine, `jni` or `WebSocket`. default is `jni`
 - httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
 - messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
+- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
 - For more information, see [Consumer Parameters](../../../develop/tmq).
 #### Subscribe to consume data
@@ -1015,10 +1018,20 @@ public abstract class ConsumerLoop {
 public ConsumerLoop() throws SQLException {
 Properties config = new Properties();
+config.setProperty("td.connect.type", "jni");
+config.setProperty("bootstrap.servers", "localhost:6030");
+config.setProperty("td.connect.user", "root");
+config.setProperty("td.connect.pass", "taosdata");
+config.setProperty("auto.offset.reset", "earliest");
 config.setProperty("msg.with.table.name", "true");
 config.setProperty("enable.auto.commit", "true");
+config.setProperty("auto.commit.interval.ms", "1000");
 config.setProperty("group.id", "group1");
+config.setProperty("client.id", "1");
 config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
+config.setProperty("value.deserializer.encoding", "UTF-8");
+config.setProperty("experimental.snapshot.enable", "true");
 this.consumer = new TaosConsumer<>(config);
 this.topics = Collections.singletonList("topic_speed");
@@ -1090,12 +1103,19 @@ public abstract class ConsumerLoop {
 public ConsumerLoop() throws SQLException {
 Properties config = new Properties();
-config.setProperty("bootstrap.servers", "localhost:6041");
 config.setProperty("td.connect.type", "ws");
+config.setProperty("bootstrap.servers", "localhost:6041");
+config.setProperty("td.connect.user", "root");
+config.setProperty("td.connect.pass", "taosdata");
+config.setProperty("auto.offset.reset", "earliest");
 config.setProperty("msg.with.table.name", "true");
 config.setProperty("enable.auto.commit", "true");
+config.setProperty("auto.commit.interval.ms", "1000");
 config.setProperty("group.id", "group2");
+config.setProperty("client.id", "1");
 config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
+config.setProperty("value.deserializer.encoding", "UTF-8");
+config.setProperty("experimental.snapshot.enable", "true");
 this.consumer = new TaosConsumer<>(config);
 this.topics = Collections.singletonList("topic_speed");
@@ -1236,6 +1256,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
 - connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
 - SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
 - mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
+- consumer-demo: consumer TDengine data example, the consumption rate can be controlled by parameters.
 [JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
......
@@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.
 ## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](https://github.com/taosdata/driver-go#remind)
 ## Supported features
@@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
 Commit information.
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
 * `func (c *Consumer) Unsubscribe() error`
 Unsubscribe.
@@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
 Commit information.
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
 * `func (c *Consumer) Unsubscribe() error`
 Unsubscribe.
@@ -476,7 +494,7 @@ Unsubscribe.
 Close consumer.
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
 ### parameter binding via WebSocket
@@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv
 Closes the parameter binding.
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
 ## API Reference
......
@@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
 Native connections are supported on the same platforms as the TDengine client driver.
 Websocket connections are supported on all platforms that can run Go.
-## Version support
-Please refer to [version support list](/reference/connector#version-support)
+## Version history
+| connector-rust version | TDengine version | major features |
+| :----------------: | :--------------: | :--------------------------------------------------: |
+| v0.8.8 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
+| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
+| v0.7.6 | 3.0.3.0 | Support req_id in query. |
+| v0.6.0 | 3.0.0.0 | Base features. |
 The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
@@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in
 }
 ```
+Get assignments:
+Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+Seek offset:
+Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
 Unsubscribe:
 ```rust
@@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
 - `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
 - `auto.commit.interval.ms`: Interval for automatic commits.
-For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
 For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
......
@@ -362,7 +362,7 @@ By using the optional req_id parameter, you can specify a request ID that can be
 ##### TaosConnection class
-The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
+As the way to connect introduced above but add `req_id` argument.
 ```python title="execute method"
 {{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
@@ -372,13 +372,9 @@ The `TaosConnection` class contains both an implementation of the PEP249 Connect
 {{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
 ```
-:::tip
-The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
-:::
 ##### Use of TaosResult class
-In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
+As the way to fetch data introduced above but add `req_id` argument.
 ```python title="blocks_iter method"
 {{#include docs/examples/python/result_set_with_req_id_examples.py}}
@@ -391,17 +387,12 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
 {{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
 ```
-:::note
-The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
-:::
 </TabItem>
 <TabItem value="rest" label="REST connection">
 ##### Use of TaosRestCursor class
-The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
+As the way to connect introduced above but add `req_id` argument.
 ```python title="Use of TaosRestCursor"
 {{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
@@ -421,8 +412,11 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap
 For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
 </TabItem>
 <TabItem value="websocket" label="WebSocket connection">
+As the way to connect introduced above but add `req_id` argument.
 ```python
 {{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
 ```
......
@@ -111,7 +111,7 @@ The parameters described in this document by the effect that they have on the sy
 | Attribute | Description |
 | ------------- | ---------------------------------------------- |
 | Applicable | Client/Server |
-| Meaning | The maximum waiting time to get avaliable conn |
+| Meaning | The maximum waiting time to get available conn |
 | Value Range | 10-50000000(ms) |
 | Default Value | 500000 |
......
@@ -90,7 +90,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
 Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
 :::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB(64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
 :::
 ## Time resolution recognition
......
@@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
 ![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp)
-## What is Confluent?
-[Confluent](https://www.confluent.io/) adds many extensions to Kafka. include:
-1. Schema Registry
-2. REST Proxy
-3. Non-Java Clients
-4. Many packaged Kafka Connect plugins
-5. GUI for managing and monitoring Kafka - Confluent Control Center
-Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-![TDengine Database Kafka Connector -- Confluent platform](kafka/confluentPlatform.webp)
-Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
 ## Prerequisites
 1. Linux operating system
 2. Java 8 and Maven installed
-3. Git is installed
+3. Git/curl/vi is installed
 4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
-## Install Confluent
-Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
+## Install Kafka
 Execute in any directory:
 ````
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
 ````
-Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
+Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
 ```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
 ```
 Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
-After the installation is complete, you can enter `confluent version` for simple verification:
-```
-# confluent version
-confluent - Confluent CLI
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```
 ## Install TDengine Connector plugin
 ### Install from source code
-```
+```shell
 git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
 cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
 ```
-The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path.
+The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.
-### Install with confluent-hub
-[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
-**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
-## Start Confluent
-```
-confluent local services start
+### Add configuration file
+Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
 ```
-:::note
-Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
-:::
-:::tip
-If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
-```title="Console output log" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
-To clear data, execute `rm -rf /tmp/confluent.106668`.
-:::
-### Check Confluent Services Status
-Use command bellow to check the status of all service:
-```
-confluent local services status
-```
-The expected output is:
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
+## Start Kafka Services
+Use command below to start all services:
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
 ```
 ### Check Successfully Loaded Plugin
 After Kafka Connect was completely started, you can use below command to check if our plugins are installed successfully:
-```
-confluent local services connect plugin list
-```
-The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow:
-```
-Available Connect Plugins:
-[
-{
-"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
-"type": "sink",
-"version": "1.0.0"
-},
-{
-"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
-"type": "source",
-"version": "1.0.0"
-},
-......
+```shell
+curl http://localhost:8083/connectors
 ```
-If not, please check the log file of Kafka Connect. To view the log file path, please execute:
-```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
-Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
+The output as below:
+```txt
+[]
+```
 ## The use of TDengine Sink Connector
@@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
 The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
-### Add configuration file
+### Add Sink Connector configuration file
-```
+```shell
 mkdir ~/test
 cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
 ```
-sink-demo.properties' content is following:
+sink-demo.json' content is following:
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="sink-demo.json"
+{
+  "name": "TDengineSinkConnector",
+  "config": {
+    "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "tasks.max": "1",
+    "topics": "meters",
+    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+    "connection.user": "root",
+    "connection.password": "taosdata",
+    "connection.database": "power",
+    "db.schemaless": "line",
+    "data.precision": "ns",
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "errors.tolerance": "all",
+    "errors.deadletterqueue.topic.name": "dead_letter_topic",
+    "errors.deadletterqueue.topic.replication.factor": 1
+  }
+}
 ```
 Key configuration instructions:
-1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
-2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
+1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
+2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
-### Create Connector instance
+### Create Sink Connector instance
-````
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+````shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
 ````
 If the above command is executed successfully, the output is as follows:
@@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
 "tasks.max": "1",
 "topics": "meters",
 "value.converter": "org.apache.kafka.connect.storage.StringConverter",
-"name": "TDengineSinkConnector"
+"name": "TDengineSinkConnector",
+"errors.tolerance": "all",
+"errors.deadletterqueue.topic.name": "dead_letter_topic",
+"errors.deadletterqueue.topic.replication.factor": "1",
 },
 "tasks": [],
 "type": "sink"
@@ -258,7 +182,7 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
 Use kafka-console-producer to write test data to the topic `meters`.
 ```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
 ```
 :::note
@@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
 Use the TDengine CLI to verify that the sync was successful.
-```
+```sql
 taos> use power;
 Database changed.
 taos> select * from meters;
-ts | current | voltage | phase | groupid | location |
+_ts | current | voltage | phase | groupid | location |
 ===============================================================================================================================================================
 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -293,29 +217,34 @@ TDengine Source Connector will convert the data in TDengine data table into [Inf
 The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
-### Add configuration file
+### Add Source Connector configuration file
-```
+```shell
-vi source-demo.properties
+vi source-demo.json
 ```
 Input following content:
-```ini title="source-demo.properties"
-name=TDengineSourceConnector
-connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
-tasks.max=1
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.username=root
-connection.password=taosdata
-connection.database=test
-connection.attempts=3
-connection.backoff.ms=5000
-topic.prefix=tdengine-source-
-poll.interval.ms=1000
-fetch.max.rows=100
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="source-demo.json"
+{
+  "name":"TDengineSourceConnector",
+  "config":{
+    "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+    "tasks.max": 1,
+    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+    "connection.username": "root",
+    "connection.password": "taosdata",
+    "connection.database": "test",
+    "connection.attempts": 3,
+    "connection.backoff.ms": 5000,
+    "topic.prefix": "tdengine-source",
+    "poll.interval.ms": 1000,
+    "fetch.max.rows": 100,
+    "topic.per.stable": true,
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "value.converter": "org.apache.kafka.connect.storage.StringConverter"
+  }
+}
 ```
 ### Prepare test data
@@ -340,40 +269,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
 Use TDengine CLI to execute SQL script
-```
+```shell
 taos -f prepare-source-data.sql
 ```
 ### Create Connector instance
-````
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
-````
+```shell
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
+```
 ### View topic data
 Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
-````
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+````shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
 ````
 output:
-````
+```txt
 ......
 meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
 ......
-````
+```
 All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
-````
+```sql
 USE test;
 INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
 INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
-````
+```
 Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
@@ -383,16 +312,16 @@ After testing, use the unload command to stop the loaded connector.
 View currently active connectors:
-````
-confluent local services connect connector status
-````
+```shell
+curl http://localhost:8083/connectors
+```
 You should now have two active connectors if you followed the previous steps. Use the following command to unload:
-````
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
-````
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
+```
 ## Configuration reference
@@ -430,19 +359,14 @@ The following configuration items apply to TDengine Sink Connector and TDengine
 6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
 7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
 ## Other notes
-1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
-2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
+1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
 ## Feedback
-https://github.com/taosdata/kafka-connect-tdengine/issues
+<https://github.com/taosdata/kafka-connect-tdengine/issues>
 ## Reference
-1. https://www.confluent.io/what-is-apache-kafka
+1. For more information, see <https://kafka.apache.org/documentation/>
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
3. https://docs.confluent.io/platform/current/platform.html
...@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w ...@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />
## 3.0.4.2 ## 3.0.4.2
<Release type="tdengine" version="3.0.4.2" /> <Release type="tdengine" version="3.0.4.2" />
......
...@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat ...@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 2.5.1
<Release type="tools" version="2.5.1" />
## 2.5.0 ## 2.5.0
<Release type="tools" version="2.5.0" /> <Release type="tools" version="2.5.0" />
......
...@@ -53,20 +53,28 @@ public class SubscribeDemo { ...@@ -53,20 +53,28 @@ public class SubscribeDemo {
// create consumer // create consumer
Properties properties = new Properties(); Properties properties = new Properties();
properties.getProperty(TMQConstants.CONNECT_TYPE, "jni");
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030"); properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030");
properties.setProperty(TMQConstants.CONNECT_USER, "root");
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true"); properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true"); properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test"); properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
properties.setProperty(TMQConstants.GROUP_ID, "test1");
properties.setProperty(TMQConstants.CLIENT_ID, "1");
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER, properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer"); "com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data // poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) { try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(TOPIC)); consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) { while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100)); ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<Meters> recode : meters) { for (ConsumerRecord<Meters> r : meters) {
Meters meter = recode.value(); Meters meter = r.value();
System.out.println(meter); System.out.println(meter);
} }
} }
......
package com.taos.example; package com.taos.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords; import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants; import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer; import com.taosdata.jdbc.tmq.TaosConsumer;
...@@ -54,18 +55,26 @@ public class WebsocketSubscribeDemo { ...@@ -54,18 +55,26 @@ public class WebsocketSubscribeDemo {
Properties properties = new Properties(); Properties properties = new Properties();
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041"); properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041");
properties.setProperty(TMQConstants.CONNECT_TYPE, "ws"); properties.setProperty(TMQConstants.CONNECT_TYPE, "ws");
properties.setProperty(TMQConstants.CONNECT_USER, "root");
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true"); properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true"); properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test"); properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
properties.setProperty(TMQConstants.GROUP_ID, "test2");
properties.setProperty(TMQConstants.CLIENT_ID, "1");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER, properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer"); "com.taos.example.MetersDeserializer");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data // poll data
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) { try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(TOPIC)); consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) { while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100)); ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (Meters meter : meters) { for (ConsumerRecord<Meters> r : meters) {
Meters meter = (Meters) r.value();
System.out.println(meter); System.out.println(meter);
} }
} }
......
...@@ -962,6 +962,7 @@ statement.executeUpdate("create topic if not exists topic_speed as select ts, sp ...@@ -962,6 +962,7 @@ statement.executeUpdate("create topic if not exists topic_speed as select ts, sp
```java ```java
Properties config = new Properties(); Properties config = new Properties();
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("enable.auto.commit", "true"); config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1"); config.setProperty("group.id", "group1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
...@@ -969,12 +970,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res ...@@ -969,12 +970,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
TaosConsumer consumer = new TaosConsumer<>(config); TaosConsumer consumer = new TaosConsumer<>(config);
``` ```
- bootstrap.servers: TDengine 服务端所在的`ip:port`,如果使用 WebSocket 连接,则为 taosAdapter 所在的`ip:port`。
- enable.auto.commit: 是否允许自动提交。 - enable.auto.commit: 是否允许自动提交。
- group.id: consumer: 所在的 group。 - group.id: consumer: 所在的 group。
- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。 - value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
- td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。 - td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。
- httpConnectTimeout:创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。 - httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
- messageWaitTimeout:数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。 - messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group) 其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
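下面给出一个仅作示意的反序列化实现(类名、包名与字段均为假设,假定订阅的 SQL 只返回 ts 和 speed 两列),演示如何通过继承 `ReferenceDeserializer` 并指定结果集 bean 来完成 `value.deserializer` 的配置:
```java
// 结果集 bean:属性名需与订阅 SQL 返回的列名一致(此处假设返回 ts、speed 两列)
public class Speed {
    private java.sql.Timestamp ts;
    private int speed;

    public java.sql.Timestamp getTs() { return ts; }
    public void setTs(java.sql.Timestamp ts) { this.ts = ts; }
    public int getSpeed() { return speed; }
    public void setSpeed(int speed) { this.speed = speed; }
}

// 反序列化器:继承 ReferenceDeserializer 并绑定上面的 bean(与 Speed 分属两个源文件)
public class SpeedDeserializer extends com.taosdata.jdbc.tmq.ReferenceDeserializer<Speed> {
}
```
配置时把 `value.deserializer` 设为该类的全限定类名即可,例如 `config.setProperty("value.deserializer", "com.example.SpeedDeserializer")`(包名仅为示例)。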
#### 订阅消费数据 #### 订阅消费数据
...@@ -1016,10 +1019,19 @@ public abstract class ConsumerLoop { ...@@ -1016,10 +1019,19 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException { public ConsumerLoop() throws SQLException {
Properties config = new Properties(); Properties config = new Properties();
config.setProperty("td.connect.type", "jni");
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true"); config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true"); config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group1"); config.setProperty("group.id", "group1");
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config); this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed"); this.topics = Collections.singletonList("topic_speed");
...@@ -1093,12 +1105,19 @@ public abstract class ConsumerLoop { ...@@ -1093,12 +1105,19 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException { public ConsumerLoop() throws SQLException {
Properties config = new Properties(); Properties config = new Properties();
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.type", "ws"); config.setProperty("td.connect.type", "ws");
config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true"); config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true"); config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group2"); config.setProperty("group.id", "group2");
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config); this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed"); this.topics = Collections.singletonList("topic_speed");
...@@ -1239,6 +1258,7 @@ public static void main(String[] args) throws Exception { ...@@ -1239,6 +1258,7 @@ public static void main(String[] args) throws Exception {
- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。 - connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。
- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。 - SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。 - mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。
- consumer-demo:Consumer 消费 TDengine 数据示例,可通过参数控制消费速度。
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC) 请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
......
...@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。 ...@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
## 版本支持 ## 版本支持
请参考[版本支持列表](../#版本支持) 请参考[版本支持列表](https://github.com/taosdata/driver-go#remind)
## 支持的功能特性 ## 支持的功能特性
...@@ -383,6 +383,15 @@ func main() { ...@@ -383,6 +383,15 @@ func main() {
提交消息。 提交消息。
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
获取消费进度。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
按照指定的进度消费。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Close() error` * `func (c *Consumer) Close() error`
关闭连接。 关闭连接。
...@@ -468,11 +477,20 @@ func main() { ...@@ -468,11 +477,20 @@ func main() {
提交消息。 提交消息。
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
获取消费进度。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
按照指定的进度消费。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Close() error` * `func (c *Consumer) Close() error`
关闭连接。 关闭连接。
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go) 完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### 通过 WebSocket 进行参数绑定 ### 通过 WebSocket 进行参数绑定
...@@ -520,7 +538,7 @@ func main() { ...@@ -520,7 +538,7 @@ func main() {
结束参数绑定。 结束参数绑定。
完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go) 完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
## API 参考 ## API 参考
......
...@@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx" ...@@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
Websocket 连接支持所有能运行 Rust 的平台。 Websocket 连接支持所有能运行 Rust 的平台。
## 版本支持 ## 版本历史
请参考[版本支持列表](../#版本支持) | Rust 连接器版本 | TDengine 版本 | 主要功能 |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.8 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
| v0.6.0 | 3.0.0.0 | 基础功能。 |
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。 Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
...@@ -65,6 +70,13 @@ taos = "*" ...@@ -65,6 +70,13 @@ taos = "*"
taos = { version = "*", default-features = false, features = ["ws"] } taos = { version = "*", default-features = false, features = ["ws"] }
``` ```
当仅启用 `ws` 特性时,可同时指定 `r2d2` 使得在同步(blocking/sync)模式下使用 [r2d2] 作为连接池:
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["r2d2", "ws"] }
```
</TabItem> </TabItem>
<TabItem value="native" label="仅原生连接"> <TabItem value="native" label="仅原生连接">
...@@ -257,26 +269,24 @@ let conn: Taos = cfg.build(); ...@@ -257,26 +269,24 @@ let conn: Taos = cfg.build();
### 连接池 ### 连接池
在复杂应用中,建议启用连接池。[taos] 的连接池使用 [r2d2] 实现。 在复杂应用中,建议启用连接池。[taos] 的连接池默认(异步模式)使用 [deadpool] 实现。
如下,可以生成一个默认参数的连接池。 如下,可以生成一个默认参数的连接池。
```rust ```rust
let pool = TaosBuilder::from_dsn(dsn)?.pool()?; let pool: Pool<TaosBuilder> = TaosBuilder::from_dsn("taos:///")
.unwrap()
.pool()
.unwrap();
``` ```
同样可以使用连接池的构造器,对连接池参数进行设置: 同样可以使用连接池的构造器,对连接池参数进行设置:
```rust ```rust
let dsn = "taos://localhost:6030"; let pool: Pool<TaosBuilder> = Pool::builder(Manager::from_dsn(self.dsn.clone()).unwrap().0)
.max_size(88) // 最大连接数
let opts = PoolBuilder::new() .build()
.max_size(5000) // max connections .unwrap();
.max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
.min_idle(Some(1000)) // minimal idle connections
.connection_timeout(Duration::from_secs(2));
let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
``` ```
在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。 在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。
...@@ -497,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur ...@@ -497,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur
} }
``` ```
获取消费进度:
版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
按照指定的进度消费:
版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
停止订阅: 停止订阅:
```rust ```rust
...@@ -511,11 +537,12 @@ consumer.unsubscribe().await; ...@@ -511,11 +537,12 @@ consumer.unsubscribe().await;
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。 - `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
- `auto.commit.interval.ms`: 自动标记的时间间隔。 - `auto.commit.interval.ms`: 自动标记的时间间隔。
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs). 完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。 其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。
[taos]: https://github.com/taosdata/rust-connector-taos [taos]: https://github.com/taosdata/rust-connector-taos
[deadpool]: https://crates.io/crates/deadpool
[r2d2]: https://crates.io/crates/r2d2 [r2d2]: https://crates.io/crates/r2d2
[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html [TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html
[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html [TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html
......
...@@ -362,7 +362,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -362,7 +362,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
##### TaosConnection 类的使用 ##### TaosConnection 类的使用
`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor`方法和 `close` 方法),也包含很多扩展功能(如: `execute`、 `query`、`schemaless_insert` 和 `subscribe` 方法 类似上文介绍的使用方法,增加 `req_id` 参数
```python title="execute 方法" ```python title="execute 方法"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}} {{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
...@@ -372,13 +372,9 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -372,13 +372,9 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}} {{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
``` ```
:::tip
查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。
:::
##### TaosResult 类的使用 ##### TaosResult 类的使用
上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效 类似上文介绍的使用方法,增加 `req_id` 参数
```python title="blocks_iter 方法" ```python title="blocks_iter 方法"
{{#include docs/examples/python/result_set_with_req_id_examples.py}} {{#include docs/examples/python/result_set_with_req_id_examples.py}}
...@@ -391,14 +387,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -391,14 +387,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}} {{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
``` ```
:::note
TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
:::
</TabItem> </TabItem>
<TabItem value="rest" label="REST 连接"> <TabItem value="rest" label="REST 连接">
类似上文介绍的使用方法,增加 `req_id` 参数。
##### TaosRestCursor 类的使用 ##### TaosRestCursor 类的使用
`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。 `TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
...@@ -420,8 +413,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -420,8 +413,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。 对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。
</TabItem> </TabItem>
<TabItem value="websocket" label="WebSocket 连接"> <TabItem value="websocket" label="WebSocket 连接">
类似上文介绍的使用方法,增加 `req_id` 参数。
```python ```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}} {{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
``` ```
......
...@@ -45,9 +45,9 @@ CREATE DATABASE db_name PRECISION 'ns'; ...@@ -45,9 +45,9 @@ CREATE DATABASE db_name PRECISION 'ns';
:::note :::note
- 表的每行长度不能超过 48KB(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 - 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。 - 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'` - BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382) 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。 - SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
::: :::
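下面是一段仅作示意的 JDBC 片段(库名、表名均为示例,假设已引入 taos-jdbcdriver 依赖),演示 BINARY 定长列的定义以及字符串中单引号的转义写法:
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BinaryEscapeDemo {
    public static void main(String[] args) throws Exception {
        // 示例连接串,请按实际环境修改
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS demo");
            // BINARY(20) 最多存放 20 个单字节字符;中文等多字节内容请改用 NCHAR
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS demo.t1 (ts TIMESTAMP, v BINARY(20))");
            // 字符串内的单引号用 \' 转义;在 Java 字符串里写作 \\'
            stmt.executeUpdate("INSERT INTO demo.t1 VALUES (NOW, 'it\\'s ok')");
        }
    }
}
```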
......
...@@ -121,6 +121,8 @@ alter_database_option: { ...@@ -121,6 +121,8 @@ alter_database_option: {
| WAL_LEVEL value | WAL_LEVEL value
| WAL_FSYNC_PERIOD value | WAL_FSYNC_PERIOD value
| KEEP value | KEEP value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
} }
``` ```
......
...@@ -43,7 +43,7 @@ table_option: { ...@@ -43,7 +43,7 @@ table_option: {
1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键; 1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键;
2. 表名最大长度为 192; 2. 表名最大长度为 192;
3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) 3. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写 4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节; 5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节;
6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
......
...@@ -55,7 +55,7 @@ window_clause: { ...@@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)] | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause: interp_clause:
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val) RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause: partition_by_clause:
PARTITION BY expr [, expr] ... PARTITION BY expr [, expr] ...
......
...@@ -890,9 +890,10 @@ ignore_null_values: { ...@@ -890,9 +890,10 @@ ignore_null_values: {
- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 - INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 - INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。 - INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。
- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 - INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. - INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值.
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) - INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 - INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 - INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 - INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。
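下面是一段仅作示意的查询片段(表名、列名与时间范围均为假设,且假定 conn 为已建立的 JDBC 连接),分别演示按固定间隔插值和只对单个时间点插值:
```java
try (Statement stmt = conn.createStatement()) {
    // 在指定时间范围内,每 1s 输出一个线性插值结果,并带出插值时间戳与是否为插值的标记
    stmt.executeQuery(
        "SELECT _irowts, _isfilled, INTERP(current) FROM d1001 " +
        "RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') EVERY(1s) FILL(linear)");
    // 只指定一个时间戳时,对该时间点插值,EVERY 子句可以省略
    stmt.executeQuery(
        "SELECT INTERP(current) FROM d1001 RANGE('2023-01-01 00:00:05') FILL(linear)");
}
```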
...@@ -1001,7 +1002,6 @@ SAMPLE(expr, k) ...@@ -1001,7 +1002,6 @@ SAMPLE(expr, k)
**使用说明** **使用说明**
- 不能参与表达式计算;该函数可以应用在普通表和超级表上; - 不能参与表达式计算;该函数可以应用在普通表和超级表上;
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
### TAIL ### TAIL
...@@ -1080,7 +1080,6 @@ CSUM(expr) ...@@ -1080,7 +1080,6 @@ CSUM(expr)
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### DERIVATIVE ### DERIVATIVE
...@@ -1104,7 +1103,6 @@ ignore_negative: { ...@@ -1104,7 +1103,6 @@ ignore_negative: {
**使用说明**: **使用说明**:
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 - 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
### DIFF ### DIFF
...@@ -1167,7 +1165,6 @@ MAVG(expr, k) ...@@ -1167,7 +1165,6 @@ MAVG(expr, k)
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### STATECOUNT ### STATECOUNT
...@@ -1193,7 +1190,6 @@ STATECOUNT(expr, oper, val) ...@@ -1193,7 +1190,6 @@ STATECOUNT(expr, oper, val)
**使用说明** **使用说明**
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 - 不能和窗口操作一起使用,例如 interval/state_window/session_window。
...@@ -1221,7 +1217,6 @@ STATEDURATION(expr, oper, val, unit) ...@@ -1221,7 +1217,6 @@ STATEDURATION(expr, oper, val, unit)
**使用说明** **使用说明**
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 - 不能和窗口操作一起使用,例如 interval/state_window/session_window。
...@@ -1239,8 +1234,6 @@ TWA(expr) ...@@ -1239,8 +1234,6 @@ TWA(expr)
**适用于**:表和超级表。 **适用于**:表和超级表。
**使用说明**: TWA 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
## 系统信息函数 ## 系统信息函数
......
...@@ -26,7 +26,7 @@ description: 合法字符集和命名中的限制规则 ...@@ -26,7 +26,7 @@ description: 合法字符集和命名中的限制规则
- 数据库名最大长度为 64 字节 - 数据库名最大长度为 64 字节
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符 - 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) - 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB) (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
- 列名最大长度为 64 字节 - 列名最大长度为 64 字节
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。 - 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
- 标签名最大长度为 64 字节 - 标签名最大长度为 64 字节
......
...@@ -91,11 +91,30 @@ taos --dump-config ...@@ -91,11 +91,30 @@ taos --dump-config
### maxShellConns ### maxShellConns
| 属性 | 说明 | | 属性 | 说明 |
| -------- | ----------------------- | | --------| ----------------------- |
| 适用范围 | 仅服务端适用 | | 适用范围 | 仅服务端适用 |
| 含义 | 一个 dnode 容许的连接数 | | 含义 | 一个 dnode 容许的连接数 |
| 取值范围 | 10-50000000 | | 取值范围 | 10-50000000 |
| 缺省值 | 5000 | | 缺省值 | 5000 |
### numOfRpcSessions
| 属性 | 说明 |
| -------- | ---------------------------- |
| 适用范围 | 客户端和服务端都适用 |
| 含义 | 一个客户端能创建的最大连接数 |
| 取值范围 | 100-100000 |
| 缺省值 | 10000 |
### timeToGetAvailableConn
| 属性 | 说明 |
| -------- | -------------------------- |
| 适用范围 | 客户端和服务端都适用 |
| 含义 | 获得可用连接的最长等待时间 |
| 取值范围 | 10-50000000(单位为毫秒) |
| 缺省值 | 500000 |
### numOfRpcSessions ### numOfRpcSessions
......
...@@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ...@@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip :::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
48KB,标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit) 48KB(从 3.0.5.0 版本开始为 64KB),标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
::: :::
......
...@@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送 ...@@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp)
## 什么是 Confluent?
[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
1. Schema Registry
2. REST 代理
3. 非 Java 客户端
4. 很多打包好的 Kafka Connect 插件
5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp)
Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
## 前置条件 ## 前置条件
运行本教程中示例的前提条件。 运行本教程中示例的前提条件。
1. Linux 操作系统 1. Linux 操作系统
2. 已安装 Java 8 和 Maven 2. 已安装 Java 8 和 Maven
3. 已安装 Git 3. 已安装 Git、curl、vi
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install) 4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
## 安装 Confluent ## 安装 Kafka
Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。
在任意目录下执行: 在任意目录下执行:
``` ```shell
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
tar xzf confluent-7.1.1.tar.gz -C /opt/ tar xzf kafka_2.13-3.4.0.tgz -C /opt/
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
``` ```
然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。 然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。
```title=".profile" ```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1 export KAFKA_HOME=/opt/kafka
export PATH=$CONFLUENT_HOME/bin:$PATH export PATH=$PATH:$KAFKA_HOME/bin
``` ```
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) 以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
安装完成之后,可以输入`confluent version`做简单验证:
```
# confluent version
confluent - Confluent CLI
Version: v2.6.1
Git Ref: 6d920590
Build Date: 2022-02-18T06:14:21Z
Go Version: go1.17.6 (linux/amd64)
Development: false
```
## 安装 TDengine Connector 插件 ## 安装 TDengine Connector 插件
### 从源码安装 ### 编译插件
``` ```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine cd kafka-connect-tdengine
mvn clean package mvn clean package -Dmaven.test.skip=true
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
``` ```
以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/` 以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`
### 用 confluent-hub 安装 ### 配置插件
[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。 将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中
**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**
## 启动 Confluent ```properties
plugin.path=/usr/share/java,/opt/kafka/components
```
confluent local services start
``` ```
:::note ## 启动 Kafka
一定要先安装插件再启动 Confluent, 否则加载插件会失败。
:::
:::tip ```shell
若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 : zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
```title="控制台输出日志" {1}
Using CONFLUENT_CURRENT: /tmp/confluent.106668
Starting ZooKeeper
ZooKeeper is [UP]
Starting Kafka
Kafka is [UP]
Starting Schema Registry
Schema Registry is [UP]
Starting Kafka REST
Kafka REST is [UP]
Starting Connect
Connect is [UP]
Starting ksqlDB Server
ksqlDB Server is [UP]
Starting Control Center
Control Center is [UP]
```
清空数据可执行 `rm -rf /tmp/confluent.106668` kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
:::
### 验证各个组件是否启动成功 connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
输入命令:
```
confluent local services status
```
如果各组件都启动成功,会得到如下输出:
```
Connect is [UP]
Control Center is [UP]
Kafka is [UP]
Kafka REST is [UP]
ksqlDB Server is [UP]
Schema Registry is [UP]
ZooKeeper is [UP]
``` ```
### 验证插件是否安装成功 ### 验证 kafka Connect 是否启动成功
在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件 输入命令
``` ```shell
confluent local services connect plugin list curl http://localhost:8083/connectors
``` ```
如果成功安装,会输出如下: 如果各组件都启动成功,会得到如下输出:
```txt {4,9}
Available Connect Plugins:
[
{
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
"type": "sink",
"version": "1.0.0"
},
{
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
"type": "source",
"version": "1.0.0"
},
......
```
如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径: ```txt
[]
``` ```
echo `cat /tmp/confluent.current`/connect/connect.stdout
```
该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`
与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
## TDengine Sink Connector 的使用 ## TDengine Sink Connector 的使用
...@@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn ...@@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。 下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
### 添加配置文件 ### 添加 Sink Connector 配置文件
``` ```shell
mkdir ~/test mkdir ~/test
cd ~/test cd ~/test
vi sink-demo.properties vi sink-demo.json
``` ```
sink-demo.properties 内容如下: sink-demo.json 内容如下:
```ini title="sink-demo.properties" ```json title="sink-demo.json"
name=TDengineSinkConnector {
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector "name": "TDengineSinkConnector",
tasks.max=1 "config": {
topics=meters "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
connection.url=jdbc:TAOS://127.0.0.1:6030 "tasks.max": "1",
connection.user=root "topics": "meters",
connection.password=taosdata "connection.url": "jdbc:TAOS://127.0.0.1:6030",
connection.database=power "connection.user": "root",
db.schemaless=line "connection.password": "taosdata",
data.precision=ns "connection.database": "power",
key.converter=org.apache.kafka.connect.storage.StringConverter "db.schemaless": "line",
value.converter=org.apache.kafka.connect.storage.StringConverter "data.precision": "ns",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"errors.tolerance": "all",
"errors.deadletterqueue.topic.name": "dead_letter_topic",
"errors.deadletterqueue.topic.replication.factor": 1
}
}
``` ```
关键配置说明: 关键配置说明:
1. `topics=meters``connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。 1. `"topics": "meters"``"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。
2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。 2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。
### 创建 Connector 实例 ### 创建 Sink Connector 实例
``` ```shell
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
``` ```
若以上命令执行成功,则有如下输出: 若以上命令执行成功,则有如下输出:
...@@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config . ...@@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
"tasks.max": "1", "tasks.max": "1",
"topics": "meters", "topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter", "value.converter": "org.apache.kafka.connect.storage.StringConverter",
"name": "TDengineSinkConnector" "name": "TDengineSinkConnector",
"errors.tolerance": "all",
"errors.deadletterqueue.topic.name": "dead_letter_topic",
"errors.deadletterqueue.topic.replication.factor": "1",
}, },
"tasks": [], "tasks": [],
"type": "sink" "type": "sink"
...@@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0 ...@@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
使用 kafka-console-producer 向主题 meters 添加测试数据。 使用 kafka-console-producer 向主题 meters 添加测试数据。
``` ```shell
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
``` ```
:::note :::note
...@@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic ...@@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
使用 TDengine CLI 验证同步是否成功。 使用 TDengine CLI 验证同步是否成功。
``` ```sql
taos> use power; taos> use power;
Database changed. Database changed.
taos> select * from meters; taos> select * from meters;
ts | current | voltage | phase | groupid | location | _ts | current | voltage | phase | groupid | location |
=============================================================================================================================================================== ===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
...@@ -297,29 +216,34 @@ TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [Influx ...@@ -297,29 +216,34 @@ TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [Influx
下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。 下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
### 添加配置文件 ### 添加 Source Connector 配置文件
``` ```shell
vi source-demo.properties vi source-demo.json
``` ```
输入以下内容: 输入以下内容:
```ini title="source-demo.properties" ```json title="source-demo.json"
name=TDengineSourceConnector {
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector "name":"TDengineSourceConnector",
tasks.max=1 "config":{
connection.url=jdbc:TAOS://127.0.0.1:6030 "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
connection.username=root "tasks.max": 1,
connection.password=taosdata "connection.url": "jdbc:TAOS://127.0.0.1:6030",
connection.database=test "connection.username": "root",
connection.attempts=3 "connection.password": "taosdata",
connection.backoff.ms=5000 "connection.database": "test",
topic.prefix=tdengine-source- "connection.attempts": 3,
poll.interval.ms=1000 "connection.backoff.ms": 5000,
fetch.max.rows=100 "topic.prefix": "tdengine-source",
key.converter=org.apache.kafka.connect.storage.StringConverter "poll.interval.ms": 1000,
value.converter=org.apache.kafka.connect.storage.StringConverter "fetch.max.rows": 100,
"topic.per.stable": true,
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
}
}
``` ```
### 准备测试数据 ### 准备测试数据
...@@ -344,27 +268,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1 ...@@ -344,27 +268,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
使用 TDengine CLI, 执行 SQL 文件。 使用 TDengine CLI, 执行 SQL 文件。
``` ```shell
taos -f prepare-source-data.sql taos -f prepare-source-data.sql
``` ```
### 创建 Connector 实例 ### 创建 Source Connector 实例
``` ```shell
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
``` ```
### 查看 topic 数据 ### 查看 topic 数据
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。 使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
``` ```shell
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
``` ```
输出: 输出:
``` ```txt
...... ......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
...@@ -373,7 +297,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2 ...@@ -373,7 +297,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据: 此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据:
``` ```sql
USE test; USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38); INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22); INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
...@@ -387,15 +311,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22); ...@@ -387,15 +311,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
查看当前活跃的 connector: 查看当前活跃的 connector:
``` ```shell
confluent local services connect connector status curl http://localhost:8083/connectors
``` ```
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload: 如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
``` ```shell
confluent local services connect connector unload TDengineSinkConnector curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
``` ```
## 配置参考 ## 配置参考
...@@ -442,15 +366,12 @@ confluent local services connect connector unload TDengineSourceConnector ...@@ -442,15 +366,12 @@ confluent local services connect connector unload TDengineSourceConnector
## 其他说明 ## 其他说明
1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。 1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>
2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
## 问题反馈 ## 问题反馈
无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues 无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:<https://github.com/taosdata/kafka-connect-tdengine/issues>
## 参考 ## 参考
1. https://www.confluent.io/what-is-apache-kafka 1. <https://kafka.apache.org/documentation/>
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
3. https://docs.confluent.io/platform/current/platform.html
...@@ -247,10 +247,17 @@ launchctl limit maxfiles ...@@ -247,10 +247,17 @@ launchctl limit maxfiles
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。 该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
正常调大 taos.cfg 中 supportVnodes 参数即可。 正常调大 taos.cfg 中 supportVnodes 参数即可。
### 21 【查询】在服务器上的使用 tao-CLI 能查到指定时间段的数据,但在客户端机器上查不到? ### 21 在服务器上使用 taos-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
这种情况是因为客户端与服务器上设置的时区不一致导致的,调整客户端与服务器的时区一致即可解决。 这种情况是因为客户端与服务器上设置的时区不一致导致的,调整客户端与服务器的时区一致即可解决。
### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因? ### 22 表名确认是存在的,但在写入或查询时返回表名不存在,什么原因?
TDengine 中的所有名称,包括数据库名、表名等都是区分大小写的,如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来使用,即使你输入的是大写的,引擎也会转化成小写来使用,如果名称前后加上了反引号,引擎就不会再转化成小写,会保持原样来使用。 TDengine 中的所有名称,包括数据库名、表名等都是区分大小写的,如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来使用,即使你输入的是大写的,引擎也会转化成小写来使用,如果名称前后加上了反引号,引擎就不会再转化成小写,会保持原样来使用。
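下面是一段仅作示意的 JDBC 片段(表名为示例,假设 conn 为已指定数据库的连接),对比加与不加反引号时名称的处理差异:
```java
try (Statement stmt = conn.createStatement()) {
    // 不加反引号:名称被统一转为小写,实际创建的表是 mytable
    stmt.executeUpdate("CREATE TABLE IF NOT EXISTS MyTable (ts TIMESTAMP, v INT)");
    // 加反引号:保留大小写,实际创建的表就是 MyTable
    stmt.executeUpdate("CREATE TABLE IF NOT EXISTS `MyTable` (ts TIMESTAMP, v INT)");
    // 查询时必须保持同样的写法,否则会提示表不存在
    stmt.executeQuery("SELECT * FROM mytable");    // 对应第一张表
    stmt.executeQuery("SELECT * FROM `MyTable`");  // 对应第二张表
}
```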
### 23 在 taos-CLI 中查询,字段内容不能完全显示出来怎么办?
可以使用 \G 参数来竖式显示,如 show databases\G; (为了输入方便,在"\"后加 TAB 键,会自动补全后面的内容)
### 24 使用 taosBenchmark 测试工具写入数据查询很快,为什么我写入的数据查询非常慢?
TDengine 在写入数据时如果存在严重的乱序写入问题,会严重影响查询性能,所以需要在写入前解决乱序的问题。如果业务是从 Kafka 消费后写入,请合理设计消费者,尽可能让一个子表的数据由同一个消费者消费并写入,避免因设计不当产生乱序,下面给出一种常见做法的示意。
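一种常见做法是在生产端就用子表名作为 Kafka 消息的 key,同一子表的数据会进入同一分区,从而由同一个消费者按序消费并写入。下面是一段仅作示意的片段(主题名、变量均为假设,假定 producer 为已创建的 `KafkaProducer<String, String>`):
```java
// 用子表名作为消息 key,保证同一子表的数据进入同一分区、保持有序
String tableName = "d1001";            // 子表名(示例)
String payload = "...";                // 该子表的一条待写入数据(示例)
producer.send(new ProducerRecord<>("meters", tableName, payload));
```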
### 25 我想统计下前后两条写入记录之间的时间差值是多少?
使用 DIFF 函数可以查看时间列或数值列前后两条记录的差值,非常方便,详细说明参见 SQL 手册 -> 函数 -> DIFF,示例如下。
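下面的片段仅作示意(表名为示例,假设 conn 为已建立的 JDBC 连接),差值的单位与数据库的时间精度一致(默认为毫秒):
```java
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("SELECT DIFF(ts) FROM d1001")) {
    while (rs.next()) {
        // 每一行是相邻两条记录时间戳的差值
        System.out.println(rs.getLong(1));
    }
}
```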
...@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do ...@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />
## 3.0.4.2 ## 3.0.4.2
<Release type="tdengine" version="3.0.4.2" /> <Release type="tdengine" version="3.0.4.2" />
......
...@@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下: ...@@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 2.5.1
<Release type="tools" version="2.5.1" />
## 2.5.0 ## 2.5.0
<Release type="tools" version="2.5.0" /> <Release type="tools" version="2.5.0" />
......
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata</groupId>
<artifactId>consumer</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.1</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>30.1.1-jre</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.3.0</version>
<executions>
<execution>
<id>ConsumerDemo</id>
<configuration>
<finalName>ConsumerDemo</finalName>
<archive>
<manifest>
<mainClass>com.taosdata.ConsumerDemo</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
</plugins>
</build>
</project>
\ No newline at end of file
# How to Run the Consumer Demo Code On Linux OS
TDengine's Consumer demo is organized as a Maven project so that users can easily compile, package and run it. If you don't have Maven on your server, you can install it with
```
sudo apt-get install maven
```
## Install TDengine Client and TaosAdapter
Make sure the TDengine client is already installed in your development environment.
Download the TDengine package from our website: `https://www.taosdata.com/cn/all-downloads/` and install the client.
## Run Consumer Demo using mvn plugin
run command:
```
mvn clean compile exec:java -Dexec.mainClass="com.taosdata.ConsumerDemo"
```
## Custom configuration
```shell
# the host of TDengine server
export TAOS_HOST="127.0.0.1"
# the port of TDengine server
export TAOS_PORT="6041"
# the consumer type, can be "ws" or "jni"
export TAOS_TYPE="ws"
# the number of consumers
export TAOS_JDBC_CONSUMER_NUM="1"
# the number of processors to consume
export TAOS_JDBC_PROCESSOR_NUM="2"
# the number of records to be consumed per processor per second
export TAOS_JDBC_RATE_PER_PROCESSOR="1000"
# poll wait time in ms
export TAOS_JDBC_POLL_SLEEP="100"
```
## Run Consumer Demo using jar
To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
```
mvn clean package assembly:single
```
To run ConsumerDemo.jar, go to ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
```
java -jar target/ConsumerDemo-jar-with-dependencies.jar
```
package com.taosdata;
import java.sql.Timestamp;
public class Bean {
private Timestamp ts;
private Integer c1;
private String c2;
public Timestamp getTs() {
return ts;
}
public void setTs(Timestamp ts) {
this.ts = ts;
}
public Integer getC1() {
return c1;
}
public void setC1(Integer c1) {
this.c1 = c1;
}
public String getC2() {
return c2;
}
public void setC2(String c2) {
this.c2 = c2;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Bean {");
sb.append("ts=").append(ts);
sb.append(", c1=").append(c1);
sb.append(", c2='").append(c2).append('\'');
sb.append('}');
return sb.toString();
}
}
package com.taosdata;
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
public class BeanDeserializer extends ReferenceDeserializer<Bean> {
}
package com.taosdata;
public class Config {
public static final String TOPIC = "test_consumer";
public static final String TAOS_HOST = "127.0.0.1";
public static final String TAOS_PORT = "6041";
public static final String TAOS_TYPE = "ws";
public static final int TAOS_JDBC_CONSUMER_NUM = 1;
public static final int TAOS_JDBC_PROCESSOR_NUM = 2;
public static final int TAOS_JDBC_RATE_PER_PROCESSOR = 1000;
public static final int TAOS_JDBC_POLL_SLEEP = 100;
private final int consumerNum;
private final int processCapacity;
private final int rate;
private final int pollSleep;
private final String type;
private final String host;
private final String port;
public Config(String type, String host, String port, int consumerNum, int processCapacity, int rate, int pollSleep) {
this.type = type;
this.consumerNum = consumerNum;
this.processCapacity = processCapacity;
this.rate = rate;
this.pollSleep = pollSleep;
this.host = host;
this.port = port;
}
public int getConsumerNum() {
return consumerNum;
}
public int getProcessCapacity() {
return processCapacity;
}
public int getRate() {
return rate;
}
public int getPollSleep() {
return pollSleep;
}
public String getHost() {
return host;
}
public String getPort() {
return port;
}
public String getType() {
return type;
}
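// Build a Config from environment variables, falling back to the defaults defined above.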
public static Config getFromENV() {
String host = System.getenv("TAOS_HOST") != null ? System.getenv("TAOS_HOST") : TAOS_HOST;
String port = System.getenv("TAOS_PORT") != null ? System.getenv("TAOS_PORT") : TAOS_PORT;
String type = System.getenv("TAOS_TYPE") != null ? System.getenv("TAOS_TYPE") : TAOS_TYPE;
String c = System.getenv("TAOS_JDBC_CONSUMER_NUM");
int num = c != null ? Integer.parseInt(c) : TAOS_JDBC_CONSUMER_NUM;
String p = System.getenv("TAOS_JDBC_PROCESSOR_NUM");
int capacity = p != null ? Integer.parseInt(p) : TAOS_JDBC_PROCESSOR_NUM;
String r = System.getenv("TAOS_JDBC_RATE_PER_PROCESSOR");
int rate = r != null ? Integer.parseInt(r) : TAOS_JDBC_RATE_PER_PROCESSOR;
String s = System.getenv("TAOS_JDBC_POLL_SLEEP");
int sleep = s != null ? Integer.parseInt(s) : TAOS_JDBC_POLL_SLEEP;
return new Config(type, host, port, num, capacity, rate, sleep);
}
}
package com.taosdata;
import com.taosdata.jdbc.tmq.TMQConstants;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import static com.taosdata.Config.*;
public class ConsumerDemo {
public static void main(String[] args) throws SQLException {
// Config
Config config = Config.getFromENV();
// Generate mock data
mockData();
Properties prop = new Properties();
prop.setProperty(TMQConstants.CONNECT_TYPE, config.getType());
prop.setProperty(TMQConstants.BOOTSTRAP_SERVERS, config.getHost() + ":" + config.getPort());
prop.setProperty(TMQConstants.CONNECT_USER, "root");
prop.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
prop.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
prop.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
prop.setProperty(TMQConstants.GROUP_ID, "gId");
prop.setProperty(TMQConstants.VALUE_DESERIALIZER, "com.taosdata.BeanDeserializer");
for (int i = 0; i < config.getConsumerNum() - 1; i++) {
new Thread(new Worker(prop, config)).start();
}
new Worker(prop, config).run();
}
public static void mockData() throws SQLException {
String dbName = "test_consumer";
String tableName = "st";
String url = "jdbc:TAOS-RS://" + TAOS_HOST + ":" + TAOS_PORT + "/?user=root&password=taosdata&batchfetch=true";
Connection connection = DriverManager.getConnection(url);
Statement statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName + " WAL_RETENTION_PERIOD 3650");
statement.executeUpdate("use " + dbName);
statement.executeUpdate("create table if not exists " + tableName + " (ts timestamp, c1 int, c2 nchar(100)) ");
statement.executeUpdate("create topic if not exists " + TOPIC + " as select ts, c1, c2 from " + tableName);
ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(r -> {
Thread t = new Thread(r);
t.setName("mock-data-thread-" + t.getId());
return t;
});
AtomicInteger atomic = new AtomicInteger();
scheduledExecutorService.scheduleWithFixedDelay(() -> {
int i = atomic.getAndIncrement();
try {
statement.executeUpdate("insert into " + tableName + " values(now, " + i + ",'" + i + "')");
} catch (SQLException e) {
// ignore
}
}, 0, 10, TimeUnit.MILLISECONDS);
}
}
```
```java
package com.taosdata;
import com.google.common.util.concurrent.RateLimiter;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TaosConsumer;
import java.sql.SQLException;
import java.time.Duration;
import java.time.LocalDateTime;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.Semaphore;
public class Worker implements Runnable {
int sleepTime;
int rate;
ForkJoinPool pool = new ForkJoinPool();
Semaphore semaphore;
TaosConsumer<Bean> consumer;
public Worker(Properties prop, Config config) throws SQLException {
consumer = new TaosConsumer<>(prop);
consumer.subscribe(Collections.singletonList(Config.TOPIC));
semaphore = new Semaphore(config.getProcessCapacity());
sleepTime = config.getPollSleep();
rate = config.getRate();
}
@Override
public void run() {
while (!Thread.interrupted()) {
try {
// control the request rate: only poll when a processing slot is available
if (semaphore.tryAcquire()) {
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
pool.submit(() -> {
RateLimiter limiter = RateLimiter.create(rate);
try {
for (ConsumerRecord<Bean> record : records) {
// flow control (rate limiting)
limiter.acquire();
// business logic: process the record
System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
}
} finally {
semaphore.release();
}
});
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}
}
```
...@@ -37,6 +37,13 @@ extern "C" { ...@@ -37,6 +37,13 @@ extern "C" {
) )
// clang-format on // clang-format on
typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
typedef struct STableKeyInfo {
uint64_t uid;
uint64_t groupId;
} STableKeyInfo;
typedef struct SWinKey { typedef struct SWinKey {
uint64_t groupId; uint64_t groupId;
TSKEY ts; TSKEY ts;
...@@ -224,6 +231,7 @@ typedef struct SColumnInfoData { ...@@ -224,6 +231,7 @@ typedef struct SColumnInfoData {
}; };
SColumnInfo info; // column info SColumnInfo info; // column info
bool hasNull; // if current column data has null value. bool hasNull; // if current column data has null value.
bool reassigned; // if current column data is reassigned.
} SColumnInfoData; } SColumnInfoData;
typedef struct SQueryTableDataCond { typedef struct SQueryTableDataCond {
......
...@@ -178,6 +178,7 @@ int32_t getJsonValueLen(const char* data); ...@@ -178,6 +178,7 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue); int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity, int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2); const SColumnInfoData* pSource, int32_t numOfRow2);
...@@ -247,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData ...@@ -247,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
tb_uid_t suid); tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId); char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) { static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock); return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
......
...@@ -145,7 +145,7 @@ int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMall ...@@ -145,7 +145,7 @@ int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMall
extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull); extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull);
// for stmt bind // for stmt bind
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind); int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen);
void tColDataSortMerge(SArray *colDataArr); void tColDataSortMerge(SArray *colDataArr);
// for raw block // for raw block
......
...@@ -29,7 +29,6 @@ extern "C" { ...@@ -29,7 +29,6 @@ extern "C" {
#define SLOW_LOG_TYPE_OTHERS 0x4 #define SLOW_LOG_TYPE_OTHERS 0x4
#define SLOW_LOG_TYPE_ALL 0xFFFFFFFF #define SLOW_LOG_TYPE_ALL 0xFFFFFFFF
// cluster // cluster
extern char tsFirst[]; extern char tsFirst[];
extern char tsSecond[]; extern char tsSecond[];
...@@ -83,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs; ...@@ -83,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
// mnode // mnode
extern int64_t tsMndSdbWriteDelta; extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention; extern int64_t tsMndLogRetention;
extern bool tsMndSkipGrant;
// monitor // monitor
extern bool tsEnableMonitor; extern bool tsEnableMonitor;
...@@ -131,7 +131,7 @@ extern int32_t tsSlowLogScope; ...@@ -131,7 +131,7 @@ extern int32_t tsSlowLogScope;
// client // client
extern int32_t tsMinSlidingTime; extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime; extern int32_t tsMinIntervalTime;
extern int32_t tsMaxMemUsedByInsert; extern int32_t tsMaxInsertBatchRows;
// build info // build info
extern char version[]; extern char version[];
...@@ -180,6 +180,8 @@ extern int32_t tsRpcRetryInterval; ...@@ -180,6 +180,8 @@ extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream; extern bool tsDisableStream;
extern int64_t tsStreamBufferSize; extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval; extern int64_t tsCheckpointInterval;
extern bool tsFilterScalarMode;
extern int32_t tsMaxStreamBackendCache;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
......
...@@ -2009,10 +2009,8 @@ typedef struct { ...@@ -2009,10 +2009,8 @@ typedef struct {
int8_t withMeta; int8_t withMeta;
char* sql; char* sql;
char subDbName[TSDB_DB_FNAME_LEN]; char subDbName[TSDB_DB_FNAME_LEN];
union { char* ast;
char* ast; char subStbName[TSDB_TABLE_FNAME_LEN];
char subStbName[TSDB_TABLE_FNAME_LEN];
};
} SCMCreateTopicReq; } SCMCreateTopicReq;
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq); int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
...@@ -2809,37 +2807,49 @@ typedef struct { ...@@ -2809,37 +2807,49 @@ typedef struct {
int64_t suid; int64_t suid;
} SMqRebVgReq; } SMqRebVgReq;
static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) { static FORCE_INLINE int tEncodeSMqRebVgReq(SEncoder *pCoder, const SMqRebVgReq* pReq) {
int32_t tlen = 0; if (tStartEncode(pCoder) < 0) return -1;
tlen += taosEncodeFixedI64(buf, pReq->leftForVer); if (tEncodeI64(pCoder, pReq->leftForVer) < 0) return -1;
tlen += taosEncodeFixedI32(buf, pReq->vgId); if (tEncodeI32(pCoder, pReq->vgId) < 0) return -1;
tlen += taosEncodeFixedI64(buf, pReq->oldConsumerId); if (tEncodeI64(pCoder, pReq->oldConsumerId) < 0) return -1;
tlen += taosEncodeFixedI64(buf, pReq->newConsumerId); if (tEncodeI64(pCoder, pReq->newConsumerId) < 0) return -1;
tlen += taosEncodeString(buf, pReq->subKey); if (tEncodeCStr(pCoder, pReq->subKey) < 0) return -1;
tlen += taosEncodeFixedI8(buf, pReq->subType); if (tEncodeI8(pCoder, pReq->subType) < 0) return -1;
tlen += taosEncodeFixedI8(buf, pReq->withMeta); if (tEncodeI8(pCoder, pReq->withMeta) < 0) return -1;
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) { if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
tlen += taosEncodeString(buf, pReq->qmsg); if (tEncodeCStr(pCoder, pReq->qmsg) < 0) return -1;
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { } else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
tlen += taosEncodeFixedI64(buf, pReq->suid); if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->qmsg) < 0) return -1;
} }
return tlen; tEndEncode(pCoder);
return 0;
} }
static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq) { static FORCE_INLINE int tDecodeSMqRebVgReq(SDecoder *pCoder, SMqRebVgReq* pReq) {
buf = taosDecodeFixedI64(buf, &pReq->leftForVer); if (tStartDecode(pCoder) < 0) return -1;
buf = taosDecodeFixedI32(buf, &pReq->vgId);
buf = taosDecodeFixedI64(buf, &pReq->oldConsumerId); if (tDecodeI64(pCoder, &pReq->leftForVer) < 0) return -1;
buf = taosDecodeFixedI64(buf, &pReq->newConsumerId);
buf = taosDecodeStringTo(buf, pReq->subKey); if (tDecodeI32(pCoder, &pReq->vgId) < 0) return -1;
buf = taosDecodeFixedI8(buf, &pReq->subType); if (tDecodeI64(pCoder, &pReq->oldConsumerId) < 0) return -1;
buf = taosDecodeFixedI8(buf, &pReq->withMeta); if (tDecodeI64(pCoder, &pReq->newConsumerId) < 0) return -1;
if (tDecodeCStrTo(pCoder, pReq->subKey) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->subType) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->withMeta) < 0) return -1;
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) { if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
buf = taosDecodeString(buf, &pReq->qmsg); if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) { } else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
buf = taosDecodeFixedI64(buf, &pReq->suid); if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
if (!tDecodeIsEnd(pCoder)){
if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
}
} }
return (void*)buf;
tEndDecode(pCoder);
return 0;
} }
typedef struct { typedef struct {
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
extern "C" { extern "C" {
#endif #endif
#define TIME_IS_VAR_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y') #define IS_CALENDAR_TIME_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y')
#define TIME_UNIT_NANOSECOND 'b' #define TIME_UNIT_NANOSECOND 'b'
#define TIME_UNIT_MICROSECOND 'u' #define TIME_UNIT_MICROSECOND 'u'
...@@ -74,7 +74,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) { ...@@ -74,7 +74,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision); int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision); int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision); int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision); int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
......
...@@ -354,6 +354,7 @@ ...@@ -354,6 +354,7 @@
#define TK_WAL 336 #define TK_WAL 336
#define TK_NK_SPACE 600 #define TK_NK_SPACE 600
#define TK_NK_COMMENT 601 #define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602 #define TK_NK_ILLEGAL 602
......
...@@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg { ...@@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg {
uint32_t maxDataBlockNumPerQuery; uint32_t maxDataBlockNumPerQuery;
} SDataSinkMgtCfg; } SDataSinkMgtCfg;
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg); int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI);
typedef struct SInputData { typedef struct SInputData {
const struct SSDataBlock* pData; const struct SSDataBlock* pData;
......
...@@ -23,6 +23,7 @@ extern "C" { ...@@ -23,6 +23,7 @@ extern "C" {
#include "query.h" #include "query.h"
#include "tcommon.h" #include "tcommon.h"
#include "tmsgcb.h" #include "tmsgcb.h"
#include "storageapi.h"
typedef void* qTaskInfo_t; typedef void* qTaskInfo_t;
typedef void* DataSinkHandle; typedef void* DataSinkHandle;
...@@ -41,7 +42,6 @@ typedef struct { ...@@ -41,7 +42,6 @@ typedef struct {
typedef struct { typedef struct {
void* tqReader; void* tqReader;
void* meta;
void* config; void* config;
void* vnode; void* vnode;
void* mnd; void* mnd;
...@@ -51,10 +51,10 @@ typedef struct { ...@@ -51,10 +51,10 @@ typedef struct {
bool initTableReader; bool initTableReader;
bool initTqReader; bool initTqReader;
int32_t numOfVgroups; int32_t numOfVgroups;
void* sContext; // SSnapContext*
void* sContext; // SSnapContext* void* pStateBackend;
struct SStorageAPI api;
void* pStateBackend;
} SReadHandle; } SReadHandle;
// in queue mode, data streams are seperated by msg // in queue mode, data streams are seperated by msg
...@@ -82,6 +82,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v ...@@ -82,6 +82,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int32_t vgId, int32_t* numOfCols, qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int32_t vgId, int32_t* numOfCols,
uint64_t id); uint64_t id);
int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList, void* pTaskInfo);
/** /**
* set the task Id, usually used by message queue process * set the task Id, usually used by message queue process
* @param tinfo * @param tinfo
...@@ -90,6 +92,8 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3 ...@@ -90,6 +92,8 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3
*/ */
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId); void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code);
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo); int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
// todo refactor // todo refactor
...@@ -186,7 +190,17 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len); ...@@ -186,7 +190,17 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len); int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key); void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);
STimeWindow getAlignQueryTimeWindow(const SInterval* pInterval, int64_t key);
/**
* return the scan info, in the form of tuple of two items, including table uid and current timestamp
* @param tinfo
* @param uid
* @param ts
* @return
*/
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo); SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_STORAGEAPI_H
#define TDENGINE_STORAGEAPI_H
#include "function.h"
#include "index.h"
#include "taosdef.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tscalablebf.h"
#include "tsimplehash.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TIMEWINDOW_RANGE_CONTAINED 1
#define TIMEWINDOW_RANGE_EXTERNAL 2
#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1
#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
#define CACHESCAN_RETRIEVE_LAST 0x8
#define META_READER_NOLOCK 0x1
typedef struct SMeta SMeta;
typedef TSKEY (*GetTsFun)(void*);
typedef struct SMetaEntry {
int64_t version;
int8_t type;
int8_t flags; // TODO: need refactor?
tb_uid_t uid;
char* name;
union {
struct {
SSchemaWrapper schemaRow;
SSchemaWrapper schemaTag;
SRSmaParam rsmaParam;
} stbEntry;
struct {
int64_t ctime;
int32_t ttlDays;
int32_t commentLen;
char* comment;
tb_uid_t suid;
uint8_t* pTags;
} ctbEntry;
struct {
int64_t ctime;
int32_t ttlDays;
int32_t commentLen;
char* comment;
int32_t ncid; // next column id
SSchemaWrapper schemaRow;
} ntbEntry;
struct {
STSma* tsma;
} smaEntry;
};
uint8_t* pBuf;
} SMetaEntry;
typedef struct SMetaReader {
int32_t flags;
void* pMeta;
SDecoder coder;
SMetaEntry me;
void* pBuf;
int32_t szBuf;
struct SStoreMeta* pAPI;
} SMetaReader;
typedef struct SMTbCursor {
void* pMeta;
void* pDbc;
void* pKey;
void* pVal;
int32_t kLen;
int32_t vLen;
SMetaReader mr;
int8_t paused;
} SMTbCursor;
typedef struct SRowBuffPos {
void* pRowBuff;
void* pKey;
bool beFlushed;
bool beUsed;
} SRowBuffPos;
// tq
typedef struct SMetaTableInfo {
int64_t suid;
int64_t uid;
SSchemaWrapper* schema;
char tbName[TSDB_TABLE_NAME_LEN];
} SMetaTableInfo;
typedef struct SSnapContext {
SMeta* pMeta; // todo remove it
int64_t snapVersion;
void* pCur;
int64_t suid;
int8_t subType;
SHashObj* idVersion;
SHashObj* suidInfo;
SArray* idList;
int32_t index;
bool withMeta;
bool queryMeta; // true-get meta, false-get data
} SSnapContext;
typedef struct {
int64_t uid;
int64_t ctbNum;
} SMetaStbStats;
// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
// int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
// int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
// int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
// bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
// bool tqCurrentBlockConsumed(const STqReader* pReader);
// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
// bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
// bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
// *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
// *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);
// clang-format off
/*-------------------------------------------------new api format---------------------------------------------------*/
typedef struct TsdReader {
int32_t (*tsdReaderOpen)(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
SSDataBlock* pResBlock, void** ppReader, const char* idstr, bool countOnly,
SHashObj** pIgnoreTables);
void (*tsdReaderClose)();
void (*tsdSetReaderTaskId)(void *pReader, const char *pId);
int32_t (*tsdSetQueryTableList)();
int32_t (*tsdNextDataBlock)();
int32_t (*tsdReaderRetrieveBlockSMAInfo)();
SSDataBlock *(*tsdReaderRetrieveDataBlock)();
void (*tsdReaderReleaseDataBlock)();
int32_t (*tsdReaderResetStatus)();
int32_t (*tsdReaderGetDataBlockDistInfo)();
int64_t (*tsdReaderGetNumOfInMemRows)();
void (*tsdReaderNotifyClosing)();
} TsdReader;
typedef struct SStoreCacheReader {
int32_t (*openReader)(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
void *(*closeReader)(void *pReader);
int32_t (*retrieveRows)(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
SArray *pTableUidList);
int32_t (*reuseReader)(void *pReader, void *pTableIdList, int32_t numOfTables);
} SStoreCacheReader;
// clang-format on
/*------------------------------------------------------------------------------------------------------------------*/
/*
void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
bool tqCurrentBlockConsumed(const STqReader* pReader);
int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char* idstr);
STqReader *tqReaderOpen(void *pVnode);
void tqReaderClose(STqReader *);
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
SWalReader* tqGetWalReader(STqReader* pReader);
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
*/
// todo rename
typedef struct SStoreTqReader {
struct STqReader* (*tqReaderOpen)();
void (*tqReaderClose)();
int32_t (*tqReaderSeek)();
int32_t (*tqRetrieveBlock)();
bool (*tqReaderNextBlockInWal)();
bool (*tqNextBlockImpl)(); // todo remove it
SSDataBlock* (*tqGetResultBlock)();
void (*tqReaderSetColIdList)();
int32_t (*tqReaderSetQueryTableList)();
int32_t (*tqReaderAddTables)();
int32_t (*tqReaderRemoveTables)();
bool (*tqReaderIsQueriedTable)();
bool (*tqReaderCurrentBlockConsumed)();
struct SWalReader* (*tqReaderGetWalReader)(); // todo remove it
int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
int32_t (*tqReaderSetSubmitMsg)(); // todo remove it
bool (*tqReaderNextBlockFilterOut)();
} SStoreTqReader;
typedef struct SStoreSnapshotFn {
int32_t (*createSnapshot)(SSnapContext* ctx, int64_t uid);
int32_t (*destroySnapshot)(SSnapContext* ctx);
SMetaTableInfo (*getMetaTableInfoFromSnapshot)(SSnapContext* ctx);
int32_t (*getTableInfoFromSnapshot)(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid);
} SStoreSnapshotFn;
/**
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderReleaseLock(SMetaReader *pReader);
void metaReaderClear(SMetaReader *pReader);
int32_t metaReaderGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaReaderGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList);
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid);
int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType);
bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
int32_t metaGetCachedTableUidList(SMeta *pMeta, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList,
bool *acquired);
int32_t metaUidFilterCachePut(SMeta *pMeta, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
int32_t payloadLen, double selectivityRatio);
tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
int32_t metaGetCachedTbGroup(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t
payloadLen);
*/
typedef struct SStoreMeta {
SMTbCursor* (*openTableMetaCursor)(void* pVnode); // metaOpenTbCursor
void (*closeTableMetaCursor)(SMTbCursor* pTbCur); // metaCloseTbCursor
void (*pauseTableMetaCursor)(SMTbCursor* pTbCur); // metaPauseTbCursor
void (*resumeTableMetaCursor)(SMTbCursor* pTbCur, int8_t first); // metaResumeTbCursor
int32_t (*cursorNext)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorNext
int32_t (*cursorPrev)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorPrev
int32_t (*getTableTags)(void* pVnode, uint64_t suid, SArray* uidList);
int32_t (*getTableTagsByUid)(void* pVnode, int64_t suid, SArray* uidList);
const void* (*extractTagVal)(const void* tag, int16_t type, STagVal* tagVal); // todo remove it
int32_t (*getTableUidByName)(void* pVnode, char* tbName, uint64_t* uid);
int32_t (*getTableTypeByName)(void* pVnode, char* tbName, ETableType* tbType);
int32_t (*getTableNameByUid)(void* pVnode, uint64_t uid, char* tbName);
bool (*isTableExisted)(void* pVnode, tb_uid_t uid);
int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen);
int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes);
int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio);
void* (*storeGetIndexInfo)();
void* (*getInvertIndex)(void* pVnode);
int32_t (*getChildTableList)(
void* pVnode, int64_t suid,
SArray* list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
void* storeGetVersionRange;
void* storeGetLastTimestamp;
int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
// db name, vgId, numOfTables, numOfSTables
int32_t (*getNumOfChildTables)(
void* pVnode, int64_t uid,
int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int64_t (*getNumOfRowsInMem)(void* pVnode);
/**
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
*/
} SStoreMeta;
typedef struct SStoreMetaReader {
void (*initReader)(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI);
void (*clearReader)(SMetaReader* pReader);
void (*readerReleaseLock)(SMetaReader* pReader);
int32_t (*getTableEntryByUid)(SMetaReader* pReader, tb_uid_t uid);
int32_t (*getTableEntryByName)(SMetaReader* pReader, const char* name);
int32_t (*getEntryGetUidCache)(SMetaReader* pReader, tb_uid_t uid);
} SStoreMetaReader;
typedef struct SUpdateInfo {
SArray* pTsBuckets;
uint64_t numBuckets;
SArray* pTsSBFs;
uint64_t numSBFs;
int64_t interval;
int64_t watermark;
TSKEY minTS;
SScalableBf* pCloseWinSBF;
SHashObj* pMap;
uint64_t maxDataVersion;
} SUpdateInfo;
typedef struct {
void* iter; // rocksdb_iterator_t* iter;
void* snapshot; // rocksdb_snapshot_t* snapshot;
void* readOpt; // rocksdb_readoptions_t* readOpt;
void* db; // rocksdb_t* db;
void* pCur;
int64_t number;
} SStreamStateCur;
typedef struct SStateStore {
int32_t (*streamStatePutParName)(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal);
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
void (*streamStateFreeVal)(void* val);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateGetByPos)(SStreamState* pState, void* pos, void** pVal);
int32_t (*streamStateDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateClear)(SStreamState* pState);
void (*streamStateSetNumber)(SStreamState* pState, int32_t number);
int32_t (*streamStateSaveInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateFillDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
int32_t (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key);
SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur);
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
int32_t (*streamStateSessionClear)(SStreamState* pState);
int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
void (*updateInfoDestroy)(SUpdateInfo* pInfo);
SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark);
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
void (*streamFileStateClear)(struct SStreamFileState* pFileState);
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);
SStreamState* (*streamStateOpen)(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
void (*streamStateClose)(SStreamState* pState, bool remove);
int32_t (*streamStateBegin)(SStreamState* pState);
int32_t (*streamStateCommit)(SStreamState* pState);
void (*streamStateDestroy)(SStreamState* pState, bool remove);
int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark);
} SStateStore;
typedef struct SStorageAPI {
SStoreMeta metaFn; // todo: refactor
TsdReader tsdReader;
SStoreMetaReader metaReaderFn;
SStoreCacheReader cacheFn;
SStoreSnapshotFn snapshotFn;
SStoreTqReader tqReaderFn;
SStateStore stateStore;
SMetaDataFilterAPI metaFilter;
SFunctionStateStore functionStore;
} SStorageAPI;
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_STORAGEAPI_H
...@@ -21,6 +21,7 @@ extern "C" { ...@@ -21,6 +21,7 @@ extern "C" {
#endif #endif
#include "tcommon.h" #include "tcommon.h"
#include "tsimplehash.h"
#include "tvariant.h" #include "tvariant.h"
struct SqlFunctionCtx; struct SqlFunctionCtx;
...@@ -76,7 +77,7 @@ enum { ...@@ -76,7 +77,7 @@ enum {
enum { enum {
MAIN_SCAN = 0x0u, MAIN_SCAN = 0x0u,
REVERSE_SCAN = 0x1u, // todo remove it REVERSE_SCAN = 0x1u, // todo remove it
PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan
}; };
typedef struct SPoint1 { typedef struct SPoint1 {
...@@ -127,16 +128,58 @@ typedef struct SSerializeDataHandle { ...@@ -127,16 +128,58 @@ typedef struct SSerializeDataHandle {
void *pState; void *pState;
} SSerializeDataHandle; } SSerializeDataHandle;
// incremental state storage
typedef struct STdbState {
void *rocksdb;
void **pHandle;
void *writeOpts;
void *readOpts;
void **cfOpts;
void *dbOpt;
struct SStreamTask *pOwner;
void *param;
void *env;
SListNode *pComparNode;
void *pBackend;
char idstr[64];
void *compactFactory;
TdThreadRwlock rwLock;
void *db;
void *pStateDb;
void *pFuncStateDb;
void *pFillStateDb; // todo refactor
void *pSessionStateDb;
void *pParNameDb;
void *pParTagDb;
void *txn;
} STdbState;
typedef struct {
STdbState *pTdbState;
struct SStreamFileState *pFileState;
int32_t number;
SSHashObj *parNameMap;
int64_t checkPointId;
int32_t taskId;
int64_t streamId;
} SStreamState;
typedef struct SFunctionStateStore {
int32_t (*streamStateFuncPut)(SStreamState *pState, const SWinKey *key, const void *value, int32_t vLen);
int32_t (*streamStateFuncGet)(SStreamState *pState, const SWinKey *key, void **ppVal, int32_t *pVLen);
} SFunctionStateStore;
// sql function runtime context // sql function runtime context
typedef struct SqlFunctionCtx { typedef struct SqlFunctionCtx {
SInputColumnInfoData input; SInputColumnInfoData input;
SResultDataInfo resDataInfo; SResultDataInfo resDataInfo;
uint32_t order; // data block scanner order: asc|desc uint32_t order; // data block scanner order: asc|desc
uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason] uint8_t isPseudoFunc; // denote current function is pseudo function or not [added for perf reason]
uint8_t isNotNullFunc;// not return null value. uint8_t isNotNullFunc; // not return null value.
uint8_t scanFlag; // record current running step, default: 0 uint8_t scanFlag; // record current running step, default: 0
int16_t functionId; // function id int16_t functionId; // function id
char *pOutput; // final result output buffer, point to sdata->data char *pOutput; // final result output buffer, point to sdata->data
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param // input parameter, e.g., top(k, 20), the number of results of top query is kept in param
SFunctParam *param; SFunctParam *param;
// corresponding output buffer for timestamp of each result, e.g., diff/csum // corresponding output buffer for timestamp of each result, e.g., diff/csum
...@@ -155,6 +198,7 @@ typedef struct SqlFunctionCtx { ...@@ -155,6 +198,7 @@ typedef struct SqlFunctionCtx {
SSerializeDataHandle saveHandle; SSerializeDataHandle saveHandle;
int32_t exprIdx; int32_t exprIdx;
char *udfName; char *udfName;
SFunctionStateStore *pStore;
} SqlFunctionCtx; } SqlFunctionCtx;
typedef struct tExprNode { typedef struct tExprNode {
......
...@@ -212,13 +212,38 @@ typedef struct SIndexMetaArg { ...@@ -212,13 +212,38 @@ typedef struct SIndexMetaArg {
void* idx; void* idx;
void* ivtIdx; void* ivtIdx;
uint64_t suid; uint64_t suid;
int (*metaFilterFunc)(void* metaEx, void* param, SArray* result);
} SIndexMetaArg; } SIndexMetaArg;
/**
* the underlying storage module must implement this API to employ the index functions.
* @param pMeta
* @param param
* @param results
* @return
*/
typedef struct SMetaFltParam {
uint64_t suid;
int16_t cid;
int16_t type;
void *val;
bool reverse;
bool equal;
int (*filterFunc)(void *a, void *b, int16_t type);
} SMetaFltParam;
typedef struct SMetaDataFilterAPI {
int32_t (*metaFilterTableIds)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
int32_t (*metaFilterCreateTime)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
int32_t (*metaFilterTableName)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
int32_t (*metaFilterTtl)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
} SMetaDataFilterAPI;
typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus; typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus;
SIdxFltStatus idxGetFltStatus(SNode* pFilterNode); SIdxFltStatus idxGetFltStatus(SNode* pFilterNode, SMetaDataFilterAPI* pAPI);
int32_t doFilterTag(SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result, SIdxFltStatus* status); int32_t doFilterTag(SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result, SIdxFltStatus* status, SMetaDataFilterAPI* pAPI);
/* /*
* init index env * init index env
......
...@@ -364,6 +364,7 @@ typedef struct SCreateTopicStmt { ...@@ -364,6 +364,7 @@ typedef struct SCreateTopicStmt {
bool ignoreExists; bool ignoreExists;
bool withMeta; bool withMeta;
SNode* pQuery; SNode* pQuery;
SNode* pWhere;
} SCreateTopicStmt; } SCreateTopicStmt;
typedef struct SDropTopicStmt { typedef struct SDropTopicStmt {
......
...@@ -328,6 +328,8 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* p ...@@ -328,6 +328,8 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* p
SNode* nodesListGetNode(SNodeList* pList, int32_t index); SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index); SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList); void nodesDestroyList(SNodeList* pList);
bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList);
// Only clear the linked list structure, without releasing the elements inside // Only clear the linked list structure, without releasing the elements inside
void nodesClearList(SNodeList* pList); void nodesClearList(SNodeList* pList);
...@@ -346,6 +348,7 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon ...@@ -346,6 +348,7 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon
void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext); void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext);
bool nodesEqualNode(const SNode* a, const SNode* b); bool nodesEqualNode(const SNode* a, const SNode* b);
bool nodesMatchNode(const SNode* pSub, const SNode* pNode);
SNode* nodesCloneNode(const SNode* pNode); SNode* nodesCloneNode(const SNode* pNode);
SNodeList* nodesCloneList(const SNodeList* pList); SNodeList* nodesCloneList(const SNodeList* pList);
......
...@@ -112,6 +112,7 @@ typedef struct SJoinLogicNode { ...@@ -112,6 +112,7 @@ typedef struct SJoinLogicNode {
SNode* pOnConditions; SNode* pOnConditions;
bool isSingleTableJoin; bool isSingleTableJoin;
EOrder inputTsOrder; EOrder inputTsOrder;
SNode* pColEqualOnConditions;
} SJoinLogicNode; } SJoinLogicNode;
typedef struct SAggLogicNode { typedef struct SAggLogicNode {
...@@ -406,6 +407,7 @@ typedef struct SSortMergeJoinPhysiNode { ...@@ -406,6 +407,7 @@ typedef struct SSortMergeJoinPhysiNode {
SNode* pOnConditions; SNode* pOnConditions;
SNodeList* pTargets; SNodeList* pTargets;
EOrder inputTsOrder; EOrder inputTsOrder;
SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode; } SSortMergeJoinPhysiNode;
typedef struct SAggPhysiNode { typedef struct SAggPhysiNode {
...@@ -448,7 +450,7 @@ typedef struct SMergePhysiNode { ...@@ -448,7 +450,7 @@ typedef struct SMergePhysiNode {
bool ignoreGroupId; bool ignoreGroupId;
} SMergePhysiNode; } SMergePhysiNode;
typedef struct SWinodwPhysiNode { typedef struct SWindowPhysiNode {
SPhysiNode node; SPhysiNode node;
SNodeList* pExprs; // these are expression list of parameter expression of function SNodeList* pExprs; // these are expression list of parameter expression of function
SNodeList* pFuncs; SNodeList* pFuncs;
...@@ -461,10 +463,10 @@ typedef struct SWinodwPhysiNode { ...@@ -461,10 +463,10 @@ typedef struct SWinodwPhysiNode {
EOrder inputTsOrder; EOrder inputTsOrder;
EOrder outputTsOrder; EOrder outputTsOrder;
bool mergeDataBlock; bool mergeDataBlock;
} SWinodwPhysiNode; } SWindowPhysiNode;
typedef struct SIntervalPhysiNode { typedef struct SIntervalPhysiNode {
SWinodwPhysiNode window; SWindowPhysiNode window;
int64_t interval; int64_t interval;
int64_t offset; int64_t offset;
int64_t sliding; int64_t sliding;
...@@ -497,7 +499,7 @@ typedef struct SMultiTableIntervalPhysiNode { ...@@ -497,7 +499,7 @@ typedef struct SMultiTableIntervalPhysiNode {
} SMultiTableIntervalPhysiNode; } SMultiTableIntervalPhysiNode;
typedef struct SSessionWinodwPhysiNode { typedef struct SSessionWinodwPhysiNode {
SWinodwPhysiNode window; SWindowPhysiNode window;
int64_t gap; int64_t gap;
} SSessionWinodwPhysiNode; } SSessionWinodwPhysiNode;
...@@ -506,14 +508,14 @@ typedef SSessionWinodwPhysiNode SStreamSemiSessionWinodwPhysiNode; ...@@ -506,14 +508,14 @@ typedef SSessionWinodwPhysiNode SStreamSemiSessionWinodwPhysiNode;
typedef SSessionWinodwPhysiNode SStreamFinalSessionWinodwPhysiNode; typedef SSessionWinodwPhysiNode SStreamFinalSessionWinodwPhysiNode;
typedef struct SStateWinodwPhysiNode { typedef struct SStateWinodwPhysiNode {
SWinodwPhysiNode window; SWindowPhysiNode window;
SNode* pStateKey; SNode* pStateKey;
} SStateWinodwPhysiNode; } SStateWinodwPhysiNode;
typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode; typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode;
typedef struct SEventWinodwPhysiNode { typedef struct SEventWinodwPhysiNode {
SWinodwPhysiNode window; SWindowPhysiNode window;
SNode* pStartCond; SNode* pStartCond;
SNode* pEndCond; SNode* pEndCond;
} SEventWinodwPhysiNode; } SEventWinodwPhysiNode;
......
...@@ -241,6 +241,12 @@ typedef enum EFillMode { ...@@ -241,6 +241,12 @@ typedef enum EFillMode {
FILL_MODE_NEXT FILL_MODE_NEXT
} EFillMode; } EFillMode;
typedef enum ETimeLineMode {
TIME_LINE_NONE = 1,
TIME_LINE_MULTI,
TIME_LINE_GLOBAL,
} ETimeLineMode;
typedef struct SFillNode { typedef struct SFillNode {
ENodeType type; // QUERY_NODE_FILL ENodeType type; // QUERY_NODE_FILL
EFillMode mode; EFillMode mode;
...@@ -263,50 +269,50 @@ typedef struct SCaseWhenNode { ...@@ -263,50 +269,50 @@ typedef struct SCaseWhenNode {
} SCaseWhenNode; } SCaseWhenNode;
typedef struct SSelectStmt { typedef struct SSelectStmt {
ENodeType type; // QUERY_NODE_SELECT_STMT ENodeType type; // QUERY_NODE_SELECT_STMT
bool isDistinct; bool isDistinct;
SNodeList* pProjectionList; SNodeList* pProjectionList;
SNode* pFromTable; SNode* pFromTable;
SNode* pWhere; SNode* pWhere;
SNodeList* pPartitionByList; SNodeList* pPartitionByList;
SNodeList* pTags; // for create stream SNodeList* pTags; // for create stream
SNode* pSubtable; // for create stream SNode* pSubtable; // for create stream
SNode* pWindow; SNode* pWindow;
SNodeList* pGroupByList; // SGroupingSetNode SNodeList* pGroupByList; // SGroupingSetNode
SNode* pHaving; SNode* pHaving;
SNode* pRange; SNode* pRange;
SNode* pEvery; SNode* pEvery;
SNode* pFill; SNode* pFill;
SNodeList* pOrderByList; // SOrderByExprNode SNodeList* pOrderByList; // SOrderByExprNode
SLimitNode* pLimit; SLimitNode* pLimit;
SLimitNode* pSlimit; SLimitNode* pSlimit;
STimeWindow timeRange; STimeWindow timeRange;
char stmtName[TSDB_TABLE_NAME_LEN]; char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision; uint8_t precision;
int32_t selectFuncNum; int32_t selectFuncNum;
int32_t returnRows; // EFuncReturnRows int32_t returnRows; // EFuncReturnRows
bool isEmptyResult; ETimeLineMode timeLineResMode;
bool isTimeLineResult; bool isEmptyResult;
bool isSubquery; bool isSubquery;
bool hasAggFuncs; bool hasAggFuncs;
bool hasRepeatScanFuncs; bool hasRepeatScanFuncs;
bool hasIndefiniteRowsFunc; bool hasIndefiniteRowsFunc;
bool hasMultiRowsFunc; bool hasMultiRowsFunc;
bool hasSelectFunc; bool hasSelectFunc;
bool hasSelectValFunc; bool hasSelectValFunc;
bool hasOtherVectorFunc; bool hasOtherVectorFunc;
bool hasUniqueFunc; bool hasUniqueFunc;
bool hasTailFunc; bool hasTailFunc;
bool hasInterpFunc; bool hasInterpFunc;
bool hasInterpPseudoColFunc; bool hasInterpPseudoColFunc;
bool hasLastRowFunc; bool hasLastRowFunc;
bool hasLastFunc; bool hasLastFunc;
bool hasTimeLineFunc; bool hasTimeLineFunc;
bool hasUdaf; bool hasUdaf;
bool hasStateKey; bool hasStateKey;
bool onlyHasKeepOrderFunc; bool onlyHasKeepOrderFunc;
bool groupSort; bool groupSort;
bool tagScan; bool tagScan;
} SSelectStmt; } SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType; typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
...@@ -321,6 +327,7 @@ typedef struct SSetOperator { ...@@ -321,6 +327,7 @@ typedef struct SSetOperator {
SNode* pLimit; SNode* pLimit;
char stmtName[TSDB_TABLE_NAME_LEN]; char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision; uint8_t precision;
ETimeLineMode timeLineResMode;
} SSetOperator; } SSetOperator;
typedef enum ESqlClause { typedef enum ESqlClause {
......
...@@ -51,6 +51,12 @@ typedef enum { ...@@ -51,6 +51,12 @@ typedef enum {
TARGET_TYPE_OTHER, TARGET_TYPE_OTHER,
} ETargetType; } ETargetType;
typedef enum {
TCOL_TYPE_COLUMN = 1,
TCOL_TYPE_TAG,
TCOL_TYPE_NONE,
} ETableColumnType;
#define QUERY_POLICY_VNODE 1 #define QUERY_POLICY_VNODE 1
#define QUERY_POLICY_HYBRID 2 #define QUERY_POLICY_HYBRID 2
#define QUERY_POLICY_QNODE 3 #define QUERY_POLICY_QNODE 3
...@@ -253,6 +259,7 @@ void destroyQueryExecRes(SExecResult* pRes); ...@@ -253,6 +259,7 @@ void destroyQueryExecRes(SExecResult* pRes);
int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len); int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len);
char* parseTagDatatoJson(void* p); char* parseTagDatatoJson(void* p);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst); int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst); int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst); int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
void freeVgInfo(SDBVgInfo* vgInfo); void freeVgInfo(SDBVgInfo* vgInfo);
......
...@@ -27,65 +27,63 @@ ...@@ -27,65 +27,63 @@
extern "C" { extern "C" {
#endif #endif
#include "storageapi.h"
// void* streamBackendInit(const char* path); // void* streamBackendInit(const char* path);
// void streamBackendCleanup(void* arg); // void streamBackendCleanup(void* arg);
// SListNode* streamBackendAddCompare(void* backend, void* arg); // SListNode* streamBackendAddCompare(void* backend, void* arg);
// void streamBackendDelCompare(void* backend, void* arg); // void streamBackendDelCompare(void* backend, void* arg);
typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
// <<<<<<< HEAD
typedef struct STdbState { // typedef struct STdbState {
rocksdb_t* rocksdb; // rocksdb_t* rocksdb;
rocksdb_column_family_handle_t** pHandle; // rocksdb_column_family_handle_t** pHandle;
rocksdb_writeoptions_t* writeOpts; // rocksdb_writeoptions_t* writeOpts;
rocksdb_readoptions_t* readOpts; // rocksdb_readoptions_t* readOpts;
rocksdb_options_t** cfOpts; // rocksdb_options_t** cfOpts;
rocksdb_options_t* dbOpt; // rocksdb_options_t* dbOpt;
struct SStreamTask* pOwner; // struct SStreamTask* pOwner;
void* param; // void* param;
void* env; // void* env;
SListNode* pComparNode; // SListNode* pComparNode;
void* pBackendHandle; // void* pBackend;
char idstr[64]; // char idstr[64];
void* compactFactory; // void* compactFactory;
// TdThreadRwlock rwLock;
TDB* db; // =======
TTB* pStateDb; // typedef struct STdbState {
TTB* pFuncStateDb; // rocksdb_t* rocksdb;
TTB* pFillStateDb; // todo refactor // rocksdb_column_family_handle_t** pHandle;
TTB* pSessionStateDb; // rocksdb_writeoptions_t* writeOpts;
TTB* pParNameDb; // rocksdb_readoptions_t* readOpts;
TTB* pParTagDb; // rocksdb_options_t** cfOpts;
TXN* txn; // rocksdb_options_t* dbOpt;
} STdbState; // struct SStreamTask* pOwner;
// void* param;
// incremental state storage // void* env;
typedef struct { // SListNode* pComparNode;
STdbState* pTdbState; // void* pBackendHandle;
SStreamFileState* pFileState; // char idstr[64];
int32_t number; // void* compactFactory;
SSHashObj* parNameMap; //
int64_t checkPointId; // TDB* db;
int32_t taskId; // TTB* pStateDb;
int64_t streamId; // TTB* pFuncStateDb;
} SStreamState; // TTB* pFillStateDb; // todo refactor
// TTB* pSessionStateDb;
SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages); // TTB* pParNameDb;
// TTB* pParTagDb;
// TXN* txn;
//} STdbState;
//>>>>>>> enh/dev3.0
SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
void streamStateClose(SStreamState* pState, bool remove); void streamStateClose(SStreamState* pState, bool remove);
int32_t streamStateBegin(SStreamState* pState); int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState); int32_t streamStateCommit(SStreamState* pState);
void streamStateDestroy(SStreamState* pState, bool remove); void streamStateDestroy(SStreamState* pState, bool remove);
int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark); int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
typedef struct {
rocksdb_iterator_t* iter;
rocksdb_snapshot_t* snapshot;
rocksdb_readoptions_t* readOpt;
rocksdb_t* db;
TBC* pCur;
int64_t number;
} SStreamStateCur;
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen); int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
...@@ -119,7 +117,7 @@ int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key); ...@@ -119,7 +117,7 @@ int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal); int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
void streamFreeVal(void* val); void streamStateFreeVal(void* val);
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key); SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
......
...@@ -78,11 +78,11 @@ enum { ...@@ -78,11 +78,11 @@ enum {
TASK_TRIGGER_STATUS__ACTIVE, TASK_TRIGGER_STATUS__ACTIVE,
}; };
enum { typedef enum {
TASK_LEVEL__SOURCE = 1, TASK_LEVEL__SOURCE = 1,
TASK_LEVEL__AGG, TASK_LEVEL__AGG,
TASK_LEVEL__SINK, TASK_LEVEL__SINK,
}; } ETASK_LEVEL;
enum { enum {
TASK_OUTPUT__FIXED_DISPATCH = 1, TASK_OUTPUT__FIXED_DISPATCH = 1,
...@@ -206,7 +206,7 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) { ...@@ -206,7 +206,7 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
void* streamQueueNextItem(SStreamQueue* queue); void* streamQueueNextItem(SStreamQueue* queue);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type); SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit); void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit); SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit);
...@@ -284,13 +284,13 @@ struct SStreamTask { ...@@ -284,13 +284,13 @@ struct SStreamTask {
int16_t dispatchMsgType; int16_t dispatchMsgType;
SStreamStatus status; SStreamStatus status;
int32_t selfChildId; int32_t selfChildId;
int32_t nodeId; int32_t nodeId; // vgroup id
SEpSet epSet; SEpSet epSet;
SCheckpointInfo chkInfo; SCheckpointInfo chkInfo;
STaskExec exec; STaskExec exec;
int8_t fillHistory; // fill history
// fill history int64_t ekey; // end ts key
int8_t fillHistory; int64_t endVer; // end version
// children info // children info
SArray* childEpInfo; // SArray<SStreamChildEpInfo*> SArray* childEpInfo; // SArray<SStreamChildEpInfo*>
...@@ -327,6 +327,7 @@ struct SStreamTask { ...@@ -327,6 +327,7 @@ struct SStreamTask {
int64_t checkpointingId; int64_t checkpointingId;
int32_t checkpointAlignCnt; int32_t checkpointAlignCnt;
struct SStreamMeta* pMeta; struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
}; };
// meta // meta
@@ -346,12 +347,14 @@ typedef struct SStreamMeta {
  void* streamBackend;
  int32_t streamBackendId;
  int64_t streamBackendRid;
+  SHashObj* pTaskBackendUnique;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
-SStreamTask* tNewStreamTask(int64_t streamId);
+SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam,
+                            SArray* pTaskList);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
......
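The constructor change above widens tNewStreamTask from a bare streamId to also take the task level, the fill-history flag, the trigger parameter and the owning task list. A hypothetical call site under that signature; streamId and vgId are illustrative placeholders and error handling is trimmed:

    SArray*      pTaskList = taosArrayInit(4, sizeof(void*));
    SStreamTask* pTask = tNewStreamTask(streamId, TASK_LEVEL__SOURCE, /*fillHistory*/ 0,
                                        /*triggerParam*/ 0, pTaskList);
    if (pTask != NULL) {
      pTask->nodeId = vgId;  // vgroup id, per the new field comment above
      tFreeStreamTask(pTask);
    }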
@@ -21,23 +21,16 @@
#include "tarray.h"
#include "tdef.h"
#include "tlist.h"
+#include "storageapi.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SStreamFileState SStreamFileState;
-typedef struct SRowBuffPos {
-  void* pRowBuff;
-  void* pKey;
-  bool  beFlushed;
-  bool  beUsed;
-} SRowBuffPos;
typedef SList SStreamSnapshot;
-typedef TSKEY (*GetTsFun)(void*);
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
                                      GetTsFun fp, void* pFile, TSKEY delMark);
void streamFileStateDestroy(SStreamFileState* pFileState);
......
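With SRowBuffPos and the GetTsFun typedef leaving this header (the new storageapi.h include suggests they are defined there now), streamFileStateInit keeps the signature shown above. A sketch of an init/destroy pair under that assumption; the sizes and the timestamp extractor are illustrative only:

    static TSKEY getWinTs(void* pKey) { return ((SWinKey*)pKey)->ts; }

    SStreamFileState* pFileState =
        streamFileStateInit(64 * 1024 * 1024 /*memSize*/, sizeof(SWinKey), 256 /*rowSize*/,
                            0 /*selectRowSize*/, getWinTs, NULL /*pFile*/, INT64_MIN /*delMark*/);
    if (pFileState != NULL) {
      streamFileStateDestroy(pFileState);
    }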
@@ -19,7 +19,7 @@
#include "tarray.h"
#include "tcommon.h"
#include "tmsg.h"
-#include "tscalablebf.h"
+#include "storageapi.h"
#ifdef __cplusplus
extern "C" {
@@ -30,18 +30,18 @@ typedef struct SUpdateKey {
  TSKEY ts;
} SUpdateKey;
-typedef struct SUpdateInfo {
-  SArray *pTsBuckets;
-  uint64_t numBuckets;
-  SArray *pTsSBFs;
-  uint64_t numSBFs;
-  int64_t interval;
-  int64_t watermark;
-  TSKEY minTS;
-  SScalableBf *pCloseWinSBF;
-  SHashObj *pMap;
-  uint64_t maxDataVersion;
-} SUpdateInfo;
+//typedef struct SUpdateInfo {
+//  SArray *pTsBuckets;
+//  uint64_t numBuckets;
+//  SArray *pTsSBFs;
+//  uint64_t numSBFs;
+//  int64_t interval;
+//  int64_t watermark;
+//  TSKEY minTS;
+//  SScalableBf *pCloseWinSBF;
+//  SHashObj *pMap;
+//  uint64_t maxDataVersion;
+//} SUpdateInfo;
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
......
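Commenting out the SUpdateInfo definition here suggests callers now treat it as an opaque handle (presumably provided via the new storageapi.h include) and keep going through the declared helpers. A sketch with illustrative arguments; the matching destroy call is not part of this hunk:

    SUpdateInfo* pInfo = updateInfoInit(/*interval*/ 10 * 1000, /*precision*/ TSDB_TIME_PRECISION_MILLI,
                                        /*watermark*/ 5 * 1000);
    if (pInfo != NULL) {
      // feed incoming rows to the update/duplicate checker ...
    }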
@@ -149,6 +149,7 @@ struct SWalReader {
  TdFilePtr pIdxFile;
  int64_t curFileFirstVer;
  int64_t curVersion;
+  int64_t skipToVersion;  // skip data and jump to destination version, usually used by stream resume ignoring untreated data
  int64_t capacity;
  TdThreadMutex mutex;
  SWalFilterCond cond;
@@ -200,6 +201,8 @@ int32_t walReaderSeekVer(SWalReader *pRead, int64_t ver);
int32_t walNextValidMsg(SWalReader *pRead);
int64_t walReaderGetCurrentVer(const SWalReader *pReader);
int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
+int64_t walReaderGetSkipToVersion(SWalReader *pReader);
+void walReaderSetSkipToVersion(SWalReader *pReader, int64_t ver);
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset);
@@ -209,7 +212,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
-SWalRef *walRefFirstVer(SWal *, SWalRef *);
+void walRefFirstVer(SWal *, SWalRef *);
+void walRefLastVer(SWal *, SWalRef *);
SWalRef *walRefCommittedVer(SWal *);
SWalRef *walOpenRef(SWal *);
......
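The new skipToVersion field and its get/set helpers let a resuming stream task move the WAL reader past entries it has already handled instead of re-reading them. A minimal sketch, assuming an already-opened pReader, an illustrative lastProcessedVer, and the usual 0-on-success convention:

    walReaderSetSkipToVersion(pReader, lastProcessedVer + 1);
    while (walNextValidMsg(pReader) == 0) {
      int64_t ver = walReaderGetCurrentVer(pReader);
      // process the entry fetched at version ver ...
    }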
@@ -409,6 +409,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MNODE_ALREADY_IS_VOTER       TAOS_DEF_ERROR_CODE(0, 0x0413) // internal
#define TSDB_CODE_MNODE_ONLY_TWO_MNODE         TAOS_DEF_ERROR_CODE(0, 0x0414) // internal
#define TSDB_CODE_MNODE_NO_NEED_RESTORE        TAOS_DEF_ERROR_CODE(0, 0x0415) // internal
+#define TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE  TAOS_DEF_ERROR_CODE(0, 0x0416)
// vnode
// #define TSDB_CODE_VND_ACTION_IN_PROGRESS    TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
......
@@ -32,7 +32,7 @@ extern "C" {
#define TD_VER_MAX UINT64_MAX  // TODO: use the real max version from query handle
// Bytes for each type.
-extern const int32_t TYPE_BYTES[17];
+extern const int32_t TYPE_BYTES[21];
// TODO: replace and remove code below
#define CHAR_BYTES sizeof(char)
......
@@ -80,4 +80,5 @@ fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
+[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :
@@ -40,6 +40,7 @@ else
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
+[ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
${csudo}rm -f ${log_link_dir} || :
......
@@ -31,6 +31,7 @@ cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
wslibfile="libtaosws.so"
+rocksdblib="librocksdb.so.8"
# create install dir
install_home_path="/usr/local/taos"
@@ -94,6 +95,7 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
+[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||:
[ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
......
@@ -45,6 +45,7 @@ echo buildroot: %{buildroot}
libfile="libtaos.so.%{_version}"
wslibfile="libtaosws.so"
+rocksdblib="librocksdb.so.8"
# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
@@ -92,6 +93,7 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
+[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||:
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
@@ -174,6 +176,7 @@ fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f %{homepath}/driver/libtaos* || :
+${csudo}rm -f %{homepath}/driver/librocksdb* || :
#Scripts executed after installation
%post
@@ -219,6 +222,7 @@ if [ $1 -eq 0 ];then
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
+${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
......
@@ -250,18 +250,30 @@ function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
+${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
+${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
+${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
+${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
+${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
  ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
  ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+  ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || :
+  ${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || :
  [ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi
......
@@ -70,8 +70,7 @@ if [ "$pagMode" == "lite" ]; then
  taostools_bin_files=""
else
  if [ "$verMode" == "cloud" ]; then
-    taostools_bin_files=" ${build_dir}/bin/taosdump \
-        ${build_dir}/bin/taosBenchmark"
+    taostools_bin_files=" ${build_dir}/bin/taosBenchmark"
  else
    wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
      && echo "TDinsight.sh downloaded!" \
@@ -112,9 +111,11 @@ fi
if [ "$osType" == "Darwin" ]; then
  lib_files="${build_dir}/lib/libtaos.${version}.dylib"
  wslib_files="${build_dir}/lib/libtaosws.dylib"
+  rocksdb_lib_files="${build_dir}/lib/librocksdb.dylib.8.1.1"
else
  lib_files="${build_dir}/lib/libtaos.so.${version}"
  wslib_files="${build_dir}/lib/libtaosws.so"
+  rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1"
fi
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
@@ -337,6 +338,7 @@ fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
+[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || :
# Copy connector
if [ "$verMode" == "cluster" ]; then
......
@@ -202,10 +202,19 @@ function install_lib() {
  log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
  ${csudo}rm -f ${lib_link_dir}/libtaos* || :
  ${csudo}rm -f ${lib64_link_dir}/libtaos* || :
+  #rocksdb
+  [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
+  [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
+  #rocksdb
+  [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
+  [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
  [ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
  [ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
+  ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
  ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
  ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
@@ -214,6 +223,7 @@ function install_lib() {
  if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
    ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
    ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
+    ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
    [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
  fi
......
@@ -142,11 +142,14 @@ function clean_local_bin() {
function clean_lib() {
  # Remove link
  ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
+  ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
  [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
  ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
+  ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
  [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
  #${csudo}rm -rf ${v15_java_app_dir} || :
}
function clean_header() {
......
This file's diff is collapsed.
@@ -1757,6 +1757,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
    return TSDB_CODE_TSC_INTERNAL_ERROR;
  }
+  taosMemoryFreeClear(pResultInfo->convertJson);
  pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
  if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
  char* p1 = pResultInfo->convertJson;
......
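The one-line fix above releases any buffer left over from a previous conversion before allocating a new one, so repeated conversions of the same result set no longer leak. The same free-before-reallocate pattern in isolation, assuming taosMemoryFreeClear frees the block and resets the pointer to NULL as its name implies:

    taosMemoryFreeClear(pResultInfo->convertJson);            // safe no-op when already NULL
    pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
    if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;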
The diffs for the remaining files in this commit are collapsed.