Commit 78c14a2c authored by S Shengliang Guan

Merge remote-tracking branch 'origin/3.0' into merge/mainto3.0_1228

@@ -173,7 +173,7 @@ def pre_test_build_mac() {
'''
sh '''
cd ${WK}/debug
-cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false
+cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
make -j10
ctest -j10 || exit 7
'''
......
@@ -141,12 +141,12 @@ ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
ENDIF()
+IF ("${SIMD_SUPPORT}" MATCHES "true")
IF (COMPILER_SUPPORT_FMA)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
ENDIF()
-IF ("${SIMD_SUPPORT}" MATCHES "true")
IF (COMPILER_SUPPORT_AVX)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
......
@@ -21,7 +21,7 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
-INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
+INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER} ${TD_BUILD_TAOSA_INTERNAL})")
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
......
@@ -47,7 +47,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
:::note
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
-- Only data in array format is accepted and so an array must be used even if there is only one row.
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
:::
......
@@ -27,10 +27,11 @@ database_option: {
| PRECISION {'ms' | 'us' | 'ns'}
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
-| STRICT {'off' | 'on'}
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
+| TABLE_PREFIX value
+| TABLE_SUFFIX value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
@@ -61,9 +62,6 @@ database_option: {
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
-- STRICT: specifies whether strong data consistency is enabled. The default value is off.
-- on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster.
-- off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node.
- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
- 1: WAL is enabled but fsync is disabled.
- 2: WAL and fsync are both enabled.
@@ -71,6 +69,8 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
+- TABLE_PREFIX: the length of the prefix in a table name that is ignored when distributing a table to a vnode based on its name (see the example after this list).
+- TABLE_SUFFIX: the length of the suffix in a table name that is ignored when distributing a table to a vnode based on its name.
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default value of multiple copy is 1 day.
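The following is a minimal sketch of the two new options in a CREATE DATABASE statement; the database name, the VGROUPS count, and the chosen lengths are illustrative assumptions, not part of this diff:

```sql
-- Assume child tables are named like "dev_1001_sh": a fixed "dev_" prefix
-- (4 characters) and a fixed "_sh" suffix (3 characters). Ignoring both when
-- hashing the table name distributes tables by the variable middle part only.
CREATE DATABASE sensors VGROUPS 4 TABLE_PREFIX 4 TABLE_SUFFIX 3;
```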
......
@@ -876,7 +876,8 @@ INTERP(expr)
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of: b (nanosecond), u (microsecond), a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate in a single timeline. So it must be used with `partition by tbname` when it's used on a STable.
-- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
+- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported after version 3.0.1.4).
+- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether a result row is an original record or a data point generated by the interpolation algorithm (supported after version 3.0.2.1); see the sketch after this list.
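A hedged sketch of both pseudocolumns in one query; the table `t1`, its column `v`, and the time range are illustrative assumptions, and the range is written with the RANGE clause from the full INTERP syntax (not shown in this excerpt):

```sql
-- Interpolate v every 500 ms across the range; _irowts carries each
-- interpolated timestamp and _isfilled marks rows produced by FILL.
SELECT _irowts, _isfilled, INTERP(v)
FROM t1
RANGE('2023-01-01 00:00:00', '2023-01-01 00:00:10')
EVERY(500a)
FILL(LINEAR);
```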
### LAST
......
@@ -108,7 +108,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
-For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering:
+For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering; the default is AT_ONCE:
1. AT_ONCE: triggers on write
......
@@ -178,6 +178,77 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.
+Example: show table distributed d0\G; displays the block distribution of table `d0` in detailed (vertical) format.
+*************************** 1.row ***************************
+_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
+Total_Blocks: Table `d0` contains 5 blocks in total.
+Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB.
+Average_size: The average size of each block is 18.73 KB.
+Compression_Ratio: The data compression rate is 23.98%.
+*************************** 2.row ***************************
+_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
+Total_Rows: Table `d0` contains 20,000 rows.
+Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, meaning there are no such rows.
+MinRows: The minimum number of rows in a block is 3,616.
+MaxRows: The maximum number of rows in a block is 4,096.
+Average_Rows: The average number of rows in a block is 4,000.
+*************************** 3.row ***************************
+_block_dist: Total_Tables=[1] Total_Files=[2]
+Total_Tables: The number of child tables, 1 in this example.
+Total_Files: The number of files storing the table's data, 2 in this example.
+*************************** 4.row ***************************
+_block_dist: --------------------------------------------------------------------------------
+*************************** 5.row ***************************
+_block_dist: 0100 |
+*************************** 6.row ***************************
+_block_dist: 0299 |
+*************************** 7.row ***************************
+_block_dist: 0498 |
+*************************** 8.row ***************************
+_block_dist: 0697 |
+*************************** 9.row ***************************
+_block_dist: 0896 |
+*************************** 10.row ***************************
+_block_dist: 1095 |
+*************************** 11.row ***************************
+_block_dist: 1294 |
+*************************** 12.row ***************************
+_block_dist: 1493 |
+*************************** 13.row ***************************
+_block_dist: 1692 |
+*************************** 14.row ***************************
+_block_dist: 1891 |
+*************************** 15.row ***************************
+_block_dist: 2090 |
+*************************** 16.row ***************************
+_block_dist: 2289 |
+*************************** 17.row ***************************
+_block_dist: 2488 |
+*************************** 18.row ***************************
+_block_dist: 2687 |
+*************************** 19.row ***************************
+_block_dist: 2886 |
+*************************** 20.row ***************************
+_block_dist: 3085 |
+*************************** 21.row ***************************
+_block_dist: 3284 |
+*************************** 22.row ***************************
+_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
+*************************** 23.row ***************************
+_block_dist: 3682 |
+*************************** 24.row ***************************
+_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)
+Query OK, 24 row(s) in set (0.002444s)
+The above shows the block distribution by the number of rows in each block. In this example, `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681, and `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row counts are between 3,881 and 4,096. The number of blocks whose row counts fall in the other ranges is zero.
## SHOW TAGS
```sql
......
@@ -878,8 +878,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
| taos-jdbcdriver version | major changes |
| :---------------------: | :--------------------------------------------: |
+| 3.0.3 | fixed a timestamp resolution error for REST connections on JDK 17+ |
| 3.0.1 - 3.0.2 | fixed incorrect resultSet parsing in some cases; 3.0.1 is compiled on JDK 11, so 3.0.2 is recommended for JDK 8 environments |
| 3.0.0 | Support for TDengine 3.0 |
+| 2.0.42 | fixed the wasNull interface return value in WebSocket connections |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
......
@@ -21,6 +21,7 @@ taosAdapter provides the following features.
- Seamless connection to collectd
- Seamless connection to StatsD
- Supports Prometheus remote_read and remote_write
+- Get a table's VGroup ID
## taosAdapter architecture diagram
@@ -59,6 +60,7 @@ Usage of taosAdapter:
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
+--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@@ -100,6 +102,7 @@ Usage of taosAdapter:
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
+--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" (default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@@ -110,6 +113,7 @@ Usage of taosAdapter:
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
+--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" (default 0, means no ttl)
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@@ -131,6 +135,7 @@ Usage of taosAdapter:
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
+--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
@@ -174,6 +179,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Support for Prometheus remote_read and remote_write
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
+- Get a table's VGroup ID. For more information about VGroups, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
## Interfaces
@@ -195,6 +201,7 @@ Support InfluxDB query parameters as follows.
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
+- `ttl` The time to live (TTL) of automatically created subtables. It cannot be updated; TDengine uses the ttl value carried with the first record of a subtable when creating it. For more information, please refer to [Create Table](/taos-sql/table/#create-table).
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
@@ -236,6 +243,10 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
<Prometheus />
+### Get a table's VGroup ID
+You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get a table's VGroup ID. For more information about VGroups, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
## Memory usage optimization methods
taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 to 100, and represent a percentage of the system's physical memory.
......
@@ -204,6 +204,12 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-a/--replica <replicaNum\>** :
Specify the number of replicas when creating the database. The default value is 1.
+- **-k/--keep-trying <NUMBER\>** :
+Keep trying to insert after a failure. The default is not to retry. Available with v3.0.9+.
+- **-z/--trying-interval <NUMBER\>** :
+Specify the interval between insert retries. The value must be a positive number. Only valid when retrying is enabled. Available with v3.0.9+.
- **-V/--version** :
Show version information only. Users should not use it with other parameters.
@@ -231,6 +237,10 @@ The parameters listed in this section apply to all function modes.
`filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters)
+- **keep_trying**: Keep trying to insert after a failure. The default is not to retry. Available with v3.0.9+.
+- **trying_interval**: Specify the interval between insert retries. The value must be a positive number. Only valid when keep_trying is enabled. Available with v3.0.9+.
#### Database related configuration parameters
The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. The other parameters correspond to the database parameters specified when `create database` in [../../taos-sql/database].
......
@@ -19,7 +19,7 @@ Users should not use taosdump to back up raw data, environment settings, hardwar
There are two ways to install taosdump:
-- Install the taosTools official installer. Please find taosTools from [All download links](https://www.tdengine.com/all-downloads) page and download and install it.
+- Install the taosTools official installer. Please find taosTools from [Release History](https://docs.taosdata.com/releases/tools/) page and download and install it.
- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
......
@@ -10,6 +10,18 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
+## 3.0.2.2
+<Release type="tdengine" version="3.0.2.2" />
+## 3.0.2.1
+<Release type="tdengine" version="3.0.2.1" />
+## 3.0.2.0
+<Release type="tdengine" version="3.0.2.0" />
## 3.0.1.8
<Release type="tdengine" version="3.0.1.8" />
......
@@ -10,6 +10,18 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
+## 2.4.0
+<Release type="tools" version="2.4.0" />
+## 2.3.3
+<Release type="tools" version="2.3.3" />
+## 2.3.2
+<Release type="tools" version="2.3.2" />
## 2.3.0
<Release type="tools" version="2.3.0" />
......
@@ -24,7 +24,7 @@ func main() {
if err != nil {
	panic(err)
}
-_, err = db.Exec("create topic if not exists example_tmq_topic with meta as DATABASE example_tmq")
+_, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq")
if err != nil {
	panic(err)
}
@@ -84,20 +84,6 @@ func main() {
if err != nil {
	panic(err)
}
-for {
-	result, err := consumer.Poll(time.Second)
-	if err != nil {
-		panic(err)
-	}
-	if result.Type != common.TMQ_RES_TABLE_META {
-		panic("want message type 2 got " + strconv.Itoa(int(result.Type)))
-	}
-	data, _ := json.Marshal(result.Meta)
-	fmt.Println(string(data))
-	consumer.Commit(context.Background(), result.Message)
-	consumer.FreeMessage(result.Message)
-	break
-}
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
	panic(err)
......
@@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
-affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m 24.4)")
+affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
print("affected_row", affected_row)
# output:
# affected_row 3
......
@@ -47,7 +47,6 @@ The OpenTSDB JSON protocol uses one JSON string to represent one or more rows of data
:::note
- For the JSON protocol, TDengine does not automatically convert all tags to NCHAR; strings are converted to NCHAR and numeric values are converted to DOUBLE.
-- TDengine accepts only strings in JSON **array format**; even a single row of data must be written as an array.
- By default the generated child table name is a unique ID derived by rule. You can also configure the smlChildTableName parameter in taos.cfg to use the value of a specific tag as the child table name; that tag value should be globally unique. For example, if a tag is named tname and you configure smlChildTableName=tname, inserting data with `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates a child table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored.
:::
......
@@ -19,6 +19,7 @@ TDengine provides schemaless APIs compatible with the InfluxDB (v1) and OpenTSDB line protocols
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
+- `ttl` The lifetime (TTL) of automatically created subtables, determined by the TTL parameter of the first record of a subtable; it cannot be updated. For more information, see the TTL parameter of the [table creation document](taos-sql/table/#创建表).
Note: InfluxDB token authentication is not supported at present; only Basic authentication and query-parameter validation are supported.
......
@@ -68,39 +68,38 @@ TDengine currently supports timestamp, numeric, character, and Boolean types, which map to Java types
### Installing the connector
<Tabs defaultValue="maven">
<TabItem value="maven" label="Install via Maven">
taos-jdbcdriver has been published to the [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver), and all major repositories are in sync with it.
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
In a Maven project, add the following dependency to pom.xml:
```xml-dtd
<dependency>
 <groupId>com.taosdata.jdbc</groupId>
 <artifactId>taos-jdbcdriver</artifactId>
 <version>3.0.0</version>
</dependency>
```
</TabItem>
<TabItem value="source" label="Build from source">
You can download the TDengine source code and build the latest version of the Java connector yourself:
```shell
git clone https://github.com/taosdata/taos-connector-jdbc.git
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
After compilation, a jar package named taos-jdbcdriver-3.0.*-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.
</TabItem>
</Tabs>
## Establishing a connection
@@ -111,41 +110,40 @@ The standard format of the TDengine JDBC URL is:
Establishing a connection differs slightly between the native connection and the REST connection.
<Tabs defaultValue="rest">
<TabItem value="native" label="Native connection">
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
The example above uses the TSDBDriver of the JDBC native connection to connect to a database named test on host taosdemo.com, port 6030 (the default TDengine port). The URL specifies the user name (user) as root and the password (password) as taosdata.
**Note**: With the JDBC native connection, taos-jdbcdriver depends on the client driver (libtaos.so on Linux, taos.dll on Windows, libtaos.dylib on macOS).
The configuration parameters in the URL are as follows:
- user: TDengine user name; default 'root'.
- password: user password; default 'taosdata'.
- cfgdir: directory of the client configuration file; defaults to `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
- charset: character set used by the client; defaults to the system character set.
- locale: client locale; defaults to the current system locale.
- timezone: time zone used by the client; defaults to the current system time zone.
- batchfetch: true: pull result sets in batches when executing queries; false: pull row by row. Default: true. Batch pulling fetches a batch of data at a time and can significantly improve query performance when the amount of queried data is large.
- batchErrorIgnore: true: when one SQL statement fails during a Statement's executeBatch, the following statements continue to execute; false: no statements after the failed SQL are executed. Default: false.
For the usage of the JDBC native connection, see the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
**Establishing a connection using the TDengine client driver configuration file**
When using the JDBC native connection to connect to a TDengine cluster, you can use the client driver configuration file and specify parameters such as firstEp and secondEp of the cluster in it, as follows:
1. Do not specify the hostname and port in the Java application
```java
public Connection getConn() throws Exception{
	Class.forName("com.taosdata.jdbc.TSDBDriver");
	String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
	Properties connProps = new Properties();
@@ -154,82 +152,75 @@ The standard format of the TDengine JDBC URL is:
	connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
	Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
	return conn;
}
```
2. Specify firstEp and secondEp in the configuration file
```shell
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
In the example above, JDBC uses the client configuration file to establish a connection to hostname cluster_node1, port 6030, database test. If the firstEp node in the cluster fails, JDBC tries to connect to the cluster via secondEp.
In TDengine, as long as either firstEp or secondEp is valid, the connection to the cluster can be established.
> **Note**: The configuration file here is the one on the machine where the application invoking the JDBC connector runs: /etc/taos/taos.cfg by default on Linux and C://TDengine/cfg/taos.cfg by default on Windows.
</TabItem>
<TabItem value="rest" label="REST connection">
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
The example above uses the RestfulDriver of the JDBC REST connection to connect to a database named test on host taosdemo.com, port 6041. The URL specifies the user name (user) as root and the password (password) as taosdata.
The JDBC REST connection does not depend on the client driver. Compared with the JDBC native connection, you only need to:
1. specify the driverClass as "com.taosdata.jdbc.rs.RestfulDriver";
2. start the jdbcUrl with "jdbc:TAOS-RS://";
3. use port 6041 as the connection port.
The configuration parameters in the URL are as follows:
- user: TDengine user name; default 'root'.
- password: user password; default 'taosdata'.
- batchfetch: true: pull result sets in batches when executing queries; false: pull row by row. Default: false. Row-by-row pulling transfers data over HTTP. The JDBC REST connection supports batch pulling, in which taos-jdbcdriver and TDengine transfer data over a WebSocket connection; compared with HTTP, WebSocket lets the JDBC REST connection handle large result sets and improves query performance.
- charset: the character set used to parse string data when batch pulling is enabled.
- batchErrorIgnore: true: when one SQL statement fails during a Statement's executeBatch, the following statements continue to execute; false: no statements after the failed SQL are executed. Default: false.
- httpConnectTimeout: connection timeout in ms; default 5000.
- httpSocketTimeout: socket timeout in ms; default 5000. Takes effect only when batchfetch is false.
- messageWaitTimeout: message timeout in ms; default 3000. Takes effect only when batchfetch is true.
- useSSL: whether SSL is used in the connection.
**Note**: Some configuration items (such as locale and timezone) do not take effect in REST connections.
:::note
- Unlike the native connection, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of tables and supertables in SQL. For example:
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
- If dbname is specified in the URL, the JDBC REST connection uses /rest/sql/dbname as the URL for RESTful requests by default, and dbname does not need to be specified in SQL. For example, if the URL is jdbc:TAOS-RS://127.0.0.1:6041/test, you can execute the SQL: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
:::
</TabItem>
</Tabs>
### Obtaining a connection with a URL and Properties
@@ -890,8 +881,10 @@ public static void main(String[] args) throws Exception {
| taos-jdbcdriver version | major changes |
| :------------------: | :----------------------------: |
+| 3.0.3 | fixed a timestamp parsing error for REST connections on JDK 17+ |
| 3.0.1 - 3.0.2 | fixed incorrect result-set parsing in some cases; 3.0.1 is compiled on JDK 11, so 3.0.2 is recommended for JDK 8 environments |
| 3.0.0 | support for TDengine 3.0 |
+| 2.0.42 | fixed the wasNull interface return value in WebSocket connections |
| 2.0.41 | fixed the encoding of user name and password in REST connections |
| 2.0.39 - 2.0.40 | added REST connection/request timeout settings |
| 2.0.38 | added batch pulling to JDBC REST connections |
@@ -928,7 +921,7 @@ public static void main(String[] args) throws Exception {
**Cause**: taos-jdbcdriver 3.0.1 must be used in a JDK 11+ environment.
-**Solution**: Switch to taos-jdbcdriver 3.0.2.
+**Solution**: Switch to taos-jdbcdriver 3.0.2+.
For other problems, see the [FAQ](../../../train-faq/faq)
......
@@ -27,10 +27,11 @@ database_option: {
| PRECISION {'ms' | 'us' | 'ns'}
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
-| STRICT {'off' | 'on'}
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
+| TABLE_PREFIX value
+| TABLE_SUFFIX value
| WAL_RETENTION_PERIOD value
| WAL_ROLL_PERIOD value
| WAL_RETENTION_SIZE value
@@ -61,9 +62,6 @@ database_option: {
- PRECISION: the timestamp precision of the database. ms means milliseconds, us means microseconds, ns means nanoseconds; the default is ms.
- REPLICA: the number of database replicas, 1 or 3; the default is 1. In a cluster, the number of replicas must be less than or equal to the number of DNODEs.
- RETENTIONS: the aggregation periods and retention durations of the data. For example, RETENTIONS 15s:7d,1m:21d,15m:50d means raw data collected every 15 seconds is kept for 7 days, data aggregated per 1 minute is kept for 21 days, and data aggregated per 15 minutes is kept for 50 days. Exactly three levels of retention are currently supported.
-- STRICT: the consistency requirement of data replication; the default is off.
-- on: strong consistency; the standard Raft protocol runs and success is returned once half the nodes commit.
-- off: weak consistency; success is returned once the local commit completes.
- WAL_LEVEL: the WAL level; the default is 1.
- 1: write WAL but do not execute fsync.
- 2: write WAL and execute fsync.
@@ -71,6 +69,8 @@ database_option: {
- SINGLE_STABLE: whether only one supertable can be created in this database, intended for supertables with very many columns.
- 0: multiple supertables can be created.
- 1: only one supertable can be created.
+- TABLE_PREFIX: the length of the table-name prefix that the internal storage engine ignores when assigning the VNODE that stores a table's data based on the table name (see the example after this list).
+- TABLE_SUFFIX: the length of the table-name suffix that the internal storage engine ignores when assigning the VNODE that stores a table's data based on the table name.
- WAL_RETENTION_PERIOD: an extra retention policy for WAL files, used for data subscription. It is the retention duration of the WAL in seconds. The default is 0 for a single replica, meaning files are deleted immediately after being flushed to disk; -1 means never delete. The default is 4 days for multiple replicas.
- WAL_RETENTION_SIZE: an extra retention policy for WAL files, used for data subscription. It is the maximum retained WAL size in KB. The default is 0 for a single replica, meaning files are deleted immediately after being flushed to disk. The default is -1 for multiple replicas, meaning never delete.
- WAL_ROLL_PERIOD: the WAL rotation period in seconds. After a WAL file is created and written for this long, a new WAL file is created automatically. The default is 0 for a single replica, meaning a new file is created only on flush. The default is 1 day for multiple replicas.
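As referenced above, a minimal illustrative sketch of the two new options; the database name and the chosen lengths are assumptions, not part of this diff:

```sql
-- Ignore a fixed 4-character prefix and a fixed 3-character suffix of each
-- table name when the storage engine assigns the table to a VNODE.
CREATE DATABASE sensors VGROUPS 4 TABLE_PREFIX 4 TABLE_SUFFIX 3;
```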
......
@@ -139,10 +139,10 @@ alter_table_option: {
- ADD COLUMN: add a column.
- DROP COLUMN: delete a column.
-- MODIFY COLUMN: modify the column definition; if the column has a variable-length type, this command can change its width, which can only be increased, not decreased.
+- MODIFY COLUMN: modify the width of a column; the column type must be nchar or binary, and the width can only be increased, not decreased.
- ADD TAG: add a tag to the supertable.
- DROP TAG: delete a tag from the supertable. When a tag is deleted from a supertable, it is automatically deleted from all its subtables.
-- MODIFY TAG: modify the definition of a supertable tag; if the tag has a variable-length type, this command can change its width, which can only be increased, not decreased.
+- MODIFY TAG: modify the column width of a supertable tag; the tag type must be nchar or binary, and the width can only be increased, not decreased (see the sketch after this list).
- RENAME TAG: rename a supertable tag. When a tag is renamed on a supertable, the tag name is automatically updated on all its subtables.
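A hedged sketch of widening a column and a tag; the supertable name `meters` and the column and tag names are illustrative assumptions:

```sql
-- Widen a binary data column to 64 bytes, then widen an nchar tag the same
-- way; per the notes above, widths can only grow.
ALTER STABLE meters MODIFY COLUMN location BINARY(64);
ALTER STABLE meters MODIFY TAG region NCHAR(64);
```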
### Adding a column
......
@@ -880,6 +880,7 @@ INTERP(expr)
- INTERP uses the FILL clause to decide how to interpolate at each qualifying output time.
- INTERP can only interpolate within a single time series, so when applied to a supertable it must be used together with partition by tbname.
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to each interpolation point (supported since 3.0.1.4).
+- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned result is an original record or a data point generated by the interpolation algorithm (supported since 3.0.2.1); see the sketch below.
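As noted above, a brief illustrative query; the table `t1`, column `v`, and time range are assumptions, and the range is written with the RANGE clause from the full INTERP syntax:

```sql
-- _isfilled is true for rows produced by FILL, false for original records.
SELECT _irowts, _isfilled, INTERP(v)
FROM t1
RANGE('2023-01-01 00:00:00', '2023-01-01 00:00:10')
EVERY(1s)
FILL(PREV);
```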
### LAST
......
@@ -114,7 +114,7 @@ SELECT * from information_schema.`ins_streams`;
When creating a stream, you can specify its trigger mode with the TRIGGER clause.
-For non-window computations, stream processing is triggered in real time; for window computations, three trigger modes are currently provided:
+For non-window computations, stream processing is triggered in real time; for window computations, three trigger modes are currently provided, the default being AT_ONCE:
1. AT_ONCE: triggered immediately on write
......
@@ -179,6 +179,75 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.
+Example:
+Statement: show table distributed d0\G; displays the BLOCK distribution of table d0 in vertical format.
+*************************** 1.row ***************************
+_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]
+Total_Blocks: table d0 occupies 5 blocks
+Total_Size: all blocks of table d0 occupy 93.65 KB in the files
+Average_size: each block occupies 18.73 KB in the files on average
+Compression_Ratio: the data compression ratio is 23.98%
+*************************** 2.row ***************************
+_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
+Total_Rows: table d0 contains 20000 rows in total
+Inmem_Rows: the number of rows still in memory, i.e. not yet flushed to disk, is 0, meaning there are none
+MinRows: the minimum number of rows in a BLOCK is 3616
+MaxRows: the maximum number of rows in a BLOCK is 4096
+Average_Rows: the average number of rows in a BLOCK is 4000
+*************************** 3.row ***************************
+_block_dist: Total_Tables=[1] Total_Files=[2]
+Total_Tables: the number of child tables, 1 here
+Total_Files: the number of files the table data is stored in, 2 here
+*************************** 5.row ***************************
+_block_dist: 0100 |
+*************************** 6.row ***************************
+_block_dist: 0299 |
......
+*************************** 22.row ***************************
+_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
+*************************** 23.row ***************************
+_block_dist: 3682 |
+*************************** 24.row ***************************
+_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)
+Query OK, 24 row(s) in set (0.002444s)
+The above is a distribution chart of blocks bucketed by the number of rows they contain; 0100, 0299, 0498, and so on are the row-count buckets. Of this table's 5 blocks, 1 block (20%) falls in the 3483 to 3681 row range and 4 blocks (80%) fall in the 3881 to 4096 (maximum row count) range; the other buckets contain 0 blocks.
## SHOW TAGS
```sql
......
@@ -21,6 +21,7 @@ taosAdapter provides the following features:
- Seamless connection to collectd
- Seamless connection to StatsD
- Support for Prometheus remote_read and remote_write
+- Get the VGroup ID of the virtual node group (VGroup) a table belongs to
## taosAdapter architecture diagram
@@ -59,6 +60,7 @@ Usage of taosAdapter:
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
+--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
@@ -100,6 +102,7 @@ Usage of taosAdapter:
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
+--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL" (default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
@@ -110,6 +113,7 @@ Usage of taosAdapter:
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
+--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL" (default 0, means no ttl)
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
@@ -131,6 +135,7 @@ Usage of taosAdapter:
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
+--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
...@@ -174,6 +179,7 @@ AllowWebSockets ...@@ -174,6 +179,7 @@ AllowWebSockets
node_exporter is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Support for Prometheus remote_read and remote_write
remote_read and remote_write are Prometheus's cluster solution for separating data reads from writes. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Get the VGroup ID of the virtual node group (VGroup) that a table belongs to. For more information about virtual node groups (VGroup), please visit the [architecture document](/tdinternal/arch/#主要逻辑单元).
## Interfaces
...@@ -195,6 +201,7 @@ AllowWebSockets ...@@ -195,6 +201,7 @@ AllowWebSockets
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
- `ttl` The lifetime (TTL) of automatically created child tables. It is determined by the TTL parameter of the first row written to the child table and cannot be updated afterwards. For more information, please refer to the TTL parameter in the [table creation document](taos-sql/table/#创建表).
Note: InfluxDB token authentication is not supported at the moment. Only Basic authentication and query-parameter authentication are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
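For example, to apply a TTL to the automatically created child tables, append the `ttl` parameter to the same request (a sketch; the value 100 is illustrative): curl --request POST "http://127.0.0.1:6041/influxdb/v1/write?db=test&ttl=100" --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"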
...@@ -235,6 +242,10 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输 ...@@ -235,6 +242,10 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
<Prometheus /> <Prometheus />
### Get the VGroup ID of a table
You can call the HTTP interface `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get the VGroup ID of a table. For more information about virtual node groups (VGroup), please visit the [architecture document](/tdinternal/arch/#主要逻辑单元).
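Example (a sketch reusing the Basic authentication shown above; the response format is omitted here): curl --user "root:taosdata" "http://127.0.0.1:6041/rest/vgid?db=test&table=d0"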
## Memory usage optimization
taosAdapter monitors its own memory usage at runtime and adjusts its behavior through two thresholds. The valid value range is an integer from -1 to 100, expressed as a percentage of total system physical memory.
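For example, assuming the two thresholds are exposed as `monitor.pauseQueryMemoryThreshold` and `monitor.pauseAllMemoryThreshold` (option names shown here as an assumption, not confirmed by this changeset): taosadapter --monitor.pauseQueryMemoryThreshold=70 --monitor.pauseAllMemoryThreshold=80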
...@@ -277,7 +288,7 @@ http 返回内容: ...@@ -277,7 +288,7 @@ http 返回内容:
## taosAdapter monitoring metrics
taosAdapter collects HTTP-related metrics, CPU percentage, and memory percentage.
### HTTP interface
...@@ -289,13 +300,13 @@ http://<fqdn>:6041/metrics ...@@ -289,13 +300,13 @@ http://<fqdn>:6041/metrics
### Writing to TDengine
taosAdapter supports writing the HTTP monitoring metrics, CPU percentage, and memory percentage to TDengine.
Related configuration parameters:
| **Configuration item**  | **Description**                                                              | **Default** |
|-------------------------|------------------------------------------------------------------------------|-------------|
| monitor.collectDuration | CPU and memory collection interval                                           | 3s          |
| monitor.identity        | Identifier of the current taosAdapter; 'hostname:port' is used when not set  |             |
| monitor.incgroup        | Whether it is running inside a cgroup (set to true when run in a container)  | false       |
| monitor.writeToTD       | Whether to write the metrics to TDengine                                     | false       |
......
...@@ -204,6 +204,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ...@@ -204,6 +204,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **-a/--replica <replicaNum\>** :
Specify the number of replicas when creating the database. The default value is 1.
- **-k/--keep-trying <NUMBER\>** : Number of times to retry after a failure. No retry is performed by default. Requires v3.0.9 or later.
- **-z/--trying-interval <NUMBER\>** : Interval between retries in milliseconds. Effective only when retrying has been enabled with -k. Requires v3.0.9 or later. See the example after this list.
- **-V/--version** :
Display version information and exit. Cannot be combined with other parameters.
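For example, to retry each failed write up to 3 times with a 1000 ms interval between attempts (illustrative values): taosBenchmark -k 3 -z 1000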
...@@ -231,6 +235,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ...@@ -231,6 +235,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
In the insert scenario, `filetype` must be set to `insert`. For this parameter and the other general parameters, see [General configuration parameters](#通用配置参数).
- **keep_trying** : Number of times to retry after a failure. No retry is performed by default. Requires v3.0.9 or later.
- **trying_interval** : Interval between retries in milliseconds. Effective only when retrying has been enabled with keep_trying. Requires v3.0.9 or later. See the example below.
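For example (a sketch showing only the retry-related keys inside the insert JSON configuration): `"keep_trying": 3, "trying_interval": 1000`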
#### Database-related configuration parameters
The parameters for creating a database are configured under `dbinfo` in the JSON configuration file; the specific parameters are listed below. The remaining parameters correspond to the database parameters of the TDengine `create database` statement; see [../../taos-sql/database].
......
...@@ -22,7 +22,7 @@ taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数 ...@@ -22,7 +22,7 @@ taosdump 是一个逻辑备份工具,它不应被用于备份任何原始数
There are two ways to install taosdump:
- Install the official taosTools package: find taosTools on the [release history page](https://docs.taosdata.com/releases/tools/), then download and install it.
- Compile and install taos-tools yourself; see the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
......
...@@ -10,11 +10,22 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do ...@@ -10,11 +10,22 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 3.0.2.2
<Release type="tdengine" version="3.0.2.2" />
## 3.0.2.1
<Release type="tdengine" version="3.0.2.1" />
## 3.0.2.0
<Release type="tdengine" version="3.0.2.0" />
## 3.0.1.8 ## 3.0.1.8
<Release type="tdengine" version="3.0.1.8" /> <Release type="tdengine" version="3.0.1.8" />
## 3.0.1.7 ## 3.0.1.7
<Release type="tdengine" version="3.0.1.7" /> <Release type="tdengine" version="3.0.1.7" />
......
...@@ -10,6 +10,18 @@ taosTools 各版本安装包下载链接如下: ...@@ -10,6 +10,18 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 2.4.0
<Release type="tools" version="2.4.0" />
## 2.3.3
<Release type="tools" version="2.3.3" />
## 2.3.2
<Release type="tools" version="2.3.2" />
## 2.3.0 ## 2.3.0
<Release type="tools" version="2.3.0" /> <Release type="tools" version="2.3.0" />
......
...@@ -205,7 +205,8 @@ typedef struct SDataBlockInfo { ...@@ -205,7 +205,8 @@ typedef struct SDataBlockInfo {
TSKEY watermark; // used for stream TSKEY watermark; // used for stream
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
STag* pTag; // used for stream partition int32_t tagLen;
void* pTag; // used for stream partition
} SDataBlockInfo; } SDataBlockInfo;
typedef struct SSDataBlock { typedef struct SSDataBlock {
...@@ -340,7 +341,7 @@ typedef struct SExprInfo { ...@@ -340,7 +341,7 @@ typedef struct SExprInfo {
typedef struct { typedef struct {
const char* key; const char* key;
int32_t keyLen; size_t keyLen;
uint8_t type; uint8_t type;
union { union {
const char* value; const char* value;
...@@ -349,7 +350,7 @@ typedef struct { ...@@ -349,7 +350,7 @@ typedef struct {
double d; double d;
float f; float f;
}; };
int32_t length; size_t length;
} SSmlKv; } SSmlKv;
#define QUERY_ASC_FORWARD_STEP 1 #define QUERY_ASC_FORWARD_STEP 1
......
...@@ -265,7 +265,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag); ...@@ -265,7 +265,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug // for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf); char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId, int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
tb_uid_t suid); tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId); char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
......
...@@ -44,18 +44,38 @@ typedef struct SColData SColData; ...@@ -44,18 +44,38 @@ typedef struct SColData SColData;
#define HAS_VALUE ((uint8_t)0x4) #define HAS_VALUE ((uint8_t)0x4)
// bitmap ================================ // bitmap ================================
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0}, const static uint8_t BIT1_MAP[8] = {0b11111110, 0b11111101, 0b11111011, 0b11110111,
{0b00000000, 0b00000100, 0b00001000, 2}, 0b11101111, 0b11011111, 0b10111111, 0b01111111};
{0b00000000, 0b00010000, 0b00100000, 4},
{0b00000000, 0b01000000, 0b10000000, 6}}; const static uint8_t BIT2_MAP[4] = {0b11111100, 0b11110011, 0b11001111, 0b00111111};
#define N1(n) ((((uint8_t)1) << (n)) - 1) #define ONE ((uint8_t)1)
#define BIT1_SIZE(n) ((((n)-1) >> 3) + 1) #define THREE ((uint8_t)3)
#define BIT2_SIZE(n) ((((n)-1) >> 2) + 1) #define DIV_8(i) ((i) >> 3)
#define SET_BIT1(p, i, v) ((p)[(i) >> 3] = (p)[(i) >> 3] & N1((i)&7) | (((uint8_t)(v)) << ((i)&7))) #define MOD_8(i) ((i)&7)
#define GET_BIT1(p, i) (((p)[(i) >> 3] >> ((i)&7)) & ((uint8_t)1)) #define DIV_4(i) ((i) >> 2)
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)]) #define MOD_4(i) ((i)&3)
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3)) #define MOD_4_TIME_2(i) (MOD_4(i) << 1)
#define BIT1_SIZE(n) (DIV_8((n)-1) + 1)
#define BIT2_SIZE(n) (DIV_4((n)-1) + 1)
#define SET_BIT1(p, i, v) ((p)[DIV_8(i)] = (p)[DIV_8(i)] & BIT1_MAP[MOD_8(i)] | ((v) << MOD_8(i)))
#define SET_BIT1_EX(p, i, v) \
do { \
if (MOD_8(i) == 0) { \
(p)[DIV_8(i)] = 0; \
} \
SET_BIT1(p, i, v); \
} while (0)
#define GET_BIT1(p, i) (((p)[DIV_8(i)] >> MOD_8(i)) & ONE)
#define SET_BIT2(p, i, v) ((p)[DIV_4(i)] = (p)[DIV_4(i)] & BIT2_MAP[MOD_4(i)] | ((v) << MOD_4_TIME_2(i)))
#define SET_BIT2_EX(p, i, v) \
do { \
if (MOD_4(i) == 0) { \
(p)[DIV_4(i)] = 0; \
} \
SET_BIT2(p, i, v); \
} while (0)
#define GET_BIT2(p, i) (((p)[DIV_4(i)] >> MOD_4_TIME_2(i)) & THREE)
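A minimal sketch of how the reworked 2-bit accessors pack four values per byte (illustrative only, assuming the macros above are in scope; `SET_BIT2_EX` zeroes each byte before the first write into it, so the bitmap needs no prior memset):

```c
#include <stdint.h>
#include <stdio.h>

void bit2Demo(void) {
  uint8_t bitmap[BIT2_SIZE(4)];                // four 2-bit slots fit in one byte
  for (int32_t i = 0; i < 4; i++) {
    SET_BIT2_EX(bitmap, i, (uint8_t)(i & 3));  // store 0, 1, 2, 3
  }
  for (int32_t i = 0; i < 4; i++) {
    printf("%d ", GET_BIT2(bitmap, i));        // prints: 0 1 2 3
  }
}
```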
// SBuffer ================================ // SBuffer ================================
struct SBuffer { struct SBuffer {
...@@ -70,9 +90,6 @@ int32_t tBufferInit(SBuffer *pBuffer, int64_t size); ...@@ -70,9 +90,6 @@ int32_t tBufferInit(SBuffer *pBuffer, int64_t size);
int32_t tBufferPut(SBuffer *pBuffer, const void *pData, int64_t nData); int32_t tBufferPut(SBuffer *pBuffer, const void *pData, int64_t nData);
int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData); int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);
// STSchema ================================
void tDestroyTSchema(STSchema *pTSchema);
// SColVal ================================ // SColVal ================================
#define CV_FLAG_VALUE ((int8_t)0x0) #define CV_FLAG_VALUE ((int8_t)0x0)
#define CV_FLAG_NONE ((int8_t)0x1) #define CV_FLAG_NONE ((int8_t)0x1)
...@@ -87,8 +104,12 @@ void tDestroyTSchema(STSchema *pTSchema); ...@@ -87,8 +104,12 @@ void tDestroyTSchema(STSchema *pTSchema);
#define COL_VAL_IS_VALUE(CV) ((CV)->flag == CV_FLAG_VALUE) #define COL_VAL_IS_VALUE(CV) ((CV)->flag == CV_FLAG_VALUE)
// SRow ================================ // SRow ================================
int32_t tRowBuild(SArray *aColVal, STSchema *pTSchema, SBuffer *pBuffer); int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
void tRowDestroy(SRow *pRow);
void tRowSort(SArray *aRowP);
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);
int32_t tRowAppendToColData(SRow *pRow, STSchema *pTSchema, SColData *aColData, int32_t nColData);
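A hedged usage sketch of the revised row-building API (population of the `SColVal` array and error handling are elided; a zero return code is assumed to mean success, per the conventions elsewhere in this codebase):

```c
// Build an SRow from a column-value array, read one value back, then free it.
SArray *aColVal = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
// ... append one SColVal per bound column here ...

SRow *pRow = NULL;
if (tRowBuild(aColVal, pTSchema, &pRow) == 0) {  // pRow is caller-owned now
  SColVal cv;
  tRowGet(pRow, pTSchema, 0, &cv);               // column 0 is the timestamp key
  tRowDestroy(pRow);                             // pairs with tRowBuild
}
taosArrayDestroy(aColVal);
```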
// SRowIter ================================ // SRowIter ================================
int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter); int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter);
...@@ -110,15 +131,28 @@ void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remov ...@@ -110,15 +131,28 @@ void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remov
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf); int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
// SColData ================================ // SColData ================================
typedef void *(*xMallocFn)(void *, int32_t);
void tColDataDestroy(void *ph); void tColDataDestroy(void *ph);
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn); void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
void tColDataClear(SColData *pColData); void tColDataClear(SColData *pColData);
void tColDataDeepClear(SColData *pColData);
int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal); int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal); void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal);
uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal); uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal);
int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest); int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMalloc, void *arg);
extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull); extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull);
// for stmt bind
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind);
void tColDataSortMerge(SArray *colDataArr);
//for raw block
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes,
int32_t nRows, char* lengthOrbitmap, char *data);
// for encode/decode
int32_t tPutColData(uint8_t *pBuf, SColData *pColData);
int32_t tGetColData(uint8_t *pBuf, SColData *pColData);
// STRUCT ================================ // STRUCT ================================
struct STColumn { struct STColumn {
col_id_t colId; col_id_t colId;
...@@ -225,23 +259,9 @@ struct STag { ...@@ -225,23 +259,9 @@ struct STag {
memcpy(varDataVal(x), (str), (_size)); \ memcpy(varDataVal(x), (str), (_size)); \
} while (0); } while (0);
// ----------------- SCHEMA BUILDER DEFINITION // STSchema ================================
typedef struct {
int32_t tCols;
int32_t nCols;
schema_ver_t version;
uint16_t flen;
int32_t tlen;
STColumn *columns;
} STSchemaBuilder;
int32_t tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version);
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder);
void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version);
int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version); STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version);
void tDestroyTSchema(STSchema *pTSchema);
#endif #endif
......
...@@ -482,8 +482,6 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW ...@@ -482,8 +482,6 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaW
return 0; return 0;
} }
STSchema* tdGetSTSChemaFromSSChema(SSchema* pSchema, int32_t nCols, int32_t sver);
typedef struct { typedef struct {
char name[TSDB_TABLE_FNAME_LEN]; char name[TSDB_TABLE_FNAME_LEN];
int8_t igExists; int8_t igExists;
...@@ -1734,6 +1732,8 @@ typedef struct { ...@@ -1734,6 +1732,8 @@ typedef struct {
int32_t execId; int32_t execId;
} STaskDropReq; } STaskDropReq;
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq); int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq); int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
...@@ -2083,8 +2083,13 @@ typedef struct SVCreateTbReq { ...@@ -2083,8 +2083,13 @@ typedef struct SVCreateTbReq {
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq); int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq); int tDecodeSVCreateTbReq(SDecoder* pCoder, SVCreateTbReq* pReq);
void tDestroySVCreateTbReq(SVCreateTbReq* pReq, int32_t flags);
static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) { static FORCE_INLINE void tdDestroySVCreateTbReq(SVCreateTbReq* req) {
if (NULL == req) {
return;
}
taosMemoryFreeClear(req->name); taosMemoryFreeClear(req->name);
taosMemoryFreeClear(req->comment); taosMemoryFreeClear(req->comment);
if (req->type == TSDB_CHILD_TABLE) { if (req->type == TSDB_CHILD_TABLE) {
...@@ -3232,6 +3237,57 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq); ...@@ -3232,6 +3237,57 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq); int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq); int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq); int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
typedef struct {
int32_t flags;
SVCreateTbReq* pCreateTbReq;
int64_t suid;
int64_t uid;
int32_t sver;
union {
SArray* aRowP;
SArray* aCol;
};
} SSubmitTbData;
typedef struct {
SArray* aSubmitTbData; // SArray<SSubmitTbData>
} SSubmitReq2;
int32_t tEncodeSSubmitReq2(SEncoder* pCoder, const SSubmitReq2* pReq);
int32_t tDecodeSSubmitReq2(SDecoder* pCoder, SSubmitReq2* pReq);
void tDestroySSubmitTbData(SSubmitTbData* pTbData, int32_t flag);
void tDestroySSubmitReq2(SSubmitReq2* pReq, int32_t flag);
typedef struct {
int32_t affectedRows;
SArray* aCreateTbRsp; // SArray<SVCreateTbRsp>
} SSubmitRsp2;
int32_t tEncodeSSubmitRsp2(SEncoder* pCoder, const SSubmitRsp2* pRsp);
int32_t tDecodeSSubmitRsp2(SDecoder* pCoder, SSubmitRsp2* pRsp);
void tDestroySSubmitRsp2(SSubmitRsp2* pRsp, int32_t flag);
#define TSDB_MSG_FLG_ENCODE 0x1
#define TSDB_MSG_FLG_DECODE 0x2
typedef struct {
union {
struct {
void* msgStr;
int32_t msgLen;
int64_t ver;
};
void* pDataBlock;
};
} SPackedData;
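A hedged sketch of serializing the new submit request; the two-pass sizing idiom via `tEncodeSize` is assumed from the SEncoder conventions used elsewhere in this codebase:

```c
SSubmitReq2 req = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
// ... push one SSubmitTbData (suid/uid/sver plus aRowP or aCol) here ...

int32_t code = 0;
int32_t len = 0;
tEncodeSize(tEncodeSSubmitReq2, &req, len, code);  // pass 1: compute the size
uint8_t *buf = taosMemoryMalloc(len);

SEncoder encoder = {0};
tEncoderInit(&encoder, buf, len);
code = tEncodeSSubmitReq2(&encoder, &req);         // pass 2: the actual encode
tEncoderClear(&encoder);

tDestroySSubmitReq2(&req, TSDB_MSG_FLG_ENCODE);    // free the caller-built request
```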
#pragma pack(pop) #pragma pack(pop)
......
...@@ -182,161 +182,164 @@ ...@@ -182,161 +182,164 @@
#define TK_INDEX 164 #define TK_INDEX 164
#define TK_FUNCTION 165 #define TK_FUNCTION 165
#define TK_INTERVAL 166 #define TK_INTERVAL 166
#define TK_TOPIC 167 #define TK_COUNT 167
#define TK_WITH 168 #define TK_LAST_ROW 168
#define TK_META 169 #define TK_TOPIC 169
#define TK_CONSUMER 170 #define TK_WITH 170
#define TK_GROUP 171 #define TK_META 171
#define TK_DESC 172 #define TK_CONSUMER 172
#define TK_DESCRIBE 173 #define TK_GROUP 173
#define TK_RESET 174 #define TK_DESC 174
#define TK_QUERY 175 #define TK_DESCRIBE 175
#define TK_CACHE 176 #define TK_RESET 176
#define TK_EXPLAIN 177 #define TK_QUERY 177
#define TK_ANALYZE 178 #define TK_CACHE 178
#define TK_VERBOSE 179 #define TK_EXPLAIN 179
#define TK_NK_BOOL 180 #define TK_ANALYZE 180
#define TK_RATIO 181 #define TK_VERBOSE 181
#define TK_NK_FLOAT 182 #define TK_NK_BOOL 182
#define TK_OUTPUTTYPE 183 #define TK_RATIO 183
#define TK_AGGREGATE 184 #define TK_NK_FLOAT 184
#define TK_BUFSIZE 185 #define TK_OUTPUTTYPE 185
#define TK_STREAM 186 #define TK_AGGREGATE 186
#define TK_INTO 187 #define TK_BUFSIZE 187
#define TK_TRIGGER 188 #define TK_STREAM 188
#define TK_AT_ONCE 189 #define TK_INTO 189
#define TK_WINDOW_CLOSE 190 #define TK_TRIGGER 190
#define TK_IGNORE 191 #define TK_AT_ONCE 191
#define TK_EXPIRED 192 #define TK_WINDOW_CLOSE 192
#define TK_FILL_HISTORY 193 #define TK_IGNORE 193
#define TK_SUBTABLE 194 #define TK_EXPIRED 194
#define TK_KILL 195 #define TK_FILL_HISTORY 195
#define TK_CONNECTION 196 #define TK_SUBTABLE 196
#define TK_TRANSACTION 197 #define TK_KILL 197
#define TK_BALANCE 198 #define TK_CONNECTION 198
#define TK_VGROUP 199 #define TK_TRANSACTION 199
#define TK_MERGE 200 #define TK_BALANCE 200
#define TK_REDISTRIBUTE 201 #define TK_VGROUP 201
#define TK_SPLIT 202 #define TK_MERGE 202
#define TK_DELETE 203 #define TK_REDISTRIBUTE 203
#define TK_INSERT 204 #define TK_SPLIT 204
#define TK_NULL 205 #define TK_DELETE 205
#define TK_NK_QUESTION 206 #define TK_INSERT 206
#define TK_NK_ARROW 207 #define TK_NULL 207
#define TK_ROWTS 208 #define TK_NK_QUESTION 208
#define TK_QSTART 209 #define TK_NK_ARROW 209
#define TK_QEND 210 #define TK_ROWTS 210
#define TK_QDURATION 211 #define TK_QSTART 211
#define TK_WSTART 212 #define TK_QEND 212
#define TK_WEND 213 #define TK_QDURATION 213
#define TK_WDURATION 214 #define TK_WSTART 214
#define TK_IROWTS 215 #define TK_WEND 215
#define TK_CAST 216 #define TK_WDURATION 216
#define TK_NOW 217 #define TK_IROWTS 217
#define TK_TODAY 218 #define TK_ISFILLED 218
#define TK_TIMEZONE 219 #define TK_CAST 219
#define TK_CLIENT_VERSION 220 #define TK_NOW 220
#define TK_SERVER_VERSION 221 #define TK_TODAY 221
#define TK_SERVER_STATUS 222 #define TK_TIMEZONE 222
#define TK_CURRENT_USER 223 #define TK_CLIENT_VERSION 223
#define TK_COUNT 224 #define TK_SERVER_VERSION 224
#define TK_LAST_ROW 225 #define TK_SERVER_STATUS 225
#define TK_CASE 226 #define TK_CURRENT_USER 226
#define TK_END 227 #define TK_CASE 227
#define TK_WHEN 228 #define TK_END 228
#define TK_THEN 229 #define TK_WHEN 229
#define TK_ELSE 230 #define TK_THEN 230
#define TK_BETWEEN 231 #define TK_ELSE 231
#define TK_IS 232 #define TK_BETWEEN 232
#define TK_NK_LT 233 #define TK_IS 233
#define TK_NK_GT 234 #define TK_NK_LT 234
#define TK_NK_LE 235 #define TK_NK_GT 235
#define TK_NK_GE 236 #define TK_NK_LE 236
#define TK_NK_NE 237 #define TK_NK_GE 237
#define TK_MATCH 238 #define TK_NK_NE 238
#define TK_NMATCH 239 #define TK_MATCH 239
#define TK_CONTAINS 240 #define TK_NMATCH 240
#define TK_IN 241 #define TK_CONTAINS 241
#define TK_JOIN 242 #define TK_IN 242
#define TK_INNER 243 #define TK_JOIN 243
#define TK_SELECT 244 #define TK_INNER 244
#define TK_DISTINCT 245 #define TK_SELECT 245
#define TK_WHERE 246 #define TK_DISTINCT 246
#define TK_PARTITION 247 #define TK_WHERE 247
#define TK_BY 248 #define TK_PARTITION 248
#define TK_SESSION 249 #define TK_BY 249
#define TK_STATE_WINDOW 250 #define TK_SESSION 250
#define TK_SLIDING 251 #define TK_STATE_WINDOW 251
#define TK_FILL 252 #define TK_EVENT_WINDOW 252
#define TK_VALUE 253 #define TK_START 253
#define TK_NONE 254 #define TK_SLIDING 254
#define TK_PREV 255 #define TK_FILL 255
#define TK_LINEAR 256 #define TK_VALUE 256
#define TK_NEXT 257 #define TK_NONE 257
#define TK_HAVING 258 #define TK_PREV 258
#define TK_RANGE 259 #define TK_LINEAR 259
#define TK_EVERY 260 #define TK_NEXT 260
#define TK_ORDER 261 #define TK_HAVING 261
#define TK_SLIMIT 262 #define TK_RANGE 262
#define TK_SOFFSET 263 #define TK_EVERY 263
#define TK_LIMIT 264 #define TK_ORDER 264
#define TK_OFFSET 265 #define TK_SLIMIT 265
#define TK_ASC 266 #define TK_SOFFSET 266
#define TK_NULLS 267 #define TK_LIMIT 267
#define TK_ABORT 268 #define TK_OFFSET 268
#define TK_AFTER 269 #define TK_ASC 269
#define TK_ATTACH 270 #define TK_NULLS 270
#define TK_BEFORE 271 #define TK_ABORT 271
#define TK_BEGIN 272 #define TK_AFTER 272
#define TK_BITAND 273 #define TK_ATTACH 273
#define TK_BITNOT 274 #define TK_BEFORE 274
#define TK_BITOR 275 #define TK_BEGIN 275
#define TK_BLOCKS 276 #define TK_BITAND 276
#define TK_CHANGE 277 #define TK_BITNOT 277
#define TK_COMMA 278 #define TK_BITOR 278
#define TK_COMPACT 279 #define TK_BLOCKS 279
#define TK_CONCAT 280 #define TK_CHANGE 280
#define TK_CONFLICT 281 #define TK_COMMA 281
#define TK_COPY 282 #define TK_COMPACT 282
#define TK_DEFERRED 283 #define TK_CONCAT 283
#define TK_DELIMITERS 284 #define TK_CONFLICT 284
#define TK_DETACH 285 #define TK_COPY 285
#define TK_DIVIDE 286 #define TK_DEFERRED 286
#define TK_DOT 287 #define TK_DELIMITERS 287
#define TK_EACH 288 #define TK_DETACH 288
#define TK_FAIL 289 #define TK_DIVIDE 289
#define TK_FILE 290 #define TK_DOT 290
#define TK_FOR 291 #define TK_EACH 291
#define TK_GLOB 292 #define TK_FAIL 292
#define TK_ID 293 #define TK_FILE 293
#define TK_IMMEDIATE 294 #define TK_FOR 294
#define TK_IMPORT 295 #define TK_GLOB 295
#define TK_INITIALLY 296 #define TK_ID 296
#define TK_INSTEAD 297 #define TK_IMMEDIATE 297
#define TK_ISNULL 298 #define TK_IMPORT 298
#define TK_KEY 299 #define TK_INITIALLY 299
#define TK_MODULES 300 #define TK_INSTEAD 300
#define TK_NK_BITNOT 301 #define TK_ISNULL 301
#define TK_NK_SEMI 302 #define TK_KEY 302
#define TK_NOTNULL 303 #define TK_MODULES 303
#define TK_OF 304 #define TK_NK_BITNOT 304
#define TK_PLUS 305 #define TK_NK_SEMI 305
#define TK_PRIVILEGE 306 #define TK_NOTNULL 306
#define TK_RAISE 307 #define TK_OF 307
#define TK_REPLACE 308 #define TK_PLUS 308
#define TK_RESTRICT 309 #define TK_PRIVILEGE 309
#define TK_ROW 310 #define TK_RAISE 310
#define TK_SEMI 311 #define TK_REPLACE 311
#define TK_STAR 312 #define TK_RESTRICT 312
#define TK_STATEMENT 313 #define TK_ROW 313
#define TK_STRICT 314 #define TK_SEMI 314
#define TK_STRING 315 #define TK_STAR 315
#define TK_TIMES 316 #define TK_STATEMENT 316
#define TK_UPDATE 317 #define TK_STRICT 317
#define TK_VALUES 318 #define TK_STRING 318
#define TK_VARIABLE 319 #define TK_TIMES 319
#define TK_VIEW 320 #define TK_UPDATE 320
#define TK_WAL 321 #define TK_VALUES 321
#define TK_VARIABLE 322
#define TK_VIEW 323
#define TK_WAL 324
#define TK_NK_SPACE 600 #define TK_NK_SPACE 600
#define TK_NK_COMMENT 601 #define TK_NK_COMMENT 601
......
...@@ -266,6 +266,7 @@ typedef struct { ...@@ -266,6 +266,7 @@ typedef struct {
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE) #define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t))) #define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP) #define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_BOOLEAN_TYPE(_t) ((_t) == TSDB_DATA_TYPE_BOOL)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t))) #define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \ #define IS_MATHABLE_TYPE(_t) \
......
...@@ -190,7 +190,9 @@ int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts); ...@@ -190,7 +190,9 @@ int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType); int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq); // int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t ver);
//
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset); int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
......
...@@ -120,6 +120,7 @@ typedef enum EFunctionType { ...@@ -120,6 +120,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_WEND, FUNCTION_TYPE_WEND,
FUNCTION_TYPE_WDURATION, FUNCTION_TYPE_WDURATION,
FUNCTION_TYPE_IROWTS, FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_ISFILLED,
FUNCTION_TYPE_TAGS, FUNCTION_TYPE_TAGS,
// internal function // internal function
......
...@@ -171,10 +171,10 @@ typedef struct SCreateSubTableClause { ...@@ -171,10 +171,10 @@ typedef struct SCreateSubTableClause {
STableOptions* pOptions; STableOptions* pOptions;
} SCreateSubTableClause; } SCreateSubTableClause;
typedef struct SCreateMultiTableStmt { typedef struct SCreateMultiTablesStmt {
ENodeType type; ENodeType type;
SNodeList* pSubTables; SNodeList* pSubTables;
} SCreateMultiTableStmt; } SCreateMultiTablesStmt;
typedef struct SDropTableClause { typedef struct SDropTableClause {
ENodeType type; ENodeType type;
...@@ -209,14 +209,14 @@ typedef struct SAlterTableStmt { ...@@ -209,14 +209,14 @@ typedef struct SAlterTableStmt {
typedef struct SCreateUserStmt { typedef struct SCreateUserStmt {
ENodeType type; ENodeType type;
char useName[TSDB_USER_LEN]; char userName[TSDB_USER_LEN];
char password[TSDB_USET_PASSWORD_LEN]; char password[TSDB_USET_PASSWORD_LEN];
int8_t sysinfo; int8_t sysinfo;
} SCreateUserStmt; } SCreateUserStmt;
typedef struct SAlterUserStmt { typedef struct SAlterUserStmt {
ENodeType type; ENodeType type;
char useName[TSDB_USER_LEN]; char userName[TSDB_USER_LEN];
int8_t alterType; int8_t alterType;
char password[TSDB_USET_PASSWORD_LEN]; char password[TSDB_USET_PASSWORD_LEN];
int8_t enable; int8_t enable;
...@@ -225,7 +225,7 @@ typedef struct SAlterUserStmt { ...@@ -225,7 +225,7 @@ typedef struct SAlterUserStmt {
typedef struct SDropUserStmt { typedef struct SDropUserStmt {
ENodeType type; ENodeType type;
char useName[TSDB_USER_LEN]; char userName[TSDB_USER_LEN];
} SDropUserStmt; } SDropUserStmt;
typedef struct SCreateDnodeStmt { typedef struct SCreateDnodeStmt {
......
...@@ -112,11 +112,12 @@ typedef enum ENodeType { ...@@ -112,11 +112,12 @@ typedef enum ENodeType {
QUERY_NODE_COLUMN_REF, QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN, QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN, QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
// Statement nodes are used in parser and planner module. // Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100, QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT, QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIF_STMT, QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT, QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT, QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT, QUERY_NODE_ALTER_DATABASE_STMT,
...@@ -124,7 +125,7 @@ typedef enum ENodeType { ...@@ -124,7 +125,7 @@ typedef enum ENodeType {
QUERY_NODE_TRIM_DATABASE_STMT, QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT, QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE, QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLE_STMT, QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE, QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT, QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT, QUERY_NODE_DROP_SUPER_TABLE_STMT,
...@@ -265,7 +266,9 @@ typedef enum ENodeType { ...@@ -265,7 +266,9 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_DELETE, QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN, QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN, QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType; } ENodeType;
/** /**
......
...@@ -185,7 +185,12 @@ typedef struct SMergeLogicNode { ...@@ -185,7 +185,12 @@ typedef struct SMergeLogicNode {
bool groupSort; bool groupSort;
} SMergeLogicNode; } SMergeLogicNode;
typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType; typedef enum EWindowType {
WINDOW_TYPE_INTERVAL = 1,
WINDOW_TYPE_SESSION,
WINDOW_TYPE_STATE,
WINDOW_TYPE_EVENT
} EWindowType;
typedef enum EWindowAlgorithm { typedef enum EWindowAlgorithm {
INTERVAL_ALGO_HASH = 1, INTERVAL_ALGO_HASH = 1,
...@@ -212,6 +217,8 @@ typedef struct SWindowLogicNode { ...@@ -212,6 +217,8 @@ typedef struct SWindowLogicNode {
SNode* pTspk; SNode* pTspk;
SNode* pTsEnd; SNode* pTsEnd;
SNode* pStateExpr; SNode* pStateExpr;
SNode* pStartCond;
SNode* pEndCond;
int8_t triggerType; int8_t triggerType;
int64_t watermark; int64_t watermark;
int64_t deleteMark; int64_t deleteMark;
...@@ -498,6 +505,14 @@ typedef struct SStateWinodwPhysiNode { ...@@ -498,6 +505,14 @@ typedef struct SStateWinodwPhysiNode {
typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode; typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode;
typedef struct SEventWinodwPhysiNode {
SWinodwPhysiNode window;
SNode* pStartCond;
SNode* pEndCond;
} SEventWinodwPhysiNode;
typedef SEventWinodwPhysiNode SStreamEventWinodwPhysiNode;
typedef struct SSortPhysiNode { typedef struct SSortPhysiNode {
SPhysiNode node; SPhysiNode node;
SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function
......
...@@ -223,6 +223,13 @@ typedef struct SIntervalWindowNode { ...@@ -223,6 +223,13 @@ typedef struct SIntervalWindowNode {
SNode* pFill; SNode* pFill;
} SIntervalWindowNode; } SIntervalWindowNode;
typedef struct SEventWindowNode {
ENodeType type; // QUERY_NODE_EVENT_WINDOW
SNode* pCol; // timestamp primary key
SNode* pStartCond;
SNode* pEndCond;
} SEventWindowNode;
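For reference, these two condition nodes carry the boolean expressions of the new event-window clause (see the TK_EVENT_WINDOW/TK_START tokens added above); a hedged sketch of the intended SQL shape: `SELECT count(*) FROM tb EVENT_WINDOW START WITH c1 > 0 END WITH c1 < 0`.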
typedef enum EFillMode { typedef enum EFillMode {
FILL_MODE_NONE = 1, FILL_MODE_NONE = 1,
FILL_MODE_VALUE, FILL_MODE_VALUE,
...@@ -354,10 +361,10 @@ typedef struct SVgDataBlocks { ...@@ -354,10 +361,10 @@ typedef struct SVgDataBlocks {
void* pData; // SSubmitReq + SSubmitBlk + ... void* pData; // SSubmitReq + SSubmitBlk + ...
} SVgDataBlocks; } SVgDataBlocks;
typedef void (*FFreeDataBlockHash)(SHashObj*); typedef void (*FFreeTableBlockHash)(SHashObj*);
typedef void (*FFreeDataBlockArray)(SArray*); typedef void (*FFreeVgourpBlockArray)(SArray*);
typedef struct SVnodeModifOpStmt { typedef struct SVnodeModifyOpStmt {
ENodeType nodeType; ENodeType nodeType;
ENodeType sqlNodeType; ENodeType sqlNodeType;
SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>. SArray* pDataBlocks; // data block for each vgroup, SArray<SVgDataBlocks*>.
...@@ -370,18 +377,18 @@ typedef struct SVnodeModifOpStmt { ...@@ -370,18 +377,18 @@ typedef struct SVnodeModifOpStmt {
const char* pBoundCols; const char* pBoundCols;
struct STableMeta* pTableMeta; struct STableMeta* pTableMeta;
SHashObj* pVgroupsHashObj; SHashObj* pVgroupsHashObj;
SHashObj* pTableBlockHashObj; SHashObj* pTableBlockHashObj; // SHashObj<tuid, STableDataCxt*>
SHashObj* pSubTableHashObj; SHashObj* pSubTableHashObj;
SHashObj* pTableNameHashObj; SHashObj* pTableNameHashObj;
SHashObj* pDbFNameHashObj; SHashObj* pDbFNameHashObj;
SArray* pVgDataBlocks; SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*>
SVCreateTbReq createTblReq; SVCreateTbReq* pCreateTblReq;
TdFilePtr fp; TdFilePtr fp;
FFreeDataBlockHash freeHashFunc; FFreeTableBlockHash freeHashFunc;
FFreeDataBlockArray freeArrayFunc; FFreeVgourpBlockArray freeArrayFunc;
bool usingTableProcessing; bool usingTableProcessing;
bool fileProcessing; bool fileProcessing;
} SVnodeModifOpStmt; } SVnodeModifyOpStmt;
typedef struct SExplainOptions { typedef struct SExplainOptions {
ENodeType type; ENodeType type;
......
...@@ -58,7 +58,6 @@ typedef struct SParseContext { ...@@ -58,7 +58,6 @@ typedef struct SParseContext {
bool isSuperUser; bool isSuperUser;
bool enableSysInfo; bool enableSysInfo;
bool async; bool async;
int8_t schemalessType;
const char* svrVer; const char* svrVer;
bool nodeOffline; bool nodeOffline;
SArray* pTableMetaPos; // sql table pos => catalog data pos SArray* pTableMetaPos; // sql table pos => catalog data pos
...@@ -85,12 +84,12 @@ int32_t qSetSTableIdForRsma(SNode* pStmt, int64_t uid); ...@@ -85,12 +84,12 @@ int32_t qSetSTableIdForRsma(SNode* pStmt, int64_t uid);
void qCleanupKeywordsTable(); void qCleanupKeywordsTable();
int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash); int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash);
int32_t qResetStmtDataBlock(void* block, bool keepBuf); int32_t qResetStmtDataBlock(STableDataCxt* block, bool keepBuf);
int32_t qCloneStmtDataBlock(void** pDst, void* pSrc); int32_t qCloneStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, bool reset);
void qFreeStmtDataBlock(void* pDataBlock); int32_t qRebuildStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, uint64_t uid, uint64_t suid, int32_t vgId, bool rebuildCreateTb);
int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgId); void qDestroyStmtDataBlock(STableDataCxt* pBlock);
void qDestroyStmtDataBlock(void* pBlock); STableMeta* qGetTableMetaInDataBlock(STableDataCxt* pDataBlock);
STableMeta* qGetTableMetaInDataBlock(void* pDataBlock); int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData **pData);
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx); int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery); int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
...@@ -105,11 +104,18 @@ void destroyBoundColumnInfo(void* pBoundInfo); ...@@ -105,11 +104,18 @@ void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf, int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen); int32_t msgBufLen);
void* smlInitHandle(SQuery* pQuery); void qDestroyBoundColInfo(void* pInfo);
void smlDestroyHandle(void* pHandle);
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta, SQuery* smlInitHandle();
int32_t smlBuildRow(STableDataCxt* pTableCxt);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void *kv, int32_t index);
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta,
char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen); char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen);
int32_t smlBuildOutput(void* handle, SHashObj* pVgHash); int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap); SArray* serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap);
......
...@@ -163,6 +163,23 @@ typedef struct STargetInfo { ...@@ -163,6 +163,23 @@ typedef struct STargetInfo {
int32_t vgId; int32_t vgId;
} STargetInfo; } STargetInfo;
typedef struct SBoundColInfo {
int16_t* pColIndex; // bound index => schema index
int32_t numOfCols;
int32_t numOfBound;
} SBoundColInfo;
typedef struct STableDataCxt {
STableMeta* pMeta;
STSchema* pSchema;
SBoundColInfo boundColsInfo;
SArray* pValues;
SSubmitTbData* pData;
TSKEY lastTs;
bool ordered;
bool duplicateTs;
} STableDataCxt;
typedef int32_t (*__async_send_cb_fn_t)(void* param, SDataBuf* pMsg, int32_t code); typedef int32_t (*__async_send_cb_fn_t)(void* param, SDataBuf* pMsg, int32_t code);
typedef int32_t (*__async_exec_fn_t)(void* param); typedef int32_t (*__async_exec_fn_t)(void* param);
...@@ -238,6 +255,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t ...@@ -238,6 +255,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
char* parseTagDatatoJson(void* p); char* parseTagDatatoJson(void* p);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst); int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst); int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
void freeVgInfo(SDBVgInfo* vgInfo); void freeVgInfo(SDBVgInfo* vgInfo);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen, extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,
......
...@@ -35,6 +35,7 @@ typedef struct STdbState { ...@@ -35,6 +35,7 @@ typedef struct STdbState {
TTB* pFillStateDb; // todo refactor TTB* pFillStateDb; // todo refactor
TTB* pSessionStateDb; TTB* pSessionStateDb;
TTB* pParNameDb; TTB* pParNameDb;
TTB* pParTagDb;
TXN* txn; TXN* txn;
} STdbState; } STdbState;
...@@ -108,6 +109,9 @@ int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); ...@@ -108,6 +109,9 @@ int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname); int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal); int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);
int32_t streamStatePutParTag(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen);
int32_t streamStateGetParTag(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen);
#if 0 #if 0
char* streamStateSessionDump(SStreamState* pState); char* streamStateSessionDump(SStreamState* pState);
#endif #endif
......
...@@ -103,6 +103,7 @@ typedef struct { ...@@ -103,6 +103,7 @@ typedef struct {
int8_t type; int8_t type;
} SStreamQueueItem; } SStreamQueueItem;
#if 0
typedef struct { typedef struct {
int8_t type; int8_t type;
int64_t ver; int64_t ver;
...@@ -116,6 +117,21 @@ typedef struct { ...@@ -116,6 +117,21 @@ typedef struct {
SArray* dataRefs; // SArray<int32_t*> SArray* dataRefs; // SArray<int32_t*>
SArray* reqs; // SArray<SSubmitReq*> SArray* reqs; // SArray<SSubmitReq*>
} SStreamMergedSubmit; } SStreamMergedSubmit;
#endif
typedef struct {
int8_t type;
int64_t ver;
int32_t* dataRef;
SPackedData submit;
} SStreamDataSubmit2;
typedef struct {
int8_t type;
int64_t ver;
SArray* dataRefs; // SArray<int32_t*>
SArray* submits; // SArray<SPackedSubmit>
} SStreamMergedSubmit2;
typedef struct { typedef struct {
int8_t type; int8_t type;
...@@ -219,11 +235,11 @@ static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) { ...@@ -219,11 +235,11 @@ static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
} }
} }
SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq); SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit);
void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit); void streamDataSubmitRefDec(SStreamDataSubmit2* pDataSubmit);
SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit); SStreamDataSubmit2* streamSubmitRefClone(SStreamDataSubmit2* pSubmit);
typedef struct { typedef struct {
char* qmsg; char* qmsg;
...@@ -355,14 +371,15 @@ void tFreeSStreamTask(SStreamTask* pTask); ...@@ -355,14 +371,15 @@ void tFreeSStreamTask(SStreamTask* pTask);
static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) { static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit*)pItem); SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem);
if (pSubmitClone == NULL) { if (pSubmitClone == NULL) {
qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask); qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
terrno = TSDB_CODE_OUT_OF_MEMORY; terrno = TSDB_CODE_OUT_OF_MEMORY;
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED); atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
return -1; return -1;
} }
qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data); qDebug("task %d %p submit enqueue %p %p %p %d %" PRId64, pTask->taskId, pTask, pItem, pSubmitClone,
pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver);
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
// qStreamInput(pTask->exec.executor, pSubmitClone); // qStreamInput(pTask->exec.executor, pSubmitClone);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE || } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||
...@@ -392,21 +409,6 @@ static FORCE_INLINE void streamTaskInputFail(SStreamTask* pTask) { ...@@ -392,21 +409,6 @@ static FORCE_INLINE void streamTaskInputFail(SStreamTask* pTask) {
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED); atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
} }
static FORCE_INLINE int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock) {
if (pTask->outputType == TASK_OUTPUT__TABLE) {
pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks);
taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes);
taosFreeQitem(pBlock);
} else if (pTask->outputType == TASK_OUTPUT__SMA) {
pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks);
taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes);
taosFreeQitem(pBlock);
} else {
taosWriteQitem(pTask->outputQueue->queue, pBlock);
}
return 0;
}
typedef struct { typedef struct {
SMsgHead head; SMsgHead head;
int64_t streamId; int64_t streamId;
...@@ -584,6 +586,7 @@ int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp); ...@@ -584,6 +586,7 @@ int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
int32_t streamTryExec(SStreamTask* pTask); int32_t streamTryExec(SStreamTask* pTask);
int32_t streamSchedExec(SStreamTask* pTask); int32_t streamSchedExec(SStreamTask* pTask);
int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBlock* pBlock);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz); int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz);
......
...@@ -52,11 +52,13 @@ _exit: ...@@ -52,11 +52,13 @@ _exit:
return code; return code;
} }
static FORCE_INLINE void tFree(uint8_t *pBuf) { #define tFree(BUF) \
if (pBuf) { do { \
taosMemoryFree(pBuf - sizeof(int64_t)); if (BUF) { \
} taosMemoryFree((uint8_t *)(BUF) - sizeof(int64_t)); \
} (BUF) = NULL; \
} \
} while (0)
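The macro form now also nulls the pointer after freeing, which the old inline function could not do through its by-value parameter. A usage sketch (assuming the companion `tRealloc` allocator from this header, which stores a 64-bit size field in front of the buffer it hands back):

```c
uint8_t *pBuf = NULL;
if (tRealloc(&pBuf, 1024) == 0) {  // allocates 1024 usable bytes behind an 8-byte header
  // ... use pBuf ...
  tFree(pBuf);                     // frees the underlying allocation and sets pBuf = NULL
}
```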
#ifdef __cplusplus #ifdef __cplusplus
} }
......
...@@ -158,6 +158,7 @@ int32_t* taosGetErrno(); ...@@ -158,6 +158,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_QUERY_KILLED TAOS_DEF_ERROR_CODE(0, 0X022D) #define TSDB_CODE_TSC_QUERY_KILLED TAOS_DEF_ERROR_CODE(0, 0X022D)
#define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E) #define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E)
#define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F) #define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F)
#define TSDB_CODE_TSC_STMT_CACHE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0230)
// mnode-common // mnode-common
// #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x // #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x
...@@ -713,7 +714,7 @@ int32_t* taosGetErrno(); ...@@ -713,7 +714,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3150) #define TSDB_CODE_RSMA_INVALID_ENV TAOS_DEF_ERROR_CODE(0, 0x3150)
#define TSDB_CODE_RSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3151) #define TSDB_CODE_RSMA_INVALID_STAT TAOS_DEF_ERROR_CODE(0, 0x3151)
#define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152) #define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152)
// #define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153) #define TSDB_CODE_RSMA_FS_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3153)
#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154) #define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155) #define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156) #define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
...@@ -721,6 +722,9 @@ int32_t* taosGetErrno(); ...@@ -721,6 +722,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158) #define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158)
#define TSDB_CODE_RSMA_STREAM_STATE_OPEN TAOS_DEF_ERROR_CODE(0, 0x3159) #define TSDB_CODE_RSMA_STREAM_STATE_OPEN TAOS_DEF_ERROR_CODE(0, 0x3159)
#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3160) #define TSDB_CODE_RSMA_STREAM_STATE_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3160)
#define TSDB_CODE_RSMA_FS_REF TAOS_DEF_ERROR_CODE(0, 0x3161)
#define TSDB_CODE_RSMA_FS_SYNC TAOS_DEF_ERROR_CODE(0, 0x3162)
#define TSDB_CODE_RSMA_FS_UPDATE TAOS_DEF_ERROR_CODE(0, 0x3163)
//index //index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200) #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
......
...@@ -22,19 +22,6 @@ ...@@ -22,19 +22,6 @@
extern "C" { extern "C" {
#endif #endif
#if 0
#define TARRAY(TYPE) \
struct { \
int32_t tarray_size_; \
int32_t tarray_neles_; \
struct TYPE* td_array_data_; \
}
#define TARRAY_SIZE(ARRAY) (ARRAY)->tarray_size_
#define TARRAY_NELES(ARRAY) (ARRAY)->tarray_neles_
#define TARRAY_ELE_AT(ARRAY, IDX) ((ARRAY)->td_array_data_ + idx)
#endif
#define TARRAY_MIN_SIZE 8 #define TARRAY_MIN_SIZE 8
#define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize)) #define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize) #define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
...@@ -46,6 +33,9 @@ typedef struct SArray { ...@@ -46,6 +33,9 @@ typedef struct SArray {
void* pData; void* pData;
} SArray; } SArray;
#define TARRAY_SIZE(array) ((array)->size)
#define TARRAY_DATA(array) ((array)->pData)
/** /**
* *
* @param size * @param size
...@@ -194,6 +184,13 @@ void taosArrayPopTailBatch(SArray* pArray, size_t cnt); ...@@ -194,6 +184,13 @@ void taosArrayPopTailBatch(SArray* pArray, size_t cnt);
*/ */
void taosArrayRemove(SArray* pArray, size_t index); void taosArrayRemove(SArray* pArray, size_t index);
/**
* remove batch entry from the given index
* @param pArray
* @param index
*/
void taosArrayRemoveBatch(SArray* pArray, size_t index, size_t num, FDelete fp);
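A one-line usage sketch (hedged: `FDelete` is the per-element destructor type used by the other SArray routines; pass NULL when the elements own no heap memory):

```c
// Remove 3 consecutive elements starting at index 5; none of them owns heap memory.
taosArrayRemoveBatch(pArray, 5, 3, NULL);
```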
/** /**
* copy the whole array from source to destination * copy the whole array from source to destination
* @param pDst * @param pDst
......
...@@ -193,6 +193,7 @@ typedef enum ELogicConditionType { ...@@ -193,6 +193,7 @@ typedef enum ELogicConditionType {
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string #define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string #define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string #define TSDB_CGROUP_LEN 193 // it is a null-terminated string
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
#define TSDB_DB_NAME_LEN 65 #define TSDB_DB_NAME_LEN 65
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
......
...@@ -116,6 +116,7 @@ static int32_t tEncodeI64v(SEncoder* pCoder, int64_t val); ...@@ -116,6 +116,7 @@ static int32_t tEncodeI64v(SEncoder* pCoder, int64_t val);
static int32_t tEncodeFloat(SEncoder* pCoder, float val); static int32_t tEncodeFloat(SEncoder* pCoder, float val);
static int32_t tEncodeDouble(SEncoder* pCoder, double val); static int32_t tEncodeDouble(SEncoder* pCoder, double val);
static int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len); static int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len);
static int32_t tEncodeBinaryEx(SEncoder* pCoder, const uint8_t* val, uint32_t len);
static int32_t tEncodeCStrWithLen(SEncoder* pCoder, const char* val, uint32_t len); static int32_t tEncodeCStrWithLen(SEncoder* pCoder, const char* val, uint32_t len);
static int32_t tEncodeCStr(SEncoder* pCoder, const char* val); static int32_t tEncodeCStr(SEncoder* pCoder, const char* val);
......
...@@ -14,6 +14,7 @@ set binary_dir=%3 ...@@ -14,6 +14,7 @@ set binary_dir=%3
set binary_dir=%binary_dir:/=\\% set binary_dir=%binary_dir:/=\\%
set osType=%4 set osType=%4
set verNumber=%5 set verNumber=%5
set Enterprise=%6
set target_dir=C:\\TDengine set target_dir=C:\\TDengine
if not exist %target_dir% ( if not exist %target_dir% (
...@@ -57,6 +58,32 @@ if exist %binary_dir%\\build\\lib\\taosws.dll ( ...@@ -57,6 +58,32 @@ if exist %binary_dir%\\build\\lib\\taosws.dll (
if exist %binary_dir%\\build\\bin\\taosdump.exe ( if exist %binary_dir%\\build\\bin\\taosdump.exe (
copy %binary_dir%\\build\\bin\\taosdump.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\taosdump.exe %target_dir% > nul
) )
if "%Enterprise%" == "true" (
if exist %binary_dir%\\build\\bin\\taosx.exe (
copy %binary_dir%\\build\\bin\\taosx.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tmq_sim.exe (
copy %binary_dir%\\build\\bin\\tmq_sim.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tsim.exe (
copy %binary_dir%\\build\\bin\\tsim.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tmq_taosx_ci.exe (
copy %binary_dir%\\build\\bin\\tmq_taosx_ci.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\tmq_demo.exe (
copy %binary_dir%\\build\\bin\\tmq_demo.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\dumper.exe (
copy %binary_dir%\\build\\bin\\dumper.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\runUdf.exe (
copy %binary_dir%\\build\\bin\\runUdf.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\create_table.exe (
copy %binary_dir%\\build\\bin\\create_table.exe %target_dir% > nul
)
)
copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul
copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul
......
...@@ -149,7 +149,6 @@ typedef struct STscObj { ...@@ -149,7 +149,6 @@ typedef struct STscObj {
int32_t numOfReqs; // number of sqlObj bound to this connection int32_t numOfReqs; // number of sqlObj bound to this connection
SAppInstInfo* pAppInfo; SAppInstInfo* pAppInfo;
SHashObj* pRequests; SHashObj* pRequests;
int8_t schemalessType; // todo remove it, this attribute should be move to request
} STscObj; } STscObj;
typedef struct SResultColumn { typedef struct SResultColumn {
......
...@@ -30,7 +30,7 @@ extern "C" {
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
//#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", DEBUG_INFO, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
#ifdef __cplusplus
......
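The tscPerf change above re-enables the perf line and passes 0 as the level argument so it is emitted whenever the flag check passes. As a minimal standalone sketch of the guarded do { ... } while (0) logging-macro technique itself (names here are illustrative, not TDengine's):

#include <stdio.h>
#include <stdarg.h>

/* hypothetical flag bits, standing in for cDebugFlag / DEBUG_INFO */
enum { LOG_INFO = 1, LOG_DEBUG = 2 };
static int logFlag = LOG_INFO;

static void logPrint(const char *tag, const char *fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  printf("%s", tag);
  vprintf(fmt, ap);
  printf("\n");
  va_end(ap);
}

/* do { ... } while (0) makes the macro behave like a single statement,
   so it composes safely with unbraced if/else */
#define perfLog(...) do { if (logFlag & LOG_INFO) { logPrint("PERF ", __VA_ARGS__); } } while (0)

int main(void) {
  if (logFlag) perfLog("insert duration %dus", 42); /* expands without dangling-else issues */
  return 0;
}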
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_CLIENTSML_H
#define TDENGINE_CLIENTSML_H
#ifdef __cplusplus
extern "C" {
#endif
#include "catalog.h"
#include "clientInt.h"
#include "osThread.h"
#include "query.h"
#include "taos.h"
#include "taoserror.h"
#include "tcommon.h"
#include "tdef.h"
#include "tglobal.h"
#include "tlog.h"
#include "tmsg.h"
#include "tname.h"
#include "ttime.h"
#include "ttypes.h"
#include "cJSON.h"
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
# define expect(expr,value) (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value) (expr)
#endif
#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
#endif
#define SPACE ' '
#define COMMA ','
#define EQUAL '='
#define QUOTE '"'
#define SLASH '\\'
#define JUMP_SPACE(sql, sqlEnd) \
while (sql < sqlEnd) { \
if (unlikely(*sql == SPACE)) \
sql++; \
else \
break; \
}
#define IS_INVALID_COL_LEN(len) ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)
#define TS "_ts"
#define TS_LEN 3
#define VALUE "_value"
#define VALUE_LEN 6
#define MAX_RETRY_TIMES 5
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
SCHEMA_ACTION_NULL,
SCHEMA_ACTION_CREATE_STABLE,
SCHEMA_ACTION_ADD_COLUMN,
SCHEMA_ACTION_ADD_TAG,
SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;
typedef struct {
const void *key;
int32_t keyLen;
void *value;
bool used;
} Node;
typedef struct NodeList {
Node data;
struct NodeList* next;
} NodeList;
typedef struct {
char *measure;
char *tags;
char *cols;
char *timestamp;
int32_t measureLen;
int32_t measureTagsLen;
int32_t tagsLen;
int32_t colsLen;
int32_t timestampLen;
SArray *colArray;
} SSmlLineInfo;
typedef struct {
const char *sTableName; // super table name
int32_t sTableNameLen;
char childTableName[TSDB_TABLE_NAME_LEN];
uint64_t uid;
void *key; // for openTsdb
SArray *tags;
// elements are SHashObj<cols key string, SSmlKv*>, for quick lookup by key
SArray *cols;
STableDataCxt *tableDataCtx;
} SSmlTableInfo;
typedef struct {
SArray *tags; // save the origin order to create table
SHashObj *tagHash; // elements are <key, index in tags>
SArray *cols;
SHashObj *colHash;
STableMeta *tableMeta;
} SSmlSTableMeta;
typedef struct {
int32_t len;
char *buf;
} SSmlMsgBuf;
typedef struct {
int32_t code;
int32_t lineNum;
int32_t numOfSTables;
int32_t numOfCTables;
int32_t numOfCreateSTables;
int32_t numOfAlterColSTables;
int32_t numOfAlterTagSTables;
int64_t parseTime;
int64_t schemaTime;
int64_t insertBindTime;
int64_t insertRpcTime;
int64_t endTime;
} SSmlCostInfo;
typedef struct {
int64_t id;
SMLProtocolType protocol;
int8_t precision;
bool reRun;
bool dataFormat; // true means the name and order of keys in each line are the same (only for the InfluxDB line protocol)
bool isRawLine;
int32_t ttl;
NodeList *childTables;
NodeList *superTables;
SHashObj *pVgHash;
STscObj *taos;
SCatalog *pCatalog;
SRequestObj *pRequest;
SQuery *pQuery;
SSmlCostInfo cost;
int32_t lineNum;
SSmlMsgBuf msgBuf;
// cJSON *root; // for parse json
int8_t offset[4];
SSmlLineInfo *lines; // element is SSmlLineInfo
//
SArray *preLineTagKV;
SArray *preLineColKV;
SSmlLineInfo preLine;
STableMeta *currSTableMeta;
STableDataCxt *currTableDataCtx;
bool needModifySchema;
} SSmlHandle;
#define IS_SAME_CHILD_TABLE (elements->measureTagsLen == info->preLine.measureTagsLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureTagsLen) == 0)
#define IS_SAME_SUPER_TABLE (elements->measureLen == info->preLine.measureLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureLen) == 0)
#define IS_SAME_KEY (preKV->keyLen == kv.keyLen && memcmp(preKV->key, kv.key, kv.keyLen) == 0)
extern int64_t smlFactorNS[3];
extern int64_t smlFactorS[3];
typedef int32_t (*_equal_fn_sml)(const void *, const void *);
SSmlHandle *smlBuildSmlInfo(TAOS *taos);
void smlDestroyInfo(SSmlHandle *info);
void smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset);
void smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
SArray *smlJsonParseTags(char *start, char *end);
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
int nodeListSize(NodeList* list);
bool smlDoubleToInt64OverFlow(double num);
int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision);
int8_t smlGetTsTypeByLen(int32_t len);
SSmlTableInfo* smlBuildTableInfo(int numRows, const char* measure, int32_t measureLen);
SSmlSTableMeta* smlBuildSTableMeta(bool isDataFormat);
int32_t smlSetCTableName(SSmlTableInfo *oneTable);
STableMeta* smlGetMeta(SSmlHandle *info, const void* measure, int32_t measureLen);
int32_t is_same_child_table_telnet(const void *a, const void *b);
int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len);
int32_t smlClearForRerun(SSmlHandle *info);
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_CLIENTSML_H
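clientSml.h wraps __builtin_expect in likely/unlikely hints so the parser's hot loops keep the common path straight-line. A minimal standalone illustration of the same technique, independent of the header above (the function and inputs are made up):

#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#define likely(x)   __builtin_expect((x) != 0, 1)
#define unlikely(x) __builtin_expect((x) != 0, 0)
#else
#define likely(x)   (x)
#define unlikely(x) (x)
#endif

/* count non-space bytes; the hints tell the compiler that end-of-string
   and spaces are the rare cases in this workload */
static int countNonSpace(const char *s) {
  int n = 0;
  for (; likely(*s != '\0'); ++s) {
    if (unlikely(*s == ' ')) continue;
    ++n;
  }
  return n;
}

int main(void) {
  printf("%d\n", countNonSpace("a b c")); /* prints 3 */
  return 0;
}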
...@@ -21,8 +21,6 @@ extern "C" {
#endif
#include "catalog.h"
typedef void STableDataBlocks;
typedef enum {
STMT_TYPE_INSERT = 1,
STMT_TYPE_MULTI_INSERT,
...@@ -43,7 +41,7 @@ typedef enum {
} STMT_STATUS;
typedef struct SStmtTableCache {
STableDataBlocks *pDataBlock;
STableDataCxt *pDataCtx;
void *boundTags;
} SStmtTableCache;
...@@ -74,7 +72,8 @@ typedef struct SStmtExecInfo {
int32_t affectedRows;
SRequestObj *pRequest;
SHashObj *pBlockHash;
bool autoCreateTbl;
STableDataCxt *pCurrBlock;
SSubmitTbData *pCurrTbData;
} SStmtExecInfo;
typedef struct SStmtSQLInfo {
......
...@@ -76,13 +76,19 @@ static void deregisterRequest(SRequestObj *pRequest) {
"current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
"us, exec:%" PRId64 "us, stmtType:%d",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.execEnd - pRequest->metric.semanticEnd, pRequest->stmtType);
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
// tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, exec:%" PRId64 "us",
// duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
// pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
// pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
// atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
// tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64,
...@@ -264,7 +270,6 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
taosThreadMutexInit(&pObj->mutex, NULL);
pObj->id = taosAddRef(clientConnRefPool, pObj);
pObj->schemalessType = 1;
atomic_add_fetch_64(&pObj->pAppInfo->numOfConns, 1);
......
...@@ -239,7 +239,6 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = pStmtCb,
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
.enableSysInfo = pTscObj->sysInfo,
.svrVer = pTscObj->sVer,
...@@ -741,47 +740,21 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
}
int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet* epset) {
int32_t code = 0;
SArray* pArray = NULL;
SSubmitRsp* pRsp = (SSubmitRsp*)res;
if (pRsp->nBlocks <= 0) {
taosMemoryFreeClear(pRsp->pBlocks);
SSubmitRsp2* pRsp = (SSubmitRsp2*)res;
if (NULL == pRsp->aCreateTbRsp) {
return TSDB_CODE_SUCCESS;
}
pArray = taosArrayInit(pRsp->nBlocks, sizeof(STbSVersion));
if (NULL == pArray) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
if (blk->pMeta) {
handleCreateTbExecRes(blk->pMeta, pCatalog);
tFreeSTableMetaRsp(blk->pMeta);
taosMemoryFreeClear(blk->pMeta);
}
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
int32_t tbNum = taosArrayGetSize(pRsp->aCreateTbRsp);
for (int32_t i = 0; i < tbNum; ++i) {
SVCreateTbRsp* pTbRsp = (SVCreateTbRsp*)taosArrayGet(pRsp->aCreateTbRsp, i);
if (pTbRsp->pMeta) {
handleCreateTbExecRes(pTbRsp->pMeta, pCatalog);
}
STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
taosArrayPush(pArray, &tbSver);
}
SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = *epset};
code = catalogChkTbMetaVersion(pCatalog, &conn, pArray);
_return:
taosArrayDestroy(pArray);
return code;
return TSDB_CODE_SUCCESS;
}
int32_t handleQueryExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog, SEpSet* epset) {
...@@ -882,7 +855,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
}
static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIF_STMT != nodeType(pStmt) ? false : ((SVnodeModifOpStmt*)pStmt)->fileProcessing;
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
// todo refacto the error code mgmt
...@@ -961,7 +934,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (pQuery->pRoot && !pRequest->inRetry) {
STscObj* pTscObj = pRequest->pTscObj;
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
...@@ -1066,7 +1039,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
}
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
if (QUERY_NODE_VNODE_MODIF_STMT != nodeType(pQuery->pRoot)) {
if (QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pQuery->pRoot)) {
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
}
......
...@@ -866,7 +866,6 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.pTransporter = pTscObj->pAppInfo->pTransporter,
.pStmtCb = NULL,
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
.enableSysInfo = pTscObj->sysInfo,
.async = true,
......
This diff has been collapsed.
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "clientSml.h"
#define JUMP_JSON_SPACE(start) \
while(*(start)){\
if(unlikely(*(start) > 32))\
break;\
else\
(start)++;\
}
SArray *smlJsonParseTags(char *start, char *end){
SArray *tags = taosArrayInit(4, sizeof(SSmlKv));
while(start < end){
SSmlKv kv = {0};
kv.type = TSDB_DATA_TYPE_NCHAR;
bool isInQuote = false;
while(start < end){
if(unlikely(!isInQuote && *start == '"')){
start++;
kv.key = start;
isInQuote = true;
continue;
}
if(unlikely(isInQuote && *start == '"')){
kv.keyLen = start - kv.key;
start++;
break;
}
start++;
}
bool hasColon = false;
while(start < end){
if(unlikely(!hasColon && *start == ':')){
start++;
hasColon = true;
continue;
}
if(unlikely(hasColon && kv.value == NULL && (*start > 32 && *start != '"'))){
kv.value = start;
start++;
continue;
}
if(unlikely(hasColon && kv.value != NULL && (*start == '"' || *start == ',' || *start == '}'))){
kv.length = start - kv.value;
taosArrayPush(tags, &kv);
start++;
break;
}
start++;
}
}
return tags;
}
static int32_t smlParseTagsFromJSON(SSmlHandle *info, SSmlLineInfo *elements) {
int32_t ret = TSDB_CODE_SUCCESS;
if(is_same_child_table_telnet(elements, &info->preLine) == 0){
return TSDB_CODE_SUCCESS;
}
bool isSameMeasure = IS_SAME_SUPER_TABLE;
int cnt = 0;
SArray *preLineKV = info->preLineTagKV;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(unlikely(!isSameMeasure)){
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
if(unlikely(sMeta == NULL)){
sMeta = smlBuildSTableMeta(info->dataFormat);
STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
sMeta->tableMeta = pTableMeta;
if(pTableMeta == NULL){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL);
}
info->currSTableMeta = sMeta->tableMeta;
superKV = sMeta->tags;
if(unlikely(taosArrayGetSize(superKV) == 0)){
isSuperKVInit = false;
}
taosArraySetSize(preLineKV, 0);
}
}else{
taosArraySetSize(preLineKV, 0);
}
SArray *tags = smlJsonParseTags(elements->tags, elements->tags + elements->tagsLen);
int32_t tagNum = taosArrayGetSize(tags);
for (int32_t i = 0; i < tagNum; ++i) {
SSmlKv kv = *(SSmlKv*)taosArrayGet(tags, i);
if(info->dataFormat){
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
info->dataFormat = false;
info->reRun = true;
taosArrayDestroy(tags);
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(unlikely(cnt >= taosArrayGetSize(preLineKV))) {
info->dataFormat = false;
info->reRun = true;
taosArrayDestroy(tags);
return TSDB_CODE_SUCCESS;
}
SSmlKv *preKV = (SSmlKv *)taosArrayGet(preLineKV, cnt);
if(unlikely(kv.length > preKV->length)){
preKV->length = kv.length;
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
ASSERT(tableMeta != NULL);
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
taosArrayDestroy(tags);
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
taosArrayDestroy(tags);
return TSDB_CODE_SUCCESS;
}
SSmlKv *preKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.length > preKV->length)) {
preKV->length = kv.length;
}else{
kv.length = preKV->length;
}
info->needModifySchema = true;
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
taosArrayDestroy(tags);
return TSDB_CODE_SUCCESS;
}
}else{
taosArrayPush(superKV, &kv);
}
taosArrayPush(preLineKV, &kv);
}
}else{
taosArrayPush(preLineKV, &kv);
}
cnt++;
}
taosArrayDestroy(tags);
SSmlTableInfo *tinfo = (SSmlTableInfo *)nodeListGet(info->childTables, elements, POINTER_BYTES, is_same_child_table_telnet);
if (unlikely(tinfo == NULL)) {
tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen);
if (unlikely(!tinfo)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
tinfo->tags = taosArrayDup(preLineKV, NULL);
smlSetCTableName(tinfo);
if (info->dataFormat) {
info->currSTableMeta->uid = tinfo->uid;
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if (tinfo->tableDataCtx == NULL) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
}
SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo));
*key = *elements;
tinfo->key = key;
nodeListSet(&info->childTables, key, POINTER_BYTES, tinfo, is_same_child_table_telnet);
}
if (info->dataFormat) info->currTableDataCtx = tinfo->tableDataCtx;
return ret;
}
static char* smlJsonGetObj(char *payload){
int leftBracketCnt = 0;
while(*payload) {
if (unlikely(*payload == '{')) {
leftBracketCnt++;
payload++;
continue;
}
if (unlikely(*payload == '}')) {
leftBracketCnt--;
payload++;
if (leftBracketCnt == 0) {
return payload;
} else if (leftBracketCnt < 0) {
return NULL;
}
continue;
}
payload++;
}
return NULL;
}
void smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){
int index = 0;
while(*(*start)){
if((*start)[0] != '"'){
(*start)++;
continue;
}
if(unlikely(index >= 4)) {
uError("index >= 4, %s", *start)
break;
}
char *sTmp = *start;
if((*start)[1] == 'm' && (*start)[2] == 'e' && (*start)[3] == 't'
&& (*start)[4] == 'r' && (*start)[5] == 'i' && (*start)[6] == 'c' && (*start)[7] == '"'){
(*start) += 8;
bool isInQuote = false;
while(*(*start)){
if(unlikely(!isInQuote && *(*start) == '"')){
(*start)++;
offset[index++] = *start - sTmp;
element->measure = (*start);
isInQuote = true;
continue;
}
if(unlikely(isInQuote && *(*start) == '"')){
element->measureLen = (*start) - element->measure;
break;
}
(*start)++;
}
}else if((*start)[1] == 't' && (*start)[2] == 'i' && (*start)[3] == 'm'
&& (*start)[4] == 'e' && (*start)[5] == 's' && (*start)[6] == 't'
&& (*start)[7] == 'a' && (*start)[8] == 'm' && (*start)[9] == 'p' && (*start)[10] == '"'){
(*start) += 11;
bool hasColon = false;
while(*(*start)){
if(unlikely(!hasColon && *(*start) == ':')){
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->timestamp = (*start);
hasColon = true;
continue;
}
if(unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))){
element->timestampLen = (*start) - element->timestamp;
break;
}
(*start)++;
}
}else if((*start)[1] == 'v' && (*start)[2] == 'a' && (*start)[3] == 'l'
&& (*start)[4] == 'u' && (*start)[5] == 'e' && (*start)[6] == '"'){
(*start) += 7;
bool hasColon = false;
while(*(*start)){
if(unlikely(!hasColon && *(*start) == ':')){
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->cols = (*start);
hasColon = true;
continue;
}
if(unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))){
element->colsLen = (*start) - element->cols;
break;
}
(*start)++;
}
}else if((*start)[1] == 't' && (*start)[2] == 'a' && (*start)[3] == 'g'
&& (*start)[4] == 's' && (*start)[5] == '"'){
(*start) += 6;
while(*(*start)){
if(unlikely(*(*start) == ':')){
(*start)++;
JUMP_JSON_SPACE((*start))
offset[index++] = *start - sTmp;
element->tags = (*start);
char* tmp = smlJsonGetObj((*start));
if(tmp){
element->tagsLen = tmp - (*start);
*start = tmp;
}
break;
}
(*start)++;
}
}
if(*(*start) == '}'){
(*start)++;
break;
}
(*start)++;
}
}
void smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset){
int index = 0;
while(*(*start)){
if((*start)[0] != '"'){
(*start)++;
continue;
}
if(unlikely(index >= 4)) {
uError("index >= 4, %s", *start)
break;
}
if((*start)[1] == 'm'){
(*start) += offset[index++];
element->measure = *start;
while(*(*start)){
if(unlikely(*(*start) == '"')){
element->measureLen = (*start) - element->measure;
break;
}
(*start)++;
}
}else if((*start)[1] == 't' && (*start)[2] == 'i'){
(*start) += offset[index++];
element->timestamp = *start;
while(*(*start)){
if(unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)){
element->timestampLen = (*start) - element->timestamp;
break;
}
(*start)++;
}
}else if((*start)[1] == 'v'){
(*start) += offset[index++];
element->cols = *start;
while(*(*start)){
if(unlikely( *(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)){
element->colsLen = (*start) - element->cols;
break;
}
(*start)++;
}
}else if((*start)[1] == 't' && (*start)[2] == 'a'){
(*start) += offset[index++];
element->tags = (*start);
char* tmp = smlJsonGetObj((*start));
if(tmp){
element->tagsLen = tmp - (*start);
*start = tmp;
}
break;
}
if(*(*start) == '}'){
(*start)++;
break;
}
(*start)++;
}
}
static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *elements) {
int32_t ret = TSDB_CODE_SUCCESS;
if(info->offset[0] == 0){
smlJsonParseObjFirst(start, elements, info->offset);
}else{
smlJsonParseObj(start, elements, info->offset);
}
if(**start == '\0') return TSDB_CODE_SUCCESS;
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
if (smlParseNumber(&kv, &info->msgBuf)) {
kv.length = (int16_t)tDataTypes[kv.type].bytes;
}else{
return TSDB_CODE_TSC_INVALID_VALUE;
}
// Parse tags
ret = smlParseTagsFromJSON(info, elements);
if (unlikely(ret)) {
uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id);
return ret;
}
if(unlikely(info->reRun)){
return TSDB_CODE_SUCCESS;
}
// Parse timestamp
// note: the timestamp is parsed after the tags so that meta->precision is already available here
int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts < 0)) {
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id);
return TSDB_CODE_INVALID_TIMESTAMP;
}
SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
if(info->dataFormat){
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0);
if(ret == TSDB_CODE_SUCCESS){
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1);
}
if(ret == TSDB_CODE_SUCCESS){
ret = smlBuildRow(info->currTableDataCtx);
}
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
return ret;
}
}else{
if(elements->colArray == NULL){
elements->colArray = taosArrayInit(16, sizeof(SSmlKv));
}
taosArrayPush(elements->colArray, &kvTs);
taosArrayPush(elements->colArray, &kv);
}
info->preLine = *elements;
return TSDB_CODE_SUCCESS;
}
int32_t smlParseJSON(SSmlHandle *info, char *payload) {
int32_t payloadNum = 1 << 15;
int32_t ret = TSDB_CODE_SUCCESS;
int cnt = 0;
char *dataPointStart = payload;
while (1) {
if(info->dataFormat) {
SSmlLineInfo element = {0};
ret = smlParseJSONString(info, &dataPointStart, &element);
}else{
if(cnt >= payloadNum){
payloadNum = payloadNum << 1;
void* tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo));
if(tmp != NULL){
info->lines = (SSmlLineInfo*)tmp;
}
}
ret = smlParseJSONString(info, &dataPointStart, info->lines + cnt);
}
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("SML:0x%" PRIx64 " Invalid JSON Payload", info->id);
return ret;
}
if(*dataPointStart == '\0') break;
if(unlikely(info->reRun)){
cnt = 0;
dataPointStart = payload;
info->lineNum = payloadNum;
ret = smlClearForRerun(info);
if(ret != TSDB_CODE_SUCCESS){
return ret;
}
continue;
}
cnt++;
}
info->lineNum = cnt;
return TSDB_CODE_SUCCESS;
}
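smlJsonParseObjFirst scans the first object for the literal keys "metric", "timestamp", "value" and "tags" and records each key's byte offset in info->offset; smlJsonParseObj then reuses those offsets on later objects, which is why the fast path assumes every object in the batch repeats the same keys in the same order. A hedged sketch of the payload shape this expects (metric names, tags and values are made up):

#include <stdio.h>

/* one batch = a JSON array of data points with identical key order,
   so offsets cached from the first object stay valid for the rest */
static const char *sampleOpenTsdbJson =
    "[{\"metric\":\"sys.cpu.usage\",\"timestamp\":1626006833,"
    "\"value\":23.5,\"tags\":{\"host\":\"web01\",\"dc\":\"dc1\"}},"
    "{\"metric\":\"sys.cpu.usage\",\"timestamp\":1626006834,"
    "\"value\":21.0,\"tags\":{\"host\":\"web02\",\"dc\":\"dc1\"}}]";

int main(void) {
  printf("%s\n", sampleOpenTsdbJson);
  return 0;
}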
This diff has been collapsed.
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "clientSml.h"
int32_t is_same_child_table_telnet(const void *a, const void *b){
SSmlLineInfo *t1 = (SSmlLineInfo *)a;
SSmlLineInfo *t2 = (SSmlLineInfo *)b;
return (((t1->measureLen == t2->measureLen) && memcmp(t1->measure, t2->measure, t1->measureLen) == 0)
&& ((t1->tagsLen == t2->tagsLen) && memcmp(t1->tags, t2->tags, t1->tagsLen) == 0)) ? 0 : 1;
}
int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) {
uint8_t toPrecision = info->currSTableMeta ? info->currSTableMeta->tableInfo.precision : TSDB_TIME_PRECISION_NANO;
if (unlikely(!data)) {
smlBuildInvalidDataMsg(&info->msgBuf, "timestamp can not be null", NULL);
return -1;
}
if (unlikely(len == 1 && data[0] == '0')) {
return taosGetTimestampNs()/smlFactorNS[toPrecision];
}
int8_t fromPrecision = smlGetTsTypeByLen(len);
if (unlikely(fromPrecision == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf,
"timestamp precision can only be seconds(10 digits) or milli seconds(13 digits)", data);
return -1;
}
int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
if (unlikely(ts == -1)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data);
return -1;
}
return ts;
}
static void smlParseTelnetElement(char **sql, char *sqlEnd, char **data, int32_t *len) {
while (*sql < sqlEnd) {
if (unlikely((**sql != SPACE && !(*data)))) {
*data = *sql;
} else if (unlikely(**sql == SPACE && *data)) {
*len = *sql - *data;
break;
}
(*sql)++;
}
}
static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SSmlLineInfo *elements, SSmlMsgBuf *msg) {
if(is_same_child_table_telnet(elements, &info->preLine) == 0){
return TSDB_CODE_SUCCESS;
}
bool isSameMeasure = IS_SAME_SUPER_TABLE;
int cnt = 0;
SArray *preLineKV = info->preLineTagKV;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(!isSameMeasure){
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
if(unlikely(sMeta == NULL)){
sMeta = smlBuildSTableMeta(info->dataFormat);
STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
sMeta->tableMeta = pTableMeta;
if(pTableMeta == NULL){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL);
}
info->currSTableMeta = sMeta->tableMeta;
superKV = sMeta->tags;
if(unlikely(taosArrayGetSize(superKV) == 0)){
isSuperKVInit = false;
}
taosArraySetSize(preLineKV, 0);
}
}else{
taosArraySetSize(preLineKV, 0);
}
const char *sql = data;
while (sql < sqlEnd) {
JUMP_SPACE(sql, sqlEnd)
if (unlikely(*sql == '\0')) break;
const char *key = sql;
size_t keyLen = 0;
// parse key
while (sql < sqlEnd) {
if (unlikely(*sql == SPACE)) {
smlBuildInvalidDataMsg(msg, "invalid data", sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(*sql == EQUAL)) {
keyLen = sql - key;
sql++;
break;
}
sql++;
}
if (unlikely(IS_INVALID_COL_LEN(keyLen))) {
smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
// if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) {
// smlBuildInvalidDataMsg(msg, "dumplicate key", key);
// return TSDB_CODE_TSC_DUP_NAMES;
// }
// parse value
const char *value = sql;
size_t valueLen = 0;
while (sql < sqlEnd) {
// parse value
if (unlikely(*sql == SPACE)) {
break;
}
if (unlikely(*sql == EQUAL)) {
smlBuildInvalidDataMsg(msg, "invalid data", sql);
return TSDB_CODE_SML_INVALID_DATA;
}
sql++;
}
valueLen = sql - value;
if (unlikely(valueLen == 0)) {
smlBuildInvalidDataMsg(msg, "invalid value", value);
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen};
if(info->dataFormat){
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(unlikely(cnt >= taosArrayGetSize(preLineKV))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *preKV = (SSmlKv *)taosArrayGet(preLineKV, cnt);
if(unlikely(kv.length > preKV->length)){
preKV->length = kv.length;
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
ASSERT(tableMeta != NULL);
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *preKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.length > preKV->length)) {
preKV->length = kv.length;
}else{
kv.length = preKV->length;
}
info->needModifySchema = true;
if(unlikely(!IS_SAME_KEY)){
info->dataFormat = false;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
taosArrayPush(superKV, &kv);
}
taosArrayPush(preLineKV, &kv);
}
}else{
taosArrayPush(preLineKV, &kv);
}
cnt++;
}
SSmlTableInfo *tinfo = (SSmlTableInfo *)nodeListGet(info->childTables, elements, POINTER_BYTES, is_same_child_table_telnet);
if (unlikely(tinfo == NULL)) {
tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen);
if (!tinfo) {
return TSDB_CODE_OUT_OF_MEMORY;
}
tinfo->tags = taosArrayDup(preLineKV, NULL);
smlSetCTableName(tinfo);
if (info->dataFormat) {
info->currSTableMeta->uid = tinfo->uid;
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if (tinfo->tableDataCtx == NULL) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
}
SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo));
*key = *elements;
tinfo->key = key;
nodeListSet(&info->childTables, key, POINTER_BYTES, tinfo, is_same_child_table_telnet);
}
if (info->dataFormat) info->currTableDataCtx = tinfo->tableDataCtx;
return TSDB_CODE_SUCCESS;
}
// format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements) {
if (!sql) return TSDB_CODE_SML_INVALID_DATA;
// parse metric
smlParseTelnetElement(&sql, sqlEnd, &elements->measure, &elements->measureLen);
if (unlikely((!(elements->measure) || IS_INVALID_TABLE_LEN(elements->measureLen)))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
// parse timestamp
smlParseTelnetElement(&sql, sqlEnd, &elements->timestamp, &elements->timestampLen);
if (unlikely(!elements->timestamp || elements->timestampLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
return TSDB_CODE_SML_INVALID_DATA;
}
bool needConverTime = false; // TS is parsed before the tags (and before the table meta is fetched), so it may need precision conversion afterwards
if(info->dataFormat && info->currSTableMeta == NULL){
needConverTime = true;
}
int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
if (unlikely(ts < 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
return TSDB_CODE_INVALID_TIMESTAMP;
}
SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
// parse value
smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen);
if (unlikely(!elements->cols || elements->colsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
return TSDB_CODE_TSC_INVALID_VALUE;
}
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
JUMP_SPACE(sql, sqlEnd)
elements->tags = sql;
elements->tagsLen = sqlEnd - sql;
if (unlikely(!elements->tags || elements->tagsLen == 0)) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
return TSDB_CODE_TSC_INVALID_VALUE;
}
int ret = smlParseTelnetTags(info, sql, sqlEnd, elements, &info->msgBuf);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret;
}
if(unlikely(info->reRun)){
return TSDB_CODE_SUCCESS;
}
if(info->dataFormat){
if(needConverTime) {
kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision);
}
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0);
if(ret == TSDB_CODE_SUCCESS){
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1);
}
if(ret == TSDB_CODE_SUCCESS){
ret = smlBuildRow(info->currTableDataCtx);
}
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
return ret;
}
}else{
if(elements->colArray == NULL){
elements->colArray = taosArrayInit(16, sizeof(SSmlKv));
}
taosArrayPush(elements->colArray, &kvTs);
taosArrayPush(elements->colArray, &kv);
}
info->preLine = *elements;
return TSDB_CODE_SUCCESS;
}
\ No newline at end of file
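The function above parses one OpenTSDB telnet line per the format comment (<metric> <timestamp> <value> <tagk>=<tagv> ...). For orientation, a hedged sketch of feeding such lines through the public schemaless API; the host, credentials, and the database "test" are placeholders assumed to exist:

#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  /* 10-digit timestamp = seconds, 13-digit = milliseconds (see smlGetTsTypeByLen) */
  char *lines[] = {
      "sys.cpu.usage 1626006833 23.5 host=web01 dc=dc1",
      "sys.cpu.usage 1626006834 21.0 host=web02 dc=dc1",
  };
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 2, TSDB_SML_TELNET_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}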
This diff has been collapsed.
...@@ -530,7 +530,6 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_commit_cb* userCb, void* userParam) {
char* topic;
int32_t vgId;
ASSERT(msg != NULL);
if (TD_RES_TMQ(msg)) {
SMqRspObj* pRspObj = (SMqRspObj*)msg;
topic = pRspObj->topic;
...@@ -809,8 +808,6 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
} else {
ASSERT(0);
}
taosFreeQitem(pTaskType);
}
...@@ -953,10 +950,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
const char* user = conf->user == NULL ? TSDB_DEFAULT_USER : conf->user;
const char* pass = conf->pass == NULL ? TSDB_DEFAULT_PASS : conf->pass;
ASSERT(user);
ASSERT(pass);
ASSERT(conf->groupId[0]);
pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic));
pTmq->mqueue = taosOpenQueue();
pTmq->qall = taosAllocateQall();
...@@ -1247,8 +1240,6 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(0);
}
taosMemoryFree(pMsg->pData);
......
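The hunks above drop the hard ASSERTs on user, password, and group id in tmq_consumer_new; callers are still expected to supply group.id through the conf API. A hedged usage sketch against the public TMQ interface (connection values are placeholders):

#include <stdio.h>
#include "taos.h"

int main(void) {
  char errstr[256] = {0};
  tmq_conf_t *conf = tmq_conf_new();
  /* group.id is required for a consumer; the other values are placeholders */
  tmq_conf_set(conf, "group.id", "demo_group");
  tmq_conf_set(conf, "td.connect.user", "root");
  tmq_conf_set(conf, "td.connect.pass", "taosdata");

  tmq_t *consumer = tmq_consumer_new(conf, errstr, sizeof(errstr));
  tmq_conf_destroy(conf);
  if (consumer == NULL) {
    fprintf(stderr, "tmq_consumer_new failed: %s\n", errstr);
    return 1;
  }
  tmq_consumer_close(consumer);
  return 0;
}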
This diff has been collapsed.
...@@ -298,8 +298,8 @@ int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
}
static int compareKv(const void* p1, const void* p2) {
SSmlKv* kv1 = *(SSmlKv**)p1;
SSmlKv* kv2 = *(SSmlKv**)p2;
SSmlKv* kv1 = (SSmlKv*)p1;
SSmlKv* kv2 = (SSmlKv*)p2;
int32_t kvLen1 = kv1->keyLen;
int32_t kvLen2 = kv2->keyLen;
int32_t res = strncasecmp(kv1->key, kv2->key, TMIN(kvLen1, kvLen2));
...@@ -320,7 +320,7 @@ void buildChildTableName(RandTableName* rName) {
taosArraySort(rName->tags, compareKv);
for (int j = 0; j < taosArrayGetSize(rName->tags); ++j) {
taosStringBuilderAppendChar(&sb, ',');
SSmlKv* tagKv = taosArrayGetP(rName->tags, j);
SSmlKv* tagKv = taosArrayGet(rName->tags, j);
taosStringBuilderAppendStringLen(&sb, tagKv->key, tagKv->keyLen);
taosStringBuilderAppendChar(&sb, '=');
if (IS_VAR_DATA_TYPE(tagKv->type)) {
......
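The compareKv/taosArrayGet change reflects that rName->tags now stores SSmlKv elements by value, so the sort comparator receives a pointer to the element itself rather than to a stored pointer. A standalone qsort analogue of the corrected pattern (the Kv type here is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

typedef struct { const char *key; int keyLen; } Kv;

/* qsort hands the comparator pointers to the elements themselves, so for an
   array of Kv values the cast is (const Kv*), not (const Kv**) */
static int compareKv(const void *p1, const void *p2) {
  const Kv *a = (const Kv *)p1;
  const Kv *b = (const Kv *)p2;
  int minLen = a->keyLen < b->keyLen ? a->keyLen : b->keyLen;
  int res = strncasecmp(a->key, b->key, minLen);
  return res != 0 ? res : a->keyLen - b->keyLen;
}

int main(void) {
  Kv tags[] = {{"host", 4}, {"dc", 2}, {"rack", 4}};
  qsort(tags, 3, sizeof(Kv), compareKv);
  for (int i = 0; i < 3; ++i) printf("%s\n", tags[i].key); /* dc, host, rack */
  return 0;
}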
...@@ -117,7 +117,7 @@ STSchema *genSTSchema(int16_t nCols) {
}
STSchema *pResult = NULL;
pResult = tdGetSTSChemaFromSSChema(pSchema, nCols, 1);
pResult = tBuildTSchema(pSchema, nCols, 1);
taosMemoryFree(pSchema);
return pResult;
......
...@@ -326,13 +326,11 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
// deserialize ast
if (nodesStringToNode(pObj->ast, &pAst) < 0) {
/*ASSERT(0);*/
goto FAIL;
}
// extract output schema from ast
if (qExtractResultSchema(pAst, (int32_t *)&pObj->outputSchema.nCols, &pObj->outputSchema.pSchema) != 0) {
/*ASSERT(0);*/
goto FAIL;
}
...@@ -347,13 +345,11 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
// using ast and param to build physical plan
if (qCreateQueryPlan(&cxt, &pPlan, NULL) < 0) {
/*ASSERT(0);*/
goto FAIL;
}
// save physcial plan
if (nodesNodeToString((SNode *)pPlan, false, &pObj->physicalPlan, NULL) != 0) {
/*ASSERT(0);*/
goto FAIL;
}
...@@ -361,7 +357,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
if (pCreate->numOfTags) {
pObj->tagSchema.pSchema = taosMemoryCalloc(pCreate->numOfTags, sizeof(SSchema));
}
ASSERT(pCreate->numOfTags == taosArrayGetSize(pCreate->pTags));
/*A(pCreate->numOfTags == taosArrayGetSize(pCreate->pTags));*/
for (int32_t i = 0; i < pCreate->numOfTags; i++) {
SField *pField = taosArrayGet(pCreate->pTags, i);
pObj->tagSchema.pSchema[i].colId = pObj->outputSchema.nCols + i + 1;
...@@ -378,9 +374,6 @@ FAIL:
}
int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
if (pTask->taskLevel == TASK_LEVEL__AGG) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
SEncoder encoder;
tEncoderInit(&encoder, NULL, 0);
tEncodeSStreamTask(&encoder, pTask);
...@@ -545,8 +538,6 @@ _OVER:
}
static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
ASSERT(pTask->nodeId != 0);
// vnode
/*if (pTask->nodeId > 0) {*/
SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq));
...@@ -808,10 +799,9 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
int32_t sz = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < sz; j++) {
SStreamTask *pTask = taosArrayGetP(pLevel, j);
ASSERT(pTask->nodeId > 0);
/*A(pTask->nodeId > 0);*/
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->nodeId);
if (pVgObj == NULL) {
ASSERT(0);
taosRUnLockLatch(&pStream->lock);
mndReleaseStream(pMnode, pStream);
mndTransDrop(pTrans);
...@@ -871,7 +861,6 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
SMDropStreamReq dropReq = {0};
if (tDeserializeSMDropStreamReq(pReq->pCont, pReq->contLen, &dropReq) < 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
......
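The recurring pattern in this hunk replaces hard ASSERTs with recoverable error paths (goto FAIL, or terrno plus an error return). A minimal standalone sketch of the same defensive style; the error value and function are illustrative, not TDengine's:

#include <stdio.h>

static int g_errno; /* stand-in for terrno */

/* instead of assert(buf && len > 0), validate input and fail gracefully */
static int parseReq(const char *buf, int len) {
  if (buf == NULL || len <= 0) {
    g_errno = -22; /* stand-in for TSDB_CODE_INVALID_MSG */
    return -1;
  }
  return 0;
}

int main(void) {
  if (parseReq(NULL, 0) < 0) printf("rejected: err=%d\n", g_errno);
  return 0;
}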
This diff has been collapsed.