Commit f42ce55a authored by H Hongze Cheng

Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/row_refact

@@ -261,6 +261,66 @@ taos> select hyperloglog(dbig) from shll;
Query OK, 1 row(s) in set (0.008388s)
```
### HISTOGRAM
```
SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
```
**Description**: Returns the distribution of data points across user-specified ranges.
**Return value type**: DOUBLE if the normalization parameter normalized is set to 1; otherwise INT64.
**Applicable column types**: Numeric types.
**Applicable versions**: Since version 2.6.0.0.
**Applicable table types**: table, STable.
**Explanations**
1. bin_type: the bucket type specified by the user; valid inputs are "user_input", "linear_bin", and "log_bin".
2. bin_description: describes how to generate the buckets; for the three bucket types the formats are as follows (all are JSON-format strings):
 - "user_input": "[1, 3, 5, 7]"
   The user specifies the exact bin values.
 - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
   "start" is the starting point, "width" is the bin width, "count" is the total number of bins, and "infinity" indicates whether (-inf, inf) are added as the overall start and end points.
   The descriptor above generates the bins [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
 - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
   "start" is the starting point, "factor" is the exponential growth factor, "count" is the total number of bins, and "infinity" indicates whether (-inf, inf) are added as the overall start and end points.
   The descriptor above generates the bins [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
3. normalized: whether the results are normalized to the range between 0 and 1. Valid inputs are 0 and 1.
**Examples**
```mysql
taos> SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
histogram(voltage, "user_input", "[1,3,5,7]", 1) |
=======================================================
{"lower_bin":1, "upper_bin":3, "count":0.333333} |
{"lower_bin":3, "upper_bin":5, "count":0.333333} |
{"lower_bin":5, "upper_bin":7, "count":0.333333} |
Query OK, 3 row(s) in set (0.004273s)
taos> SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 1, "width": 3, "count": 3, "infinity": false}', 0) FROM meters;
histogram(voltage, 'linear_bin', '{"start": 1, "width": 3, " |
===================================================================
{"lower_bin":1, "upper_bin":4, "count":3} |
{"lower_bin":4, "upper_bin":7, "count":3} |
{"lower_bin":7, "upper_bin":10, "count":3} |
Query OK, 3 row(s) in set (0.004887s)
taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3, "infinity": true}', 0) FROM meters;
histogram(voltage, 'log_bin', '{"start": 1, "factor": 3, "count" |
===================================================================
{"lower_bin":-inf, "upper_bin":1, "count":3} |
{"lower_bin":1, "upper_bin":3, "count":2} |
{"lower_bin":3, "upper_bin":9, "count":6} |
{"lower_bin":9, "upper_bin":27, "count":3} |
{"lower_bin":27, "upper_bin":inf, "count":1} |
```
## Selection Functions
When using any of the selection functions, the timestamp column or tag columns (including tbname) can be specified in the output, making it easy to tell which data row each selected value comes from.
@@ -698,7 +758,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```
**Description**: Returns k consecutive records after skipping the last offset_val records, without ignoring NULL values. offset_val can be omitted, in which case the last k records are returned. When offset_val is given, the function is equivalent to `order by ts desc LIMIT k OFFSET offset_val`.
**Parameter ranges**: k: [1,100]; offset_val: [0,100].
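As a hedged illustration (the table `meters` and column `current` are placeholder names, not defined on this page), the following non-interactive call skips the 2 newest records and returns the 3 before them:

```bash
# Equivalent to: ORDER BY ts DESC LIMIT 3 OFFSET 2, with NULL values kept.
taos -s "SELECT TAIL(current, 3, 2) FROM meters;"
```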
......
@@ -222,21 +222,9 @@ In TDengine, the timezone of a timestamp is always handled by the client, independent of the server
### 23. Which network ports does TDengine 2.0 use?
For the network ports used, please see the documentation: [serverport](/reference/config/#serverport)
Note that the port numbers listed in the documentation assume the default port 6030; if the settings in the configuration file are changed, the listed ports change accordingly. Administrators can refer to this information to adjust firewall settings.
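As a minimal sketch for administrators (the hostname is a placeholder), you can verify from a client machine that the default ports are reachable after adjusting the firewall:

```bash
# Check the default server port (6030) and the RESTful port (6041);
# replace tdengine-host with your server's FQDN.
nc -zv tdengine-host 6030
curl -u root:taosdata -d "show databases" http://tdengine-host:6041/rest/sql
```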
### 24. Why is the RESTful interface unresponsive, why can't Grafana add TDengine as a data source, and why does TDengineGUI fail to connect even with port 6041 selected?
......
@@ -259,6 +259,74 @@ taos> select hyperloglog(dbig) from shll;
Query OK, 1 row(s) in set (0.008388s)
```
### HISTOGRAM
```
SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
```
**Description**: Returns the count of data points in user-specified ranges.
**Return value type**: DOUBLE if the normalized parameter is set to 1; otherwise INT64.
**Applicable column types**: Numeric types.
**Applicable versions**: Since version 2.6.0.0.
**Applicable table types**: table, STable
**Explanations**
1. bin_type: parameter to indicate the bucket type; valid inputs are "user_input", "linear_bin", and "log_bin".
2. bin_description: parameter to describe how to generate buckets; it can be in the following JSON formats for each bin_type, respectively:
 - "user_input": "[1, 3, 5, 7]": User-specified bin values.
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
"start" - bin starting point.
"width" - bin offset.
"count" - number of bins generated.
"infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
"start" - bin starting point.
"factor" - exponential factor of bin offset.
"count" - number of bins generated.
"infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
The above "log_bin" descriptor generates a set of bins:[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
3. normalized: setting to 1/0 to turn on/off result normalization.
**Example**
```mysql
taos> SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
histogram(voltage, "user_input", "[1,3,5,7]", 1) |
=======================================================
{"lower_bin":1, "upper_bin":3, "count":0.333333} |
{"lower_bin":3, "upper_bin":5, "count":0.333333} |
{"lower_bin":5, "upper_bin":7, "count":0.333333} |
Query OK, 3 row(s) in set (0.004273s)
taos> SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 1, "width": 3, "count": 3, "infinity": false}', 0) FROM meters;
histogram(voltage, 'linear_bin', '{"start": 1, "width": 3, " |
===================================================================
{"lower_bin":1, "upper_bin":4, "count":3} |
{"lower_bin":4, "upper_bin":7, "count":3} |
{"lower_bin":7, "upper_bin":10, "count":3} |
Query OK, 3 row(s) in set (0.004887s)
taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3, "infinity": true}', 0) FROM meters;
histogram(voltage, 'log_bin', '{"start": 1, "factor": 3, "count" |
===================================================================
{"lower_bin":-inf, "upper_bin":1, "count":3} |
{"lower_bin":1, "upper_bin":3, "count":2} |
{"lower_bin":3, "upper_bin":9, "count":6} |
{"lower_bin":9, "upper_bin":27, "count":3} |
{"lower_bin":27, "upper_bin":inf, "count":1} |
```
## Selection Functions
When any selection function is used, the timestamp column or tag columns (including `tbname`) can be specified in the output to show which rows the selected values come from.
......
@@ -30,7 +30,7 @@ taosAdapter provides the following features.
### Install taosAdapter
taosAdapter has been part of the TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need any additional steps to install taosAdapter. You can visit the [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later versions). If you need to deploy taosAdapter separately, on a server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
### Start/Stop taosAdapter
@@ -38,7 +38,7 @@ On Linux systems, the taosAdapter service is managed by `systemd` by default. Yo
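For instance, a minimal sketch of managing the service with `systemd` (assuming the unit name `taosadapter` used by a standard server installation):

```bash
# Start, inspect, and stop the taosAdapter service via systemd.
sudo systemctl start taosadapter
sudo systemctl status taosadapter
sudo systemctl stop taosadapter
```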
### Remove taosAdapter
If you installed TDengine with the tar.gz package, use the command `rmtaos` to remove the TDengine server software, including taosAdapter. If you installed with a .deb or .rpm package, use the corresponding package manager command, such as apt or rpm, to remove the TDengine server, including taosAdapter.
### Upgrade taosAdapter
@@ -240,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
## Memory usage optimization methods
taosAdapter monitors its memory usage during operation and adjusts it with two thresholds. Valid values are integers between 1 and 100, representing a percentage of the system's physical memory.
- pauseQueryMemoryThreshold
- pauseAllMemoryThreshold
@@ -276,7 +276,7 @@ Corresponding configuration parameter
monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70)
```
You should adjust this parameter based on your specific application scenario and operations strategy. We recommend using monitoring software to keep track of the system's memory status. The load balancer can also check the taosAdapter running status through this interface.
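As a hedged sketch (launching taosAdapter manually rather than through `systemd`), the threshold can be set via the environment variable documented above:

```bash
# Pause new queries once taosAdapter's memory usage exceeds 80% of
# physical memory (the default threshold is 70).
export TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD=80
taosadapter
```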
## taosAdapter Monitoring Metrics
@@ -325,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo
## How to migrate from older TDengine versions to taosAdapter
In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is standalone software managed by `systemd` and has its own process ID. There are some configuration parameters and behaviors that differ between the two. See the following table for details.
| **#** | **embedded httpd** | **taosAdapter** | **comment** |
| ----- | ------------------ | --------------- | ----------- |
......
@@ -7,7 +7,7 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe
## Introduction
taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and types of databases, super tables, tag columns, and data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users.
## Installation
@@ -21,7 +21,7 @@ There are two ways to install taosBenchmark:
### Configuration and running methods
taosBenchmark supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file.
@@ -35,7 +35,7 @@ Execute the following commands to quickly experience taosBenchmark's default con
taosBenchmark
```
When run without parameters, taosBenchmark connects to the TDengine cluster specified in `/etc/taos` by default and creates a database named `test`, a super table named `meters` under the test database, and 10,000 tables under the super table, with 10,000 records written to each table. Note that if there is already a database named `test`, this command will delete it first and create a new `test` database.
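A quick, hedged way to verify the result of the default run (assuming the `taos` CLI is installed locally):

```bash
# 10,000 sub-tables x 10,000 rows should yield 100,000,000 rows in total.
taos -s "SELECT COUNT(*) FROM test.meters;"
```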
### Run with command-line configuration parameters
@@ -45,7 +45,7 @@ The `-f <json file>` argument cannot be used when running taosBenchmark with com
taosBenchmark -I stmt -n 200 -t 100
```
Using the above command, `taosBenchmark` will create a database named `test`, create a super table `meters` in it, create 100 sub-tables in the super table, and insert 200 records into each sub-table using parameter binding.
### Run with the configuration file
@@ -95,10 +95,10 @@ taosBenchmark -f <json file>
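A minimal, hedged sketch of such a configuration file: the nesting under `super_tables` and the individual keys follow the parameter descriptions below, but this is an illustrative fragment, not a complete or authoritative schema; consult the configuration file reference for the full set of required fields.

```bash
# Write an illustrative insert configuration and run it. The dbinfo
# block and the exact top-level layout are assumptions to verify
# against your taosBenchmark version.
cat > insert.json <<'EOF'
{
  "filetype": "insert",
  "host": "127.0.0.1",
  "databases": [{
    "dbinfo": { "name": "test", "drop": "yes" },
    "super_tables": [{
      "name": "meters",
      "child_table_prefix": "d",
      "insert_mode": "taosc",
      "data_source": "rand",
      "timestamp_step": 1,
      "disorder_ratio": 0
    }]
  }]
}
EOF
taosBenchmark -f insert.json
```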
## Command-line argument in detailed
- **-f/--file <json file\>** :
specify the configuration file to use. This file includes all parameters. Users should not use this parameter together with other parameters on the command line. There is no default value.
- **-c/--config-dir <dir\>** :
specify the directory containing the TDengine cluster configuration file. The default path is `/etc/taos`.
- **-h/--host <host\>** :
Specify the FQDN of the TDengine server to connect to. The default value is localhost.
@@ -272,13 +272,13 @@ The parameters for creating super tables are configured in `super_tables` in the
- **child_table_prefix** : The prefix of the child table name; a mandatory configuration item with no default value.
- **escape_character**: specify whether the super table and child table names contain escape characters. The value can be "yes" or "no". The default is "no".
- **auto_create_table**: takes effect only when insert_mode is taosc, rest, or stmt, and childtable_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting.
- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value; if the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating.
- **data_source**: specify the source of the generated data. The default is "rand", i.e. data randomly generated by taosBenchmark. Users can configure it as "rand" or "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter.
- **insert_mode**: insertion mode with options taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface write, parameter binding interface write, schemaless interface write, and restful schemaless interface write (provided by taosAdapter). The default value is taosc.
@@ -300,15 +300,15 @@ The parameters for creating super tables are configured in `super_tables` in the
- **partial_col_num**: if this value is a positive number n, only the first n columns are written to (effective only when insert_mode is taosc or rest); if n is 0, all columns are written.
- **disorder_ratio** : Specifies the percentage probability of disordered (i.e. out-of-order) data, in the value range [0,50]. The default is 0, which means there is no disordered data.
- **disorder_range** : Specifies the timestamp fallback range for disordered data. A disordered timestamp is generated by subtracting a random value in this range from the timestamp that would be used in the non-disordered case. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database. For example, if `precision` is milliseconds, the timestamp step will be in milliseconds. The default value is 1.
- **start_timestamp** : The timestamp start value of each sub-table, the default value is now.
- **sample_format**: The type of the sample data file; for now only "csv" is supported.
- **sample_file**: Specify a CSV format file as the data source. It only works when data_source is "sample". If the number of rows in the CSV file is less than or equal to prepared_rand, taosBenchmark will read the CSV file data cyclically until prepared_rand rows have been obtained; otherwise, taosBenchmark will read only the first prepared_rand rows. The final number of rows of data generated is the smaller of the two. A small illustration is sketched below.
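A small, hypothetical illustration of the cyclic-read rule above:

```bash
# Create a 3-row sample file for a super table with a single FLOAT
# data column (the column layout must match the super table's schema).
cat > sample.csv <<'EOF'
10.1
10.2
10.3
EOF
# With data_source = "sample", sample_file = "./sample.csv" and a
# prepared_rand of 5, rows are consumed cyclically as 1, 2, 3, 1, 2.
```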
@@ -341,7 +341,7 @@ The configuration parameters for specifying super table tag columns and data col
- **create_table_thread_count** : The number of threads used to create tables; the default is 8.
- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same as the number of threads specified.
- **result_file** : The path to the result output file, the default value is ./output.txt.
......
---
title: taosdump
description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster."
---
## Introduction
taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster.
taosdump can back up a database, a super table, or a normal table as a logical data unit, or back up data records in databases, super tables, and normal tables. When using taosdump, you can specify the directory path for the data backup. If you do not specify a directory, taosdump backs up the data to the current directory by default.
If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means that the same path can only be used for one backup.
Please be careful if you see such a prompt, and please ensure that you follow best practices and relevant SOPs for data integrity, backup, and data security.
Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data.
@@ -30,7 +31,7 @@ There are two ways to install taosdump:
2. backup multiple specified databases: use the `-D db1,db2,...` parameter;
3. back up some super or normal tables in the specified database: use the `dbname stbname1 stbname2 tbname1 tbname2 ...` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces.
4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is generated by TDengine's own operation, and taosdump will not back up the log database by default. If users need to back up the log database, they can use the `-a` or `--allow-sys` command-line parameter.
5. Loose mode backup: taosdump version 1.4.1 onwards provides the `-n` and `-L` parameters for backing up data in "loose" mode, without using escape characters. If table names, column names, and tag names do not use escape characters, this can reduce the backup time and the backup data footprint. If you are unsure whether the `-n` and `-L` conditions are met, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. Example invocations of these modes are sketched below.
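Hedged example invocations of the modes above (database names and paths are placeholders; the flags are taken from the help text quoted below):

```bash
# 1. Back up all databases to a dedicated, empty directory.
taosdump -A -o /data/backup/all
# 2. Back up two named databases.
taosdump -D db1,db2 -o /data/backup/dbs
# 3. Back up selected super/normal tables from one database.
taosdump -o /data/backup/tables dbname stbname1 tbname1
# Restore a previous backup into the connected cluster.
taosdump -i /data/backup/all
```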
:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will parse the schema only.
@@ -58,7 +59,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
or: taosdump [OPTION...] -i inpath
or: taosdump [OPTION...] -o outpath
  -h, --host=HOST            Server host from which to dump data. Default is
                             localhost.
  -p, --password             User password to connect to server. Default is
                             taosdata.
@@ -71,10 +72,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
  -r, --resultFile=RESULTFILE DumpOut/In Result file path and name.
  -a, --allow-sys            Allow to dump system database
  -A, --all-databases        Dump all databases.
  -D, --databases=DATABASES  Dump listed databases. Use comma to separate
                             database names.
  -N, --without-property     Dump database without its properties.
  -s, --schemaonly           Only dump table schemas.
  -y, --answer-yes           Input yes for prompt. It will skip data file
                             checking!
  -d, --avro-codec=snappy    Choose an avro codec among null, deflate, snappy,
@@ -97,7 +98,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
                             and try. The workable value is related to the
                             length of the row and type of table schema.
  -I, --inspect              inspect avro file content and print on screen
  -L, --loose-mode           Use loose mode if the table name and column name
                             use letter and number only. Default is NOT.
  -n, --no-escape            No escape char '`'. Default is using it.
  -T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
......
@@ -5,11 +5,11 @@ sidebar_label: TDinsight
TDinsight is a solution for monitoring TDengine using the built-in native monitoring database and [Grafana].
After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics at specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and the [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts, and many other metrics. This is very convenient for developers who want to monitor the status of a TDengine cluster in real time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.
## System Requirements
To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`).
## Installing Grafana
@@ -17,7 +17,7 @@ We recommend using the latest [Grafana] version 7 or 8 here. You can install Gra
### Installing Grafana on Debian or Ubuntu
For Debian or Ubuntu operating systems, we recommend using the Grafana package repository and the following command to install from scratch.
```bash
sudo apt-get install -y apt-transport-https
@@ -71,7 +71,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167), and write the parameters supplied as command-line options into the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
Assuming you use TDengine and Grafana's default services on the same host, run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
......
@@ -13,7 +13,7 @@ The TDengine image starts with the HTTP service activated by default, using the
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
```
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
```shell
curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
@@ -34,7 +34,7 @@ taos> show databases;
Query OK, 1 row(s) in set (0.002843s)
```
The TDengine server running in the container uses the container's hostname to establish a connection. Using the TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine instance inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container, using the TDengine CLI or various connectors, in more complex scenarios.
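For quick interactive checks, you can also open the TDengine CLI inside the container itself; a minimal sketch, assuming the container name `tdengine` from the `docker run` command above:

```bash
# Run the TDengine CLI inside the running container.
docker exec -it tdengine taos
```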
## Start TDengine on the host network
@@ -42,7 +42,7 @@ The TDengine server running in the container uses the container's hostname to es
docker run -d --name tdengine --network host tdengine/tdengine
```
The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command.
```shell
$ taos
@@ -382,7 +382,7 @@ password: taosdata
Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:
```docker
version: "3"
networks:
  inter:
......
@@ -78,7 +78,7 @@ taos --dump-config
| Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0; the default port of the REST service is 6041 |
:::note
TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. The table below describes the ports used by TDengine in detail.
:::
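As a hedged sketch (using `ufw`; adapt it to your firewall tooling), opening the 13 consecutive ports that start at the default `serverPort` of 6030:

```bash
# Open TCP and UDP ports 6030-6042, i.e. 13 consecutive ports
# starting at the default serverPort.
sudo ufw allow 6030:6042/tcp
sudo ufw allow 6030:6042/udp
```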
@@ -197,7 +197,7 @@ TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by
| Default Value | TimeZone configured in the host |
:::info
To handle data insertion and data queries from multiple timezones, Unix Timestamps are used and stored in TDengine. The timestamp generated in any timezone at the same moment is identical as a Unix timestamp. To make sure the time on the client side can be converted to a Unix timestamp correctly, the timezone must be set properly.
On Linux systems, TDengine clients automatically obtain the timezone from the host. Alternatively, the timezone can be configured explicitly in the configuration file `taos.cfg` like below.
@@ -209,7 +209,7 @@ timezone Asia/Shanghai
The above examples are all proper configurations for the timezone of UTC+8. On Windows systems, however, `timezone Asia/Shanghai` is not supported; it must be set as `timezone UTC-8`.
The setting for timezone impacts strings that are not in Unix timestamp format and keywords or functions related to date/time. For example:
```sql
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
@@ -227,7 +227,7 @@ If the timezone is UTC, it's equal to
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the problems of using time strings, Unix timestamps can be used directly. Furthermore, time strings with timezone information can be used in SQL statements. For example, "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format is not influenced by the timezone setting when converted to a Unix timestamp.
:::
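For example, a hedged sketch reusing the placeholder table name from the queries above; the explicit offset makes the literal independent of the client timezone:

```bash
# The +08:00 offset is part of the literal, so the client timezone
# setting does not change the resulting Unix timestamp.
taos -s "SELECT count(*) FROM table_name WHERE TS < '2013-04-12T15:52:01.123+08:00';"
```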
@@ -244,7 +244,7 @@ A specific type "nchar" is provided in TDengine to store non-ASCII characters su
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, GB18030 or GBK on some Chinese systems, POSIX in docker, and CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>; for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, and "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows systems, another configuration parameter `charset` must be used to configure the charset, because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
:::
@@ -263,7 +263,7 @@ On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the cha
locale zh_CN.UTF-8
```
On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
```title="Effective charset is GBK"
locale zh_CN.UTF-8
@@ -778,7 +778,7 @@ To prevent system resource from being exhausted by multiple concurrent streams,
## HTTP Parameters
:::note
The HTTP service was provided by `taosd` prior to version 2.4.0.0 and is provided by `taosAdapter` from version 2.4.0.0 onward.
The parameters described in this section are only applicable to versions prior to 2.4.0.0. If you are using version 2.4.0.0 or later, please refer to [taosAdapter](/reference/taosadapter/).
:::
......
---
title: Schemaless Writing
description: "The schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface."
---
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring, and so on. Due to changes in business or functional requirements, or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine, starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability.
## Schemaless Writing Line Protocol

...@@ -76,8 +76,7 @@ If the subtable obtained by the parse line protocol does not exist, Schemaless c...

8. Errors encountered throughout the processing will interrupt the writing process and return an error code.

:::tip
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB. See [TAOS SQL Boundary Limits](/taos-sql/limit) for the specific constraints in this area.
:::
## Time resolution recognition

...@@ -87,7 +86,7 @@

Three specified modes are supported in the schemaless writing process, as shown in the following table:

| **Serial** | **Value** | **Description** |
| ---------- | ------------------- | --------------------------- |
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
| 3 | SML_JSON_PROTOCOL | JSON protocol format |

In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.

...@@ -106,8 +105,11 @@ In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined from the length of the timestamp itself, and the user-specified time resolution is ignored.
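A hedged companion sketch for the OpenTSDB telnet mode, reusing the `taos` connection from the earlier sketch (the metric line is illustrative): because the precision is inferred from the timestamp length, the precision argument is passed as `TSDB_SML_TIMESTAMP_NOT_CONFIGURED`.

```c
// 13-digit timestamp => millisecond precision is inferred from the line itself
char *telnetLines[] = {"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2"};
TAOS_RES *res2 = taos_schemaless_insert(taos, telnetLines, 1, TSDB_SML_TELNET_PROTOCOL,
                                        TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
```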
## Data schema mapping rules

This section describes how data for line protocols are mapped to data with a schema. The measurement in each line protocol is mapped to the super table name, while:

- The tag name in tag_set is the name of the tag in the data schema
- The name in field_set is the column's name

The following data is used as an example to illustrate the mapping rules.
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
...@@ -139,7 +141,7 @@
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
```
When the first line is parsed, column c5 is declared as a BINARY(4) field. When the second line is written, c5 is still parsed as a BINARY column, but its width is now 6, so the width of the BINARY field is increased to accommodate the new string.
```json
st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
```
...
...@@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod...

#collectd
collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. TDengine supports both the direct collection plugin and the write_tsdb plugin.

#### Configure the direct collection plugin

Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
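For reference, a minimal sketch of what this direct-collection configuration typically looks like (the placeholders, and the commonly documented default direct-collection port 6045, are assumptions to adapt to your deployment):

```text
LoadPlugin network
<Plugin network>
         Server "<taosAdapter's host>" "<port for collectd direct>"
</Plugin>
```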
...@@ -62,7 +62,7 @@
```text
LoadPlugin write_tsdb
...
</Plugin>
```
Where <taosAdapter's host\> is the domain name or IP address of the server running taosAdapter, and <port for collectd write_tsdb plugin\> is the port that taosAdapter uses to receive collectd write_tsdb plugin data (default is 6047).
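A fuller hedged sketch of the write_tsdb block, with the <Node\> sub-block layout assumed from collectd's write_tsdb plugin documentation rather than taken from this commit:

```text
LoadPlugin write_tsdb
<Plugin write_tsdb>
        <Node>
                Host "<taosAdapter's host>"
                Port "<port for collectd write_tsdb plugin>"
                StoreRates false
                AlwaysAppendDS false
        </Node>
</Plugin>
```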
```text
LoadPlugin write_tsdb
```
...
...@@ -17,7 +17,7 @@
```text
password = "taosdata"
...
```
The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. Fill in the actual user and password for TDengine. After changing the configuration file, you need to restart the taosAdapter.
- You can also enable taosAdapter to receive tcollector data by using the taosAdapter command-line parameters or setting environment variables.

...@@ -25,7 +25,7 @@ The taosAdapter writes to the database with the default name `tcollector`. You c...

To use TCollector, you need to download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration items are in its source code. Note: TCollector differs significantly from version to version, so here is an example of the latest code for the current master branch (git commit: 37ae920).
Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port on which taosAdapter supports TCollector (default is 6049).

Example of git diff output of source code changes.

...
...@@ -3,13 +3,13 @@ sidebar_label: Grafana
title: Grafana
---
TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard.
You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).

## Prerequisites

In order for Grafana to add the TDengine data source successfully, the following preparation is required:

1. The TDengine cluster is deployed and functioning properly
2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
...@@ -36,7 +36,7 @@
```bash
GF_VERSION=3.1.4
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
Taking CentOS 7.2 as an example, extract the plugin package to the /var/lib/grafana/plugins directory and restart Grafana.
```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```

...@@ -76,13 +76,13 @@ Enter the datasource configuration page, and follow the default prompts to modif...
- User: TDengine user name.
- Password: TDengine user password.

Click `Save & Test` to test. You should see a success message if the test worked.

![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp)

### Create Dashboard

Go back to the main interface to create a dashboard and click Add Query to enter the panel query page:

![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp)

...
...@@ -5,7 +5,7 @@ title: Telegraf writing
import Telegraf from "../14-reference/_telegraf.mdx"
Telegraf is a popular open-source metrics collection software. Telegraf can collect the operational information of various components without requiring custom collection scripts, reducing the difficulty of data acquisition.

Telegraf's data can be written to TDengine by simply adding the output configuration of Telegraf to the URL corresponding to taosAdapter and modifying several configuration items. Once Telegraf data is in TDengine, you can take advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data.

...
...@@ -6,7 +6,7 @@ title: collectd writing
import CollectD from "../14-reference/_collectd.mdx"
collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically collects system performance statistics while the system is running and stores the information. You can use this information to help identify current system performance bottlenecks and predict future system load.

You can write the data collected by collectd to TDengine by simply pointing the collectd configuration to the domain name (or IP address) and corresponding port of the server running taosAdapter. This takes full advantage of TDengine's efficient storage and query performance and clustering capability for time-series data.

...
...@@ -7,7 +7,7 @@ import StatsD from "../14-reference/_statsd.mdx"

StatsD is a simple daemon for aggregating application metrics, which has evolved rapidly in recent years into a unified protocol for collecting application performance metrics.
You can write StatsD data to TDengine by simply modifying the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. This takes full advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data.

## Prerequisites

...
...@@ -5,7 +5,7 @@ title: icinga2 writing
import Icinga2 from "../14-reference/_icinga2.mdx"
icinga2 is an open-source host and network monitoring software initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.

You can write the data collected by icinga2 to TDengine by simply modifying the icinga2 configuration to point to the taosAdapter server and the corresponding port, taking advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data.

...
...@@ -3,7 +3,7 @@ sidebar_label: EMQX Broker
title: EMQX Broker writing
---
MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code; you only need to set up "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service, and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.
## Prerequisites

...
...@@ -5,16 +5,16 @@ title: Quickly Build IT DevOps Visualization System with TDengine + Telegraf + G...
## Background

TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance.

IT DevOps metric data is usually time sensitive, for example:

- System resource metrics: CPU, memory, IO, bandwidth, etc.
- Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics.

Current mainstream IT DevOps systems usually include a data collection module, a data persistence module, and a visualization module; Telegraf and Grafana are among the most popular data collection and visualization modules, respectively. The data persistence module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance.

This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code, simply by modifying a few lines in configuration files. The architecture is as follows.

![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp)
...@@ -79,5 +79,5 @@ Click on the plus icon on the left and select `Import` to get the data from `htt...

## Wrap-up

The above demonstrates how to quickly build an IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and its ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.

Please refer to the official documentation and product implementation cases for other features.
...@@ -5,17 +5,17 @@ title: Quickly build an IT DevOps visualization system using TDengine + collectd...

## Background

TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance.

IT DevOps metric data is usually time sensitive, for example:

- System resource metrics: CPU, memory, IO, bandwidth, etc.
- Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics.

The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistence module, and a visual display module. collectd and StatsD, as long-established open-source data collection tools, have a wide user base. However, collectd/StatsD has limited functionality and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system.

The new version of TDengine supports multiple data protocols and can accept data from collectd and StatsD directly, and it provides a Grafana dashboard for graphical display.

This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code, simply by modifying a few lines in configuration files. The architecture is shown in the following figure.

![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp)
...@@ -99,6 +99,6 @@ Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob...

## Wrap-up

TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and its ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes.

For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases.
...@@ -55,9 +55,9 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
bool qIsInsertSql(const char* pStr, size_t length);

// for async mode
int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq);
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
                            const struct SMetaData* pMetaData, SQuery* pQuery);

void qDestroyQuery(SQuery* pQueryNode);
...
...@@ -368,7 +368,11 @@ typedef enum ELogicConditionType {
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))

#ifdef WINDOWS
#define TSDB_MAX_RPC_THREADS 4  // Windows named pipes support only 4 connections
#else
#define TSDB_MAX_RPC_THREADS 5
#endif

#define TSDB_QUERY_TYPE_NON_TYPE      0x00u  // none type
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u  // free qhandle at vnode
...
...@@ -605,6 +605,10 @@ static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols
 * @param pCols
 */
int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS
  printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__,
         TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows);
#endif
  if (TD_IS_TP_ROW(pRow)) {
    return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge);
  } else if (TD_IS_KV_ROW(pRow)) {
...
...@@ -53,6 +53,11 @@ typedef enum {
  MND_AUTH_MAX
} EAuthOp;

typedef enum {
  TRN_STEP_LOG = 1,
  TRN_STEP_ACTION = 2,
} ETrnStep;

typedef enum {
  TRN_STAGE_PREPARE = 0,
  TRN_STAGE_REDO_LOG = 1,
...
...@@ -468,7 +473,7 @@ typedef struct {
  char*          ast;
  char*          physicalPlan;
  SSchemaWrapper schema;
  // int32_t     refConsumerCnt;
} SMqTopicObj;

typedef struct {
...
...@@ -22,6 +22,13 @@
extern "C" {
#endif

typedef enum {
  TRANS_START_FUNC_TEST = 1,
  TRANS_STOP_FUNC_TEST = 2,
  TRANS_START_FUNC_MQ_REB = 3,
  TRANS_STOP_FUNC_TEST_MQ_REB = 4,
} ETrnFunc;

typedef struct {
  SEpSet epSet;
  tmsg_t msgType;
...
...@@ -33,12 +40,17 @@ typedef struct {
  void *pCont;
} STransAction;

typedef struct {
  SSdbRaw *pRaw;
} STransLog;

typedef struct {
  ETrnStep     stepType;
  STransAction redoAction;
  STransAction undoAction;
  STransLog    redoLog;
  STransLog    undoLog;
} STransStep;

typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen);
...
...@@ -55,7 +67,7 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void    mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void    mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
void    mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
void    mndTransSetExecOneByOne(STrans *pTrans);
...
...@@ -414,6 +414,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
      goto SUBSCRIBE_OVER;
    }

#if 0
    // ref topic to prevent drop
    // TODO make topic complete
    SMqTopicObj topicObj = {0};
...
...@@ -422,6 +423,7 @@
    mInfo("subscribe topic %s by consumer %ld cgroup %s, refcnt %d", pTopic->name, consumerId, cgroup,
          topicObj.refConsumerCnt);
    if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) goto SUBSCRIBE_OVER;
#endif

    mndReleaseTopic(pMnode, pTopic);
  }
...
...@@ -1044,9 +1044,9 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
  if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
  if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
  if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
  if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
  if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
  if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER;

  SUserObj *pUser = mndAcquireUser(pMnode, pDb->createUser);
...
...@@ -21,6 +21,7 @@
#include "mndMnode.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndTopic.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"
...
...@@ -188,7 +189,15 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) {
    bool          create = false;
    SMqOffsetObj *pOffsetObj = mndAcquireOffset(pMnode, key);
    if (pOffsetObj == NULL) {
      SMqTopicObj *pTopic = mndAcquireTopic(pMnode, pOffset->topicName);
      if (pTopic == NULL) {
        terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST;
        mError("submit offset to topic %s failed since %s", pOffset->topicName, terrstr());
        continue;
      }
      pOffsetObj = taosMemoryMalloc(sizeof(SMqOffsetObj));
      pOffsetObj->dbUid = pTopic->dbUid;
      mndReleaseTopic(pMnode, pTopic);
      memcpy(pOffsetObj->key, key, TSDB_PARTITION_KEY_LEN);
      create = true;
    }
...
...@@ -286,7 +286,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
  pStream->tasks = taosArrayInit(totLevel, sizeof(void*));

  bool hasExtraSink = false;
  if (totLevel == 2 || strcmp(pStream->sourceDb, pStream->targetDb) != 0) {
    SArray* taskOneLevel = taosArrayInit(0, sizeof(void*));
    taosArrayPush(pStream->tasks, &taskOneLevel);
    // add extra sink
...
...@@ -407,7 +407,7 @@
      /*pTask->dispatchMsgType = TDMT_VND_TASK_WRITE_EXEC;*/
      pTask->dispatchMsgType = TDMT_VND_TASK_DISPATCH;
      SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
      ASSERT(pDb);
      if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
        sdbRelease(pSdb, pDb);
...
...@@ -393,6 +393,15 @@ static int32_t mndCreateStream(SMnode *pMnode, SRpcMsg *pReq, SCMCreateStreamReq
  streamObj.trigger = pCreate->triggerType;
  streamObj.waterMark = pCreate->watermark;

  if (streamObj.targetSTbName[0]) {
    pDb = mndAcquireDbByStb(pMnode, streamObj.targetSTbName);
    if (pDb == NULL) {
      terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
      return -1;
    }
    tstrncpy(streamObj.targetDb, pDb->name, TSDB_DB_FNAME_LEN);
  }

  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_STREAM, pReq);
  if (pTrans == NULL) {
    mError("stream:%s, failed to create since %s", pCreate->name, terrstr());
...
...@@ -157,6 +157,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM
  int32_t vgId = pRebVg->pVgEp->vgId;
  SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId);
  if (pVgObj == NULL) {
    ASSERT(0);
    taosMemoryFree(buf);
    return -1;
  }
...
...@@ -451,6 +452,7 @@
      taosArrayPush(pConsumerNew->rebNewTopics, &topic);
      mndReleaseConsumer(pMnode, pConsumerOld);
      if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
        ASSERT(0);
        goto REB_FAIL;
      }
    }
...
...@@ -469,9 +471,11 @@
      taosArrayPush(pConsumerNew->rebRemovedTopics, &topic);
      mndReleaseConsumer(pMnode, pConsumerOld);
      if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
        ASSERT(0);
        goto REB_FAIL;
      }
    }
#if 0
  if (consumerNum) {
    char topic[TSDB_TOPIC_FNAME_LEN];
    char cgroup[TSDB_CGROUP_LEN];
...
...@@ -486,17 +490,24 @@
      pTopic->refConsumerCnt = topicObj.refConsumerCnt;
      mInfo("subscribe topic %s unref %d consumer cgroup %s, refcnt %d", pTopic->name, consumerNum, cgroup,
            topicObj.refConsumerCnt);
      if (mndSetTopicCommitLogs(pMnode, pTrans, &topicObj) != 0) {
        ASSERT(0);
        goto REB_FAIL;
      }
    }
  }
#endif

  // 4. TODO commit log: modification log

  // 5. set cb
  mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_TEST_MQ_REB, NULL, 0);

  // 6. execution
  if (mndTransPrepare(pMnode, pTrans) != 0) {
    ASSERT(0);
    goto REB_FAIL;
  }

  mndTransDrop(pTrans);
  return 0;
...
...@@ -15,6 +15,7 @@
#include "mndTopic.h"
#include "mndAuth.h"
#include "mndConsumer.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
...
...@@ -121,7 +122,7 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
    SDB_SET_BINARY(pRaw, dataPos, swBuf, schemaLen, TOPIC_ENCODE_OVER);
  }
  /*SDB_SET_INT32(pRaw, dataPos, pTopic->refConsumerCnt, TOPIC_ENCODE_OVER);*/

  SDB_SET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_ENCODE_OVER);
  SDB_SET_DATALEN(pRaw, dataPos, TOPIC_ENCODE_OVER);
...
...@@ -221,7 +222,7 @@
    pTopic->schema.pSchema = NULL;
  }
  /*SDB_GET_INT32(pRaw, dataPos, &pTopic->refConsumerCnt, TOPIC_DECODE_OVER);*/

  SDB_GET_RESERVE(pRaw, dataPos, MND_TOPIC_RESERVE_SIZE, TOPIC_DECODE_OVER);
...
...@@ -253,7 +254,7 @@ static int32_t mndTopicActionUpdate(SSdb *pSdb, SMqTopicObj *pOldTopic, SMqTopic
  atomic_exchange_64(&pOldTopic->updateTime, pNewTopic->updateTime);
  atomic_exchange_32(&pOldTopic->version, pNewTopic->version);
  /*atomic_store_32(&pOldTopic->refConsumerCnt, pNewTopic->refConsumerCnt);*/

  /*taosWLockLatch(&pOldTopic->lock);*/
...
...@@ -327,7 +328,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
  topicObj.version = 1;
  topicObj.sql = strdup(pCreate->sql);
  topicObj.sqlLen = strlen(pCreate->sql) + 1;
  /*topicObj.refConsumerCnt = 0;*/

  if (pCreate->ast && pCreate->ast[0]) {
    topicObj.ast = strdup(pCreate->ast);
...
...@@ -492,8 +493,8 @@ static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTo
}

static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
  SMnode        *pMnode = pReq->info.node;
  SSdb          *pSdb = pMnode->pSdb;
  SMDropTopicReq dropReq = {0};

  if (tDeserializeSMDropTopicReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
...
...@@ -513,12 +514,36 @@
    }
  }

  void           *pIter = NULL;
  SMqConsumerObj *pConsumer;
  while (1) {
    pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer);
    if (pIter == NULL) break;
    if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue;
    int32_t sz = taosArrayGetSize(pConsumer->assignedTopics);
    for (int32_t i = 0; i < sz; i++) {
      char *name = taosArrayGetP(pConsumer->assignedTopics, i);
      if (strcmp(name, pTopic->name) == 0) {
        mndReleaseConsumer(pMnode, pConsumer);
        mndReleaseTopic(pMnode, pTopic);
        terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED;
        mError("topic:%s, failed to drop since subscribed by consumer %ld from cgroup %s", dropReq.name,
               pConsumer->consumerId, pConsumer->cgroup);
        return -1;
      }
    }
    sdbRelease(pSdb, pConsumer);
  }

#if 0
  if (pTopic->refConsumerCnt != 0) {
    mndReleaseTopic(pMnode, pTopic);
    terrno = TSDB_CODE_MND_TOPIC_SUBSCRIBED;
    mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
    return -1;
  }
#endif

  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_DROP_TOPIC, pReq);
  if (pTrans == NULL) {
...
...@@ -464,15 +464,15 @@ static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen)
  mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen);
}

static TransCbFp mndTransGetCbFp(ETrnFunc ftype) {
  switch (ftype) {
    case TRANS_START_FUNC_TEST:
      return mndTransTestStartFunc;
    case TRANS_STOP_FUNC_TEST:
      return mndTransTestStopFunc;
    case TRANS_START_FUNC_MQ_REB:
      return mndRebCntInc;
    case TRANS_STOP_FUNC_TEST_MQ_REB:
      return mndRebCntDec;
    default:
      return NULL;
...
...@@ -657,7 +657,7 @@
  pTrans->rpcRspLen = contLen;
}

void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen) {
  pTrans->startFunc = startFunc;
  pTrans->stopFunc = stopFunc;
  pTrans->param = param;
...
...@@ -123,7 +123,7 @@ class MndTestTrans2 : public ::testing::Test {
    sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);

    char *param = strdup("====> test log <=====");
    mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);

    if (pDb != NULL) {
      mndTransSetDbInfo(pTrans, pDb);
...
...@@ -156,7 +156,7 @@
    sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);

    char *param = strdup("====> test action <=====");
    mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);

    {
      STransAction action = {0};
...
...@@ -228,7 +228,7 @@
    sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);

    char *param = strdup("====> test log <=====");
    mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);

    int32_t code = mndTransPrepare(pMnode, pTrans);
    mndTransDrop(pTrans);
...
...@@ -79,7 +79,8 @@ struct STsdb {
struct STable {
  uint64_t  tid;
  uint64_t  uid;
  STSchema *pSchema;       // latest schema
  STSchema *pCacheSchema;  // cached schema of a specific version
};

#define TABLE_TID(t) (t)->tid
...
...@@ -181,12 +182,15 @@ int tsdbUnlockRepo(STsdb *pTsdb);
static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STsdb *pTsdb, STable *pTable, bool lock, bool copy,
                                                     int32_t version) {
  if ((version < 0) || (schemaVersion(pTable->pSchema) == version)) {
    // the latest schema was requested, or the latest schema already has the requested version
    return pTable->pSchema;
  }

  // otherwise serve the requested version through the one-slot cache,
  // refetching from meta only when the cached version does not match
  if (!pTable->pCacheSchema || (schemaVersion(pTable->pCacheSchema) != version)) {
    taosMemoryFreeClear(pTable->pCacheSchema);
    pTable->pCacheSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version);
  }
  return pTable->pCacheSchema;
}
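A usage sketch of the two-level lookup above (the uid and version values are hypothetical and not part of the diff):

```c
// latest schema (version < 0): served from pTable->pSchema, no meta lookup
STSchema *pLatest = tsdbGetTableSchemaImpl(pTsdb, pTable, false, false, -1);

// a specific older version: fetched once via metaGetTbTSchema, then reused
// from pTable->pCacheSchema on later calls that ask for the same version
STSchema *pV3 = tsdbGetTableSchemaImpl(pTsdb, pTable, false, false, 3);
```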
// tsdbMemTable.h
...
...@@ -300,7 +300,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
  pSW = metaGetTableSchema(pMeta, quid, sver, 0);
  if (!pSW) return NULL;

  tdInitTSchemaBuilder(&sb, pSW->version);
  for (int i = 0; i < pSW->nCols; i++) {
    pSchema = pSW->pSchema + i;
    tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);
...
...@@ -441,7 +441,7 @@ static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb
  if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
    // TODO: use the proper schema instead of 0, and cache STSchema in cache
    STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, -1);
    if (!pTSchema) {
      terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
      return TSDB_CODE_FAILED;
...
...@@ -466,7 +466,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) {
    pTbData = (STbData *)pNode->pData;

    pCommitIter = pCommith->iters + i;
    pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1);

    if (pTSchema) {
      pCommitIter->pIter = tSkipListCreateIter(pTbData->pData);
...
...@@ -475,7 +475,8 @@
      pCommitIter->pTable = (STable *)taosMemoryMalloc(sizeof(STable));
      pCommitIter->pTable->uid = pTbData->uid;
      pCommitIter->pTable->tid = pTbData->uid;
      pCommitIter->pTable->pSchema = pTSchema;
      pCommitIter->pTable->pCacheSchema = NULL;
    }
  }
  tSkipListDestroyIter(pSlIter);
...
...@@ -490,6 +491,7 @@ static void tsdbDestroyCommitIters(SCommitH *pCommith) {
    tSkipListDestroyIter(pCommith->iters[i].pIter);
    if (pCommith->iters[i].pTable) {
      tdFreeSchema(pCommith->iters[i].pTable->pSchema);
      tdFreeSchema(pCommith->iters[i].pTable->pCacheSchema);
      taosMemoryFreeClear(pCommith->iters[i].pTable);
    }
  }
...
...@@ -914,7 +916,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
  while (bidx < nBlocks) {
    if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) {
      // Set commit table
      pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, -1);  // TODO: schema version
      if (!pTSchema) {
        terrno = TSDB_CODE_OUT_OF_MEMORY;
        return -1;
...
...@@ -1395,7 +1395,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
  }

  if (pTsdbReadHandle->outputCapacity >= binfo.rows) {
    ASSERT(cur->blockCompleted || cur->mixBlock);
  }

  if (cur->rows == binfo.rows) {
...
@@ -2767,7 +2767,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
      code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
                                       pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
      if (code != 0) {
        taosMemoryFreeClear(pDataInfo->pRsp);
        goto _error;
      }
@@ -2788,7 +2788,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
      pDataInfo->status = EX_SOURCE_DATA_NOT_READY;
      code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i);
      if (code != TSDB_CODE_SUCCESS) {
        taosMemoryFreeClear(pDataInfo->pRsp);
        goto _error;
      }
    }
@@ -2895,7 +2895,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
             pDataInfo->totalRows, pLoadInfo->totalRows);
      pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED;
      pExchangeInfo->current += 1;
      taosMemoryFreeClear(pDataInfo->pRsp);
      continue;
    }
@@ -2922,7 +2922,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
    }
    pOperator->resultInfo.totalRows += pRes->info.rows;
    taosMemoryFreeClear(pDataInfo->pRsp);
    return pExchangeInfo->pResult;
  }
}
@@ -3384,7 +3384,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
  // todo add more information about exchange operation
  int32_t type = pOperator->operatorType;
  if (type == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE || type == QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN ||
-     type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+     type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN || type == QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN) {
    *order = TSDB_ORDER_ASC;
    *scanFlag = MAIN_SCAN;
    return TSDB_CODE_SUCCESS;
@@ -3499,14 +3499,15 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
}
int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) {
-  if(result == NULL || length == NULL){
+  if (result == NULL || length == NULL) {
    return TSDB_CODE_TSC_INVALID_INPUT;
  }
  SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
  SAggSupporter*  pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
  int32_t         size = taosHashGetSize(pSup->pResultRowHashTable);
  size_t          keyLen = sizeof(uint64_t) * 2;  // estimate the key length
-  int32_t totalSize = sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
+  int32_t totalSize =
+      sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
  *result = (char*)taosMemoryCalloc(1, totalSize);
  if (*result == NULL) {
@@ -3568,11 +3569,11 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
}
int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
-  if(result == NULL){
+  if (result == NULL) {
    return TSDB_CODE_TSC_INVALID_INPUT;
  }
  SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
  SAggSupporter*  pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
  // int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
  int32_t length = *(int32_t*)(result);
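The totalSize arithmetic above describes a flat, length-prefixed layout: two leading int32 fields, then per result row a length-prefixed key and a length-prefixed payload. A minimal sketch of that framing using only the C standard library (encodeRows and its parameters are illustrative, not the executor's actual API):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Layout sketched from the hunk above:
 * [int32 totalLen][int32 numOfRows] then, per row,
 * [int32 keyLen][key bytes][int32 rowLen][row bytes]. */
static char *encodeRows(const char *rows, int32_t n, int32_t keyLen, int32_t rowLen,
                        int32_t *outLen) {
  int32_t total = 2 * (int32_t)sizeof(int32_t) +
                  n * (2 * (int32_t)sizeof(int32_t) + keyLen + rowLen);
  char *buf = calloc(1, (size_t)total);
  if (buf == NULL) return NULL;
  char *p = buf;
  memcpy(p, &total, sizeof(total)); p += sizeof(total);
  memcpy(p, &n, sizeof(n));         p += sizeof(n);
  for (int32_t i = 0; i < n; ++i) {
    uint64_t key[2] = {(uint64_t)i, 0};                   /* stand-in for the hash key */
    memcpy(p, &keyLen, sizeof(keyLen));                   p += sizeof(keyLen);
    memcpy(p, key, (size_t)keyLen);                       p += keyLen;
    memcpy(p, &rowLen, sizeof(rowLen));                   p += sizeof(rowLen);
    memcpy(p, rows + (size_t)i * rowLen, (size_t)rowLen); p += rowLen;
  }
  *outLen = total;
  return buf;
}

int main(void) {
  char    rows[2][8] = {{1}, {2}};  /* two 8-byte "result rows" */
  int32_t len = 0;
  char *buf = encodeRows(&rows[0][0], 2, (int32_t)(sizeof(uint64_t) * 2), 8, &len);
  printf("encoded %d bytes\n", (int)len);  /* 8 + 2*(4+16+4+8) = 72 */
  free(buf);
  return 0;
}
```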
@@ -4512,8 +4513,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
  } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
    SScanPhysiNode*      pScanPhyNode = (SScanPhysiNode*)pPhyNode;  // simple child table.
    STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
-   STimeWindowAggSupp twSup = {.waterMark = pTableScanNode->watermark,
-                               .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN};
+   STimeWindowAggSupp twSup = {
+       .waterMark = pTableScanNode->watermark, .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN};
    tsdbReaderT pDataReader = NULL;
    if (pHandle->vnode) {
      pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
@@ -4527,9 +4528,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
    } else {
      qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo));
    }
    SArray*        tableIdList = extractTableIdList(pTableListInfo);
-   SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle,
-                                                           tableIdList, pTableScanNode, pTaskInfo, &twSup, pTableScanNode->tsColId);
+   SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, tableIdList, pTableScanNode,
+                                                           pTaskInfo, &twSup, pTableScanNode->tsColId);
    taosArrayDestroy(tableIdList);
    return pOperator;
@@ -4996,25 +4997,25 @@ _error:
  return NULL;
}
-int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t *length){
+int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t* length) {
  int32_t code = TDB_CODE_SUCCESS;
-  char *pCurrent = NULL;
+  char*   pCurrent = NULL;
  int32_t currLength = 0;
-  if(ops->fpSet.encodeResultRow){
-    if(result == NULL || length == NULL){
+  if (ops->fpSet.encodeResultRow) {
+    if (result == NULL || length == NULL) {
      return TSDB_CODE_TSC_INVALID_INPUT;
    }
    code = ops->fpSet.encodeResultRow(ops, &pCurrent, &currLength);
-    if(code != TDB_CODE_SUCCESS){
-      if(*result != NULL){
+    if (code != TDB_CODE_SUCCESS) {
+      if (*result != NULL) {
        taosMemoryFree(*result);
        *result = NULL;
      }
      return code;
    }
-    if(*result == NULL){
+    if (*result == NULL) {
      *result = (char*)taosMemoryCalloc(1, currLength + sizeof(int32_t));
      if (*result == NULL) {
        taosMemoryFree(pCurrent);
@@ -5022,9 +5023,9 @@ int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t *length){
      }
      memcpy(*result + sizeof(int32_t), pCurrent, currLength);
      *(int32_t*)(*result) = currLength + sizeof(int32_t);
-    }else{
+    } else {
      int32_t sizePre = *(int32_t*)(*result);
-      char* tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength);
+      char*   tmp = (char*)taosMemoryRealloc(*result, sizePre + currLength);
      if (tmp == NULL) {
        taosMemoryFree(pCurrent);
        taosMemoryFree(*result);
@@ -5041,33 +5042,33 @@ int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t *length){
  for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
    code = encodeOperator(ops->pDownstream[i], result, length);
-    if(code != TDB_CODE_SUCCESS){
+    if (code != TDB_CODE_SUCCESS) {
      return code;
    }
  }
  return TDB_CODE_SUCCESS;
}
-int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length){
+int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) {
  int32_t code = TDB_CODE_SUCCESS;
-  if(ops->fpSet.decodeResultRow){
-    if(result == NULL){
+  if (ops->fpSet.decodeResultRow) {
+    if (result == NULL) {
      return TSDB_CODE_TSC_INVALID_INPUT;
    }
    ASSERT(length == *(int32_t*)result);
    char* data = result + sizeof(int32_t);
    code = ops->fpSet.decodeResultRow(ops, data);
-    if(code != TDB_CODE_SUCCESS){
+    if (code != TDB_CODE_SUCCESS) {
      return code;
    }
    int32_t totalLength = *(int32_t*)result;
    int32_t dataLength = *(int32_t*)data;
-    if(totalLength == dataLength + sizeof(int32_t)) { // the last data
+    if (totalLength == dataLength + sizeof(int32_t)) {  // the last data
      result = NULL;
      length = 0;
-    }else{
+    } else {
      result += dataLength;
      *(int32_t*)(result) = totalLength - dataLength;
      length = totalLength - dataLength;
@@ -5076,7 +5077,7 @@ int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length){
  for (int32_t i = 0; i < ops->numOfDownstream; ++i) {
    code = decodeOperator(ops->pDownstream[i], result, length);
-    if(code != TDB_CODE_SUCCESS){
+    if (code != TDB_CODE_SUCCESS) {
      return code;
    }
  }
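decodeOperator walks the encoded buffer recursively: after one operator consumes its chunk, the cursor advances by the chunk length and the remaining total is patched in place, so each downstream operator again sees a well-formed [total][chunk...] prefix. A self-contained sketch of that cursor logic (the layout and names are illustrative, sketched from the hunk above):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Buffer shape assumed here: [int32 total][chunk][chunk]... where each chunk
 * begins with its own int32 length that already counts the length field.
 * Consuming a chunk advances by that length and rewrites the leading total,
 * mirroring the result/length bookkeeping in decodeOperator. */
static char *consumeChunk(char *buf) {
  int32_t total = 0, chunk = 0;
  memcpy(&total, buf, sizeof(total));
  char *data = buf + sizeof(int32_t);
  memcpy(&chunk, data, sizeof(chunk));
  printf("consumed chunk of %d bytes (total %d)\n", (int)chunk, (int)total);
  if (total == chunk + (int32_t)sizeof(int32_t)) {
    return NULL;                      /* the last data */
  }
  buf += chunk;                       /* step past the consumed chunk */
  int32_t rest = total - chunk;
  memcpy(buf, &rest, sizeof(rest));   /* patch the remaining total in place */
  return buf;
}

int main(void) {
  char    buf[20];
  int32_t total = 20, chunk = 8;      /* two chunks: 4-byte len + 4-byte body */
  memcpy(buf, &total, sizeof(total));
  memcpy(buf + 4, &chunk, sizeof(chunk));
  memcpy(buf + 12, &chunk, sizeof(chunk));
  for (char *p = buf; p != NULL; p = consumeChunk(p)) {
  }
  return 0;
}
```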
...
@@ -445,6 +445,11 @@ static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t
  }
  // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+   return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                          "The input parameter of STATECOUNT function can only be column");
+ }
  uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
  if (!IS_NUMERIC_TYPE(colType)) {
    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -480,6 +485,11 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32
  }
  // param0
+ SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+ if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+   return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                          "The input parameter of STATEDURATION function can only be column");
+ }
  uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
  if (!IS_NUMERIC_TYPE(colType)) {
    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -693,7 +703,7 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
  if (1 != LIST_LENGTH(pFunc->pParameterList)) {
-   return TSDB_CODE_SUCCESS;
+   return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
  }
  SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
@@ -1181,7 +1191,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
    .finalizeFunc = functionFinalize
  },
  {
-   .name = "state_count",
+   .name = "statecount",
    .type = FUNCTION_TYPE_STATE_COUNT,
    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC,
    .translateFunc = translateStateCount,
@@ -1191,7 +1201,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
    .finalizeFunc = NULL
  },
  {
-   .name = "state_duration",
+   .name = "stateduration",
    .type = FUNCTION_TYPE_STATE_DURATION,
    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC,
    .translateFunc = translateStateDuration,
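The two new guards reject anything other than a bare column as the first argument of STATECOUNT/STATEDURATION while the statement is still being translated, rather than letting a literal or expression fail later at execution time. A simplified stand-alone version of the same node-type check (hypothetical AST types, not TDengine's SNode):

```c
#include <stdio.h>

typedef enum { NODE_COLUMN, NODE_VALUE, NODE_OPERATOR } ENodeType;

typedef struct {
  ENodeType   type;
  const char *name;
} AstNode;

/* Mirrors the translate-time guard above: only a plain column is accepted
 * as param0, so invalid inputs are diagnosed during planning. */
static int validateStateFuncParam0(const AstNode *param, const char *func) {
  if (param->type != NODE_COLUMN) {
    fprintf(stderr, "The input parameter of %s function can only be column\n", func);
    return -1;
  }
  return 0;
}

int main(void) {
  AstNode col = {NODE_COLUMN, "voltage"};
  AstNode lit = {NODE_VALUE, "10"};
  printf("column:  %d\n", validateStateFuncParam0(&col, "STATECOUNT")); /* 0  */
  printf("literal: %d\n", validateStateFuncParam0(&lit, "STATECOUNT")); /* -1 */
  return 0;
}
```

The rename from "state_count"/"state_duration" to "statecount"/"stateduration" in the same hunks aligns the registered function names with the documented spelling.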
...
@@ -3776,6 +3776,7 @@ static void tailAssignResult(STailItem* pItem, char *data, int32_t colBytes, TSK
  if (isNull) {
    pItem->isNull = true;
  } else {
+   pItem->isNull = false;
    memcpy(pItem->data, data, colBytes);
  }
}
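This one-line fix matters because STailItem slots are reused: once a slot had recorded a NULL, the flag was never cleared, so a later real value still reported NULL. A minimal reproduction of the rule (reset the flag on every non-NULL assignment; simplified struct, not the real STailItem):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  bool isNull;
  char data[8];
} TailItem;

/* Before the fix, the non-NULL branch left isNull untouched; after it,
 * every assignment fully overwrites the slot's state. */
static void tailAssign(TailItem *item, const char *data, int bytes, bool isNull) {
  if (isNull) {
    item->isNull = true;
  } else {
    item->isNull = false; /* the one-line fix */
    memcpy(item->data, data, bytes);
  }
}

int main(void) {
  TailItem slot = {0};
  tailAssign(&slot, NULL, 0, true); /* slot holds NULL */
  char v = 42;
  tailAssign(&slot, &v, 1, false);  /* slot reused for a real value */
  printf("isNull=%d value=%d\n", slot.isNull, slot.data[0]); /* 0 42 */
  return 0;
}
```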
...
@@ -24,6 +24,7 @@ extern "C" {
#include "parUtil.h"
#include "parser.h"

+int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery);
int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery);
int32_t parse(SParseContext* pParseCxt, SQuery** pQuery);
int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery);
...
@@ -67,12 +67,15 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
+int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
+int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
                               SParseMetaCache* pMetaCache);
+int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache);
int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache);
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta);
int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
@@ -80,7 +83,7 @@ int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName,
int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
                                int32_t* pTableNum);
int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);
-int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDb, AUTH_TYPE type,
+int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
                             bool* pPass);
int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo);
...
@@ -333,68 +333,22 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
      return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt);
    case QUERY_NODE_SELECT_STMT:
      return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt);
-   case QUERY_NODE_VNODE_MODIF_STMT:
-   case QUERY_NODE_CREATE_DATABASE_STMT:
-   case QUERY_NODE_DROP_DATABASE_STMT:
-   case QUERY_NODE_ALTER_DATABASE_STMT:
-     break;
    case QUERY_NODE_CREATE_TABLE_STMT:
      return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt);
-   case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
-     break;
    case QUERY_NODE_CREATE_MULTI_TABLE_STMT:
      return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt);
-   case QUERY_NODE_DROP_TABLE_CLAUSE:
-   case QUERY_NODE_DROP_TABLE_STMT:
-   case QUERY_NODE_DROP_SUPER_TABLE_STMT:
-     break;
    case QUERY_NODE_ALTER_TABLE_STMT:
      return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt);
-   case QUERY_NODE_CREATE_USER_STMT:
-   case QUERY_NODE_ALTER_USER_STMT:
-   case QUERY_NODE_DROP_USER_STMT:
-     break;
    case QUERY_NODE_USE_DATABASE_STMT:
      return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt);
-   case QUERY_NODE_CREATE_DNODE_STMT:
-   case QUERY_NODE_DROP_DNODE_STMT:
-   case QUERY_NODE_ALTER_DNODE_STMT:
-     break;
    case QUERY_NODE_CREATE_INDEX_STMT:
      return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt);
-   case QUERY_NODE_DROP_INDEX_STMT:
-   case QUERY_NODE_CREATE_QNODE_STMT:
-   case QUERY_NODE_DROP_QNODE_STMT:
-   case QUERY_NODE_CREATE_BNODE_STMT:
-   case QUERY_NODE_DROP_BNODE_STMT:
-   case QUERY_NODE_CREATE_SNODE_STMT:
-   case QUERY_NODE_DROP_SNODE_STMT:
-   case QUERY_NODE_CREATE_MNODE_STMT:
-   case QUERY_NODE_DROP_MNODE_STMT:
-     break;
    case QUERY_NODE_CREATE_TOPIC_STMT:
      return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt);
-   case QUERY_NODE_DROP_TOPIC_STMT:
-   case QUERY_NODE_DROP_CGROUP_STMT:
-   case QUERY_NODE_ALTER_LOCAL_STMT:
-     break;
    case QUERY_NODE_EXPLAIN_STMT:
      return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt);
-   case QUERY_NODE_DESCRIBE_STMT:
-   case QUERY_NODE_RESET_QUERY_CACHE_STMT:
-   case QUERY_NODE_COMPACT_STMT:
-   case QUERY_NODE_CREATE_FUNCTION_STMT:
-   case QUERY_NODE_DROP_FUNCTION_STMT:
-     break;
    case QUERY_NODE_CREATE_STREAM_STMT:
      return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt);
-   case QUERY_NODE_DROP_STREAM_STMT:
-   case QUERY_NODE_MERGE_VGROUP_STMT:
-   case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
-   case QUERY_NODE_SPLIT_VGROUP_STMT:
-   case QUERY_NODE_SYNCDB_STMT:
-   case QUERY_NODE_GRANT_STMT:
-   case QUERY_NODE_REVOKE_STMT:
    case QUERY_NODE_SHOW_DNODES_STMT:
      return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt);
    case QUERY_NODE_SHOW_MNODES_STMT:
@@ -407,8 +361,6 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
      return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
    case QUERY_NODE_SHOW_BNODES_STMT:
      return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
-   case QUERY_NODE_SHOW_CLUSTER_STMT:
-     break;
    case QUERY_NODE_SHOW_DATABASES_STMT:
      return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
    case QUERY_NODE_SHOW_FUNCTIONS_STMT:
@@ -429,25 +381,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
      return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt);
    case QUERY_NODE_SHOW_TOPICS_STMT:
      return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt);
-   case QUERY_NODE_SHOW_CONSUMERS_STMT:
-   case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
-   case QUERY_NODE_SHOW_SMAS_STMT:
-   case QUERY_NODE_SHOW_CONFIGS_STMT:
-   case QUERY_NODE_SHOW_CONNECTIONS_STMT:
-   case QUERY_NODE_SHOW_QUERIES_STMT:
-   case QUERY_NODE_SHOW_VNODES_STMT:
-   case QUERY_NODE_SHOW_APPS_STMT:
-   case QUERY_NODE_SHOW_SCORES_STMT:
-   case QUERY_NODE_SHOW_VARIABLE_STMT:
-   case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
-   case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
-   case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
-     break;
    case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
      return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt);
-   case QUERY_NODE_KILL_CONNECTION_STMT:
-   case QUERY_NODE_KILL_QUERY_STMT:
-   case QUERY_NODE_KILL_TRANSACTION_STMT:
    default:
      break;
  }
...
@@ -64,6 +64,7 @@ typedef struct SInsertParseContext {
  int32_t            totalNum;
  SVnodeModifOpStmt* pOutput;
  SStmtCallback*     pStmtCb;
+ SParseMetaCache*   pMetaCache;
} SInsertParseContext;
typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);
@@ -93,15 +94,15 @@ typedef struct SMemParam {
    }                                                                  \
  } while (0)
-static int32_t skipInsertInto(SInsertParseContext* pCxt) {
+static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) {
  SToken sToken;
-  NEXT_TOKEN(pCxt->pSql, sToken);
+  NEXT_TOKEN(*pSql, sToken);
  if (TK_INSERT != sToken.type) {
-   return buildSyntaxErrMsg(&pCxt->msg, "keyword INSERT is expected", sToken.z);
+   return buildSyntaxErrMsg(pMsg, "keyword INSERT is expected", sToken.z);
  }
-  NEXT_TOKEN(pCxt->pSql, sToken);
+  NEXT_TOKEN(*pSql, sToken);
  if (TK_INTO != sToken.type) {
-   return buildSyntaxErrMsg(&pCxt->msg, "keyword INTO is expected", sToken.z);
+   return buildSyntaxErrMsg(pMsg, "keyword INTO is expected", sToken.z);
  }
  return TSDB_CODE_SUCCESS;
}
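skipInsertInto now takes just a SQL cursor and an error sink instead of the whole SInsertParseContext, which is what lets the full parser and the new syntax-only pass share it. A sketch of the same decoupling with a hypothetical keyword matcher (standard C; not the real tokenizer):

```c
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* A helper that needs only a cursor and an error buffer can be reused by any
 * number of parse contexts, mirroring skipInsertInto(char**, SMsgBuf*). */
static int expectWord(const char **pSql, const char *word, char *err, int errLen) {
  while (isspace((unsigned char)**pSql)) (*pSql)++;
  size_t n = strlen(word);
  if (strncasecmp(*pSql, word, n) != 0) {
    snprintf(err, (size_t)errLen, "keyword %s is expected", word);
    return -1;
  }
  *pSql += n;
  return 0;
}

int main(void) {
  const char *sql = "INSERT INTO t1 VALUES (now, 1)";
  char        err[64] = {0};
  if (expectWord(&sql, "INSERT", err, sizeof(err)) == 0 &&
      expectWord(&sql, "INTO", err, sizeof(err)) == 0) {
    printf("rest:%s\n", sql);
  } else {
    printf("error: %s\n", err);
  }
  return 0;
}
```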
@@ -251,25 +252,46 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
  return code;
}
-static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
+static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass) {
+  SParseContext* pBasicCtx = pCxt->pComCxt;
+  if (NULL != pCxt->pMetaCache) {
+    return getUserAuthFromCache(pCxt->pMetaCache, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass);
+  }
+  return catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser, pDbFname,
+                        AUTH_TYPE_WRITE, pPass);
+}
+
+static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) {
+  SParseContext* pBasicCtx = pCxt->pComCxt;
+  if (NULL != pCxt->pMetaCache) {
+    return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta);
+  }
+  if (isStb) {
+    return catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName,
+                                pTableMeta);
+  }
+  return catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pTableMeta);
+}
+
+static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) {
  SParseContext* pBasicCtx = pCxt->pComCxt;
+  if (NULL != pCxt->pMetaCache) {
+    return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg);
+  }
+  return catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTbName, pVg);
+}
+
+static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
  bool pass = false;
-  CHECK_CODE(catalogChkAuth(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pBasicCtx->pUser,
-                            dbFname, AUTH_TYPE_WRITE, &pass));
+  CHECK_CODE(checkAuth(pCxt, dbFname, &pass));
  if (!pass) {
    return TSDB_CODE_PAR_PERMISSION_DENIED;
  }
-  if (isStb) {
-    CHECK_CODE(catalogGetSTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name,
-                                    &pCxt->pTableMeta));
-  } else {
-    CHECK_CODE(catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name,
-                                   &pCxt->pTableMeta));
-    ASSERT(pCxt->pTableMeta->tableInfo.rowSize > 0);
+
+  CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta));
+  if (!isStb) {
    SVgroupInfo vg;
-    CHECK_CODE(
-        catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, &vg));
+    CHECK_CODE(getTableVgroup(pCxt, name, &vg));
    CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
  }
  return TSDB_CODE_SUCCESS;
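checkAuth, getTableSchema and getTableVgroup all follow one pattern: answer from the pre-warmed SParseMetaCache when one is attached, otherwise fall back to a catalog call. A hedged sketch of that read-through shape (stand-in types and calls; the real paths go through catalogChkAuth/catalogGetTableMeta/catalogGetTableHashVgroup):

```c
#include <stdio.h>

typedef struct {
  const char *cachedMeta; /* pre-warmed answer, or NULL */
} MetaCache;

/* Stand-in for a slow, networked catalog RPC. */
static int catalogFetch(const char *tb, const char **meta) {
  (void)tb;
  *meta = "meta-from-catalog";
  return 0;
}

/* Read-through lookup: cache first, catalog only when no cache is attached. */
static int getMeta(const MetaCache *cache, const char *tb, const char **meta) {
  if (cache != NULL && cache->cachedMeta != NULL) {
    *meta = cache->cachedMeta; /* async path: answered locally */
    return 0;
  }
  return catalogFetch(tb, meta); /* sync path: go to the catalog */
}

int main(void) {
  MetaCache   warm = {"meta-from-cache"};
  const char *m = NULL;
  getMeta(&warm, "t1", &m);
  printf("%s\n", m);
  getMeta(NULL, "t1", &m);
  printf("%s\n", m);
  return 0;
}
```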
@@ -867,10 +889,8 @@ static int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName,
                              int32_t len, STableMeta* pMeta) {
  SVgroupInfo vg;
-  SParseContext* pBasicCtx = pCxt->pComCxt;
-  CHECK_CODE(
-      catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, pTableName, &vg));
+  CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg));
  CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
  pMeta->uid = 0;
@@ -1256,12 +1276,11 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
    if (NULL == *pQuery) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }
-   (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
-   (*pQuery)->haveResultSet = false;
-   (*pQuery)->msgType = TDMT_VND_SUBMIT;
-   (*pQuery)->pRoot = (SNode*)context.pOutput;
  }
+ (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ (*pQuery)->haveResultSet = false;
+ (*pQuery)->msgType = TDMT_VND_SUBMIT;
+ (*pQuery)->pRoot = (SNode*)context.pOutput;
  if (NULL == (*pQuery)->pTableList) {
    (*pQuery)->pTableList = taosArrayInit(taosHashGetSize(context.pTableNameHashObj), sizeof(SName));
@@ -1272,7 +1291,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
  context.pOutput->payloadType = PAYLOAD_TYPE_KV;
-  int32_t code = skipInsertInto(&context);
+  int32_t code = skipInsertInto(&context.pSql, &context.msg);
  if (TSDB_CODE_SUCCESS == code) {
    code = parseInsertBody(&context);
  }
@@ -1287,6 +1306,171 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
  return code;
}

+typedef struct SInsertParseSyntaxCxt {
+  SParseContext*   pComCxt;
+  char*            pSql;
+  SMsgBuf          msg;
+  SParseMetaCache* pMetaCache;
+} SInsertParseSyntaxCxt;
+
+static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) {
+  SToken sToken;
+  while (1) {
+    NEXT_TOKEN(pCxt->pSql, sToken);
+    if (TK_NK_RP == sToken.type) {
+      break;
+    }
+    if (0 == sToken.n) {
+      return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL);
+    }
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+
+// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
+static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
+  int32_t numOfRows = 0;
+  SToken  sToken;
+  while (1) {
+    int32_t index = 0;
+    NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+    if (TK_NK_LP != sToken.type) {
+      break;
+    }
+    pCxt->pSql += index;
+
+    CHECK_CODE(skipParentheses(pCxt));
+    ++numOfRows;
+  }
+  if (0 == numOfRows) {
+    return buildSyntaxErrMsg(&pCxt->msg, "no any data points", NULL);
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+
+// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...)
+static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
+  SToken sToken;
+  NEXT_TOKEN(pCxt->pSql, sToken);
+  if (TK_NK_LP == sToken.type) {
+    CHECK_CODE(skipBoundColumns(pCxt));
+    NEXT_TOKEN(pCxt->pSql, sToken);
+  }
+
+  if (TK_TAGS != sToken.type) {
+    return buildSyntaxErrMsg(&pCxt->msg, "TAGS is expected", sToken.z);
+  }
+  // pSql -> (tag1_value, ...)
+  NEXT_TOKEN(pCxt->pSql, sToken);
+  if (TK_NK_LP != sToken.type) {
+    return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
+  }
+  CHECK_CODE(skipTagsClause(pCxt));
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+  SName name;
+  CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
+  CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
+  CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
+  CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
+  bool hasData = false;
+  // for each table
+  while (1) {
+    SToken sToken;
+
+    // pSql -> tb_name ...
+    NEXT_TOKEN(pCxt->pSql, sToken);
+
+    // no data in the sql string anymore.
+    if (sToken.n == 0) {
+      if (sToken.type && pCxt->pSql[0]) {
+        return buildSyntaxErrMsg(&pCxt->msg, "invalid charactor in SQL", sToken.z);
+      }
+
+      if (!hasData) {
+        return buildInvalidOperationMsg(&pCxt->msg, "no data in sql");
+      }
+      break;
+    }
+
+    hasData = false;
+
+    SToken tbnameToken = sToken;
+    NEXT_TOKEN(pCxt->pSql, sToken);
+
+    // USING clause
+    if (TK_USING == sToken.type) {
+      NEXT_TOKEN(pCxt->pSql, sToken);
+      CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+      CHECK_CODE(skipUsingClause(pCxt));
+      NEXT_TOKEN(pCxt->pSql, sToken);
+    } else {
+      CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
+    }
+
+    if (TK_NK_LP == sToken.type) {
+      // pSql -> field1_name, ...)
+      CHECK_CODE(skipBoundColumns(pCxt));
+      NEXT_TOKEN(pCxt->pSql, sToken);
+    }
+
+    if (TK_VALUES == sToken.type) {
+      // pSql -> (field1_value, ...) [(field1_value2, ...) ...]
+      CHECK_CODE(skipValuesClause(pCxt));
+      hasData = true;
+      continue;
+    }
+
+    // FILE csv_file_path
+    if (TK_FILE == sToken.type) {
+      // pSql -> csv_file_path
+      NEXT_TOKEN(pCxt->pSql, sToken);
+      if (0 == sToken.n || (TK_NK_STRING != sToken.type && TK_NK_ID != sToken.type)) {
+        return buildSyntaxErrMsg(&pCxt->msg, "file path is required following keyword FILE", sToken.z);
+      }
+      hasData = true;
+      continue;
+    }
+
+    return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", sToken.z);
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseInsertSyntax(SParseContext* pContext, SQuery** pQuery) {
+  SInsertParseSyntaxCxt context = {.pComCxt = pContext,
+                                   .pSql = (char*)pContext->pSql,
+                                   .msg = {.buf = pContext->pMsg, .len = pContext->msgLen},
+                                   .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
+  if (NULL == context.pMetaCache) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  int32_t code = skipInsertInto(&context.pSql, &context.msg);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = parseInsertBodySyntax(&context);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    *pQuery = taosMemoryCalloc(1, sizeof(SQuery));
+    if (NULL == *pQuery) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+    TSWAP((*pQuery)->pMetaCache, context.pMetaCache);
+  }
+  return code;
+}
+
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
                     int32_t msgBufLen) {
  SMsgBuf msg = {.buf = msgBuf, .len = msgBufLen};
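Taken together, parseInsertSyntax and the reworked parseInsertSql implement a two-pass insert parse: pass one only tokenizes far enough to reserve the table metadata, vgroup and write-auth keys the statement will need, the caller resolves them in a single batched catalog request, and pass two parses for real against the warmed cache. A compact sketch of that control flow (all names below are illustrative stand-ins for the parser/catalog APIs):

```c
#include <stdio.h>

typedef struct { int needTableMeta, needVgroup, needAuth; } MetaReq;
typedef struct { int haveAll; } MetaCache;

/* Pass 1: record which metadata the statement needs (from its table names). */
static int pass1CollectKeys(const char *sql, MetaReq *req) {
  (void)sql;
  req->needTableMeta = req->needVgroup = req->needAuth = 1;
  return 0;
}

/* One batched fetch resolves everything pass 1 reserved. */
static int batchedCatalogFetch(const MetaReq *req, MetaCache *cache) {
  cache->haveAll = req->needTableMeta && req->needVgroup && req->needAuth;
  return 0;
}

/* Pass 2: the full parse never blocks on the network. */
static int pass2ParseInsert(const char *sql, const MetaCache *cache) {
  (void)sql;
  return cache->haveAll ? 0 : -1;
}

int main(void) {
  const char *sql = "insert into t1 values (now, 1)";
  MetaReq     req = {0};
  MetaCache   cache = {0};
  pass1CollectKeys(sql, &req);
  batchedCatalogFetch(&req, &cache);
  printf("parse rc=%d\n", pass2ParseInsert(sql, &cache));
  return 0;
}
```

This is the flow the new runAsync() test helper (later in this diff) and the qParseSqlSyntax/qAnalyseSqlSemantic entry points exercise.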
...
@@ -752,18 +752,30 @@ static bool isMultiResFunc(SNode* pNode) {
  return (QUERY_NODE_COLUMN == nodeType(pParam) ? 0 == strcmp(((SColumnNode*)pParam)->colName, "*") : false);
}
-static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
+static int32_t rewriteNegativeOperator(SNode** pOp) {
+  SNode*  pRes = NULL;
+  int32_t code = scalarCalculateConstants(*pOp, &pRes);
+  if (TSDB_CODE_SUCCESS == code) {
+    *pOp = pRes;
+  }
+  return code;
+}
+
+static EDealRes translateUnaryOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) {
+  SOperatorNode* pOp = *pOpRef;
  if (OP_TYPE_MINUS == pOp->opType) {
    if (!IS_MATHABLE_TYPE(((SExprNode*)(pOp->pLeft))->resType.type)) {
      return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
    }
    pOp->node.resType.type = TSDB_DATA_TYPE_DOUBLE;
    pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes;
+
+   pCxt->errCode = rewriteNegativeOperator((SNode**)pOpRef);
  } else {
    pOp->node.resType.type = TSDB_DATA_TYPE_BOOL;
    pOp->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes;
  }
-  return DEAL_RES_CONTINUE;
+  return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
static EDealRes translateArithmeticOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
@@ -824,7 +836,9 @@ static EDealRes translateJsonOperator(STranslateContext* pCxt, SOperatorNode* pO
  return DEAL_RES_CONTINUE;
}
-static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
+static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode** pOpRef) {
+  SOperatorNode* pOp = *pOpRef;
+
  if (isMultiResFunc(pOp->pLeft)) {
    return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
  }
@@ -833,7 +847,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
  }
  if (nodesIsUnaryOp(pOp)) {
-   return translateUnaryOperator(pCxt, pOp);
+   return translateUnaryOperator(pCxt, pOpRef);
  } else if (nodesIsArithmeticOp(pOp)) {
    return translateArithmeticOperator(pCxt, pOp);
  } else if (nodesIsComparisonOp(pOp)) {
@@ -992,7 +1006,7 @@ static EDealRes doTranslateExpr(SNode** pNode, void* pContext) {
    case QUERY_NODE_VALUE:
      return translateValue(pCxt, (SValueNode*)*pNode);
    case QUERY_NODE_OPERATOR:
-     return translateOperator(pCxt, (SOperatorNode*)*pNode);
+     return translateOperator(pCxt, (SOperatorNode**)pNode);
    case QUERY_NODE_FUNCTION:
      return translateFunction(pCxt, (SFunctionNode*)*pNode);
    case QUERY_NODE_LOGIC_CONDITION:
@@ -1891,9 +1905,9 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SNodeList* pPartiti
  return translateExprList(pCxt, pPartitionByList);
}
-static int32_t translateWhere(STranslateContext* pCxt, SNode* pWhere) {
+static int32_t translateWhere(STranslateContext* pCxt, SNode** pWhere) {
  pCxt->currClause = SQL_CLAUSE_WHERE;
-  return translateExpr(pCxt, &pWhere);
+  return translateExpr(pCxt, pWhere);
}
static int32_t translateFrom(STranslateContext* pCxt, SSelectStmt* pSelect) {
@@ -1964,7 +1978,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) {
  pCxt->pCurrStmt = pSelect;
  int32_t code = translateFrom(pCxt, pSelect);
  if (TSDB_CODE_SUCCESS == code) {
-   code = translateWhere(pCxt, pSelect->pWhere);
+   code = translateWhere(pCxt, &pSelect->pWhere);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = translatePartitionBy(pCxt, pSelect->pPartitionByList);
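Passing SOperatorNode** (and SNode**) instead of a plain pointer is what allows rewriteNegativeOperator to replace the whole node: a unary minus over a foldable expression is evaluated by scalarCalculateConstants and the slot is overwritten with the resulting value node, which is why `SELECT * FROM t1 WHERE -2` in the new test translates cleanly. A simplified constant-folding sketch over a two-node AST (hypothetical types, not TDengine's node system):

```c
#include <stdio.h>
#include <stdlib.h>

typedef enum { N_VALUE, N_NEG } Kind;
typedef struct Node {
  Kind         kind;
  double       val;
  struct Node *child;
} Node;

/* Folding needs a pointer to the *slot* holding the node (Node**), not the
 * node itself, so the parent's reference can be rewritten in place. */
static void foldNegative(Node **slot) {
  Node *n = *slot;
  if (n->kind == N_NEG && n->child->kind == N_VALUE) {
    Node *folded = malloc(sizeof(Node));
    folded->kind = N_VALUE;
    folded->val = -n->child->val; /* constant-folded result */
    folded->child = NULL;
    free(n->child);
    free(n);
    *slot = folded; /* overwrite the slot, as rewriteNegativeOperator does */
  }
}

int main(void) {
  Node *two = malloc(sizeof(Node));
  *two = (Node){N_VALUE, 2.0, NULL};
  Node *neg = malloc(sizeof(Node));
  *neg = (Node){N_NEG, 0.0, two};
  foldNegative(&neg);
  printf("kind=%d val=%g\n", neg->kind, neg->val); /* kind=0 val=-2 */
  free(neg);
  return 0;
}
```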
...
@@ -673,22 +673,32 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
  return code;
}
-static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
+static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
  if (NULL == *pTables) {
    *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
    if (NULL == *pTables) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }
  }
+  return taosHashPut(*pTables, pTbFName, len, &pTables, POINTER_BYTES);
+}
+
+static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) {
  char    fullName[TSDB_TABLE_FNAME_LEN];
  int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
-  return taosHashPut(*pTables, fullName, len, &pTables, POINTER_BYTES);
+  return reserveTableReqInCacheImpl(fullName, len, pTables);
}
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
  return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta);
}
+int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableMeta);
+}
+
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
  char fullName[TSDB_TABLE_FNAME_LEN];
  tNameExtractFullName(pName, fullName);
@@ -738,6 +748,12 @@ int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* p
  return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
}
+int32_t reserveTableVgroupInCacheExt(const SName* pName, SParseMetaCache* pMetaCache) {
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  tNameExtractFullName(pName, fullName);
+  return reserveTableReqInCacheImpl(fullName, strlen(fullName), &pMetaCache->pTableVgroup);
+}
+
int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
  char fullName[TSDB_TABLE_FNAME_LEN];
  tNameExtractFullName(pName, fullName);
@@ -778,18 +794,30 @@ int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDb
  return TSDB_CODE_SUCCESS;
}
-int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
-                               SParseMetaCache* pMetaCache) {
+static int32_t reserveUserAuthInCacheImpl(const char* pKey, int32_t len, SParseMetaCache* pMetaCache) {
  if (NULL == pMetaCache->pUserAuth) {
    pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
    if (NULL == pMetaCache->pUserAuth) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }
  }
+  bool pass = false;
+  return taosHashPut(pMetaCache->pUserAuth, pKey, len, &pass, sizeof(pass));
+}
+
+int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type,
+                               SParseMetaCache* pMetaCache) {
  char    key[USER_AUTH_KEY_MAX_LEN] = {0};
  int32_t len = userAuthToString(acctId, pUser, pDb, type, key);
-  bool pass = false;
-  return taosHashPut(pMetaCache->pUserAuth, key, len, &pass, sizeof(pass));
+  return reserveUserAuthInCacheImpl(key, len, pMetaCache);
+}
+
+int32_t reserveUserAuthInCacheExt(const char* pUser, const SName* pName, AUTH_TYPE type, SParseMetaCache* pMetaCache) {
+  char dbFName[TSDB_DB_FNAME_LEN] = {0};
+  tNameGetFullDbName(pName, dbFName);
+  char    key[USER_AUTH_KEY_MAX_LEN] = {0};
+  int32_t len = userAuthToStringExt(pUser, dbFName, type, key);
+  return reserveUserAuthInCacheImpl(key, len, pMetaCache);
}
int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type,
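Both reserveUserAuthInCache variants now funnel into one impl keyed by a single canonical string built from user, database full name and auth type, so the later getUserAuthFromCache lookup (which now takes a db full name) lands on the same hash entry. A sketch of keying both sides with one composer (the key format below is illustrative, not the exact userAuthToString output):

```c
#include <stdio.h>
#include <string.h>

/* One canonical key for reserve and lookup; any drift between the two
 * composers would make every cached auth check miss. */
static int userAuthKey(const char *user, const char *dbFName, int authType,
                       char *key, size_t keyLen) {
  return snprintf(key, keyLen, "%s*%s*%d", user, dbFName, authType);
}

int main(void) {
  char reserveKey[128], lookupKey[128];
  userAuthKey("root", "1.test", 1 /* write */, reserveKey, sizeof(reserveKey));
  userAuthKey("root", "1.test", 1 /* write */, lookupKey, sizeof(lookupKey));
  printf("match=%d key=%s\n", strcmp(reserveKey, lookupKey) == 0, reserveKey);
  return 0;
}
```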
...
@@ -34,7 +34,7 @@ bool qIsInsertSql(const char* pStr, size_t length) {
  } while (1);
}
-static int32_t semanticAnalysis(SParseContext* pCxt, SQuery* pQuery) {
+static int32_t analyseSemantic(SParseContext* pCxt, SQuery* pQuery) {
  int32_t code = authenticate(pCxt, pQuery);
  if (TSDB_CODE_SUCCESS == code && pQuery->placeholderNum > 0) {
@@ -54,12 +54,12 @@ static int32_t semanticAnalysis(SParseContext* pCxt, SQuery* pQuery) {
static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
  int32_t code = parse(pCxt, pQuery);
  if (TSDB_CODE_SUCCESS == code) {
-   code = semanticAnalysis(pCxt, *pQuery);
+   code = analyseSemantic(pCxt, *pQuery);
  }
  return code;
}
-static int32_t syntaxParseSql(SParseContext* pCxt, SQuery** pQuery) {
+static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery) {
  int32_t code = parse(pCxt, pQuery);
  if (TSDB_CODE_SUCCESS == code) {
    code = collectMetaKey(pCxt, *pQuery);
@@ -192,12 +192,12 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
  return code;
}
-int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
+int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
  int32_t code = TSDB_CODE_SUCCESS;
  if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
-   // todo insert sql
+   code = parseInsertSyntax(pCxt, pQuery);
  } else {
-   code = syntaxParseSql(pCxt, pQuery);
+   code = parseSqlSyntax(pCxt, pQuery);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);
@@ -206,13 +206,13 @@ int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
  return code;
}
-int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
+int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
                             const struct SMetaData* pMetaData, SQuery* pQuery) {
  int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, pQuery->pMetaCache);
  if (NULL == pQuery->pRoot) {
-   // todo insert sql
+   return parseInsertSql(pCxt, &pQuery);
  }
-  return semanticAnalysis(pCxt, pQuery);
+  return analyseSemantic(pCxt, pQuery);
}
void qDestroyQuery(SQuery* pQueryNode) { nodesDestroyNode(pQueryNode); }
...
@@ -26,9 +26,7 @@ if(${BUILD_WINGETOPT})
  target_link_libraries(parserTest PUBLIC wingetopt)
endif()

-if(NOT TD_WINDOWS)
-  add_test(
-    NAME parserTest
-    COMMAND parserTest
-  )
-endif(NOT TD_WINDOWS)
+add_test(
+  NAME parserTest
+  COMMAND parserTest
+)
@@ -242,6 +242,8 @@ class MockCatalogServiceImpl {
    info->outputType = outputType;
    info->outputLen = outputLen;
    info->bufSize = bufSize;
+   info->pCode = nullptr;
+   info->pComment = nullptr;
    udf_.insert(std::make_pair(func, info));
  }
...
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "mockCatalogService.h"
#include "os.h" #include "os.h"
#include "parInt.h" #include "parInt.h"
...@@ -57,6 +58,38 @@ class InsertTest : public Test { ...@@ -57,6 +58,38 @@ class InsertTest : public Test {
return code_; return code_;
} }
int32_t runAsync() {
code_ = parseInsertSyntax(&cxt_, &res_);
if (code_ != TSDB_CODE_SUCCESS) {
cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
return code_;
}
SCatalogReq catalogReq = {0};
code_ = buildCatalogReq(res_->pMetaCache, &catalogReq);
if (code_ != TSDB_CODE_SUCCESS) {
cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
return code_;
}
SMetaData metaData = {0};
g_mockCatalogService->catalogGetAllMeta(&catalogReq, &metaData);
code_ = putMetaDataToCache(&catalogReq, &metaData, res_->pMetaCache);
if (code_ != TSDB_CODE_SUCCESS) {
cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
return code_;
}
code_ = parseInsertSql(&cxt_, &res_);
if (code_ != TSDB_CODE_SUCCESS) {
cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
return code_;
}
return code_;
}
void dumpReslut() { void dumpReslut() {
SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_); SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
size_t num = taosArrayGetSize(pStmt->pDataBlocks); size_t num = taosArrayGetSize(pStmt->pDataBlocks);
...@@ -125,7 +158,7 @@ class InsertTest : public Test { ...@@ -125,7 +158,7 @@ class InsertTest : public Test {
SQuery* res_; SQuery* res_;
}; };
// INSERT INTO tb_name VALUES (field1_value, ...) // INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...)
TEST_F(InsertTest, singleTableSingleRowTest) { TEST_F(InsertTest, singleTableSingleRowTest) {
setDatabase("root", "test"); setDatabase("root", "test");
...@@ -133,6 +166,17 @@ TEST_F(InsertTest, singleTableSingleRowTest) { ...@@ -133,6 +166,17 @@ TEST_F(InsertTest, singleTableSingleRowTest) {
ASSERT_EQ(run(), TSDB_CODE_SUCCESS); ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut(); dumpReslut();
checkReslut(1, 1); checkReslut(1, 1);
bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(1, 1);
bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
} }
// INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...) // INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...)
...@@ -140,11 +184,16 @@ TEST_F(InsertTest, singleTableMultiRowTest) { ...@@ -140,11 +184,16 @@ TEST_F(InsertTest, singleTableMultiRowTest) {
setDatabase("root", "test"); setDatabase("root", "test");
bind( bind(
"insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)(now+2s, 3, 'guangzhou', 9, " "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
"10, 11)"); "(now+2s, 3, 'guangzhou', 9, 10, 11)");
ASSERT_EQ(run(), TSDB_CODE_SUCCESS); ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut(); dumpReslut();
checkReslut(1, 3); checkReslut(1, 3);
bind(
"insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
"(now+2s, 3, 'guangzhou', 9, 10, 11)");
ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
} }
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...) // INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
...@@ -155,6 +204,9 @@ TEST_F(InsertTest, multiTableSingleRowTest) { ...@@ -155,6 +204,9 @@ TEST_F(InsertTest, multiTableSingleRowTest) {
ASSERT_EQ(run(), TSDB_CODE_SUCCESS); ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut(); dumpReslut();
checkReslut(2, 1); checkReslut(2, 1);
bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
} }
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
@@ -167,6 +219,11 @@ TEST_F(InsertTest, multiTableMultiRowTest) {
  ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
  dumpReslut();
  checkReslut(2, 3, 2);
+
+  bind(
+      "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
+      " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
+  ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO
@@ -181,6 +238,21 @@ TEST_F(InsertTest, autoCreateTableTest) {
  ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
  dumpReslut();
  checkReslut(1, 3);
+
+  bind(
+      "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
+      "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
+  ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+
+  bind(
+      "insert into st1s1 using st1 tags(1, 'wxy') values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, "
+      "\"guangzhou\")");
+  ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+
+  bind(
+      "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
+      "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
+  ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}

TEST_F(InsertTest, toleranceTest) {
@@ -190,4 +262,9 @@ TEST_F(InsertTest, toleranceTest) {
  ASSERT_NE(run(), TSDB_CODE_SUCCESS);
  bind("insert into t");
  ASSERT_NE(run(), TSDB_CODE_SUCCESS);
+
+  bind("insert into");
+  ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
+  bind("insert into t");
+  ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
}
@@ -44,6 +44,8 @@ TEST_F(ParserSelectTest, constant) {
      "timestamp '2022-02-09 17:30:20', true, false, 15s FROM t1");

  run("SELECT 123 + 45 FROM t1 WHERE 2 - 1");
+
+  run("SELECT * FROM t1 WHERE -2");
}

TEST_F(ParserSelectTest, expression) {
@@ -76,6 +78,12 @@ TEST_F(ParserSelectTest, pseudoColumnSemanticCheck) {
  run("SELECT TBNAME FROM (SELECT * FROM st1s1)", TSDB_CODE_PAR_INVALID_TBNAME, PARSER_STAGE_TRANSLATE);
}

+TEST_F(ParserSelectTest, aggFunc) {
+  useDb("root", "test");
+
+  run("SELECT LEASTSQUARES(c1, -1, 1) FROM t1");
+}
+
TEST_F(ParserSelectTest, multiResFunc) {
  useDb("root", "test");
@@ -27,6 +27,14 @@ TEST_F(PlanSuperTableTest, pseudoCol) {
  run("SELECT TBNAME, tag1, tag2 FROM st1");
}

+TEST_F(PlanSuperTableTest, pseudoColOnChildTable) {
+  useDb("root", "test");
+
+  run("SELECT TBNAME FROM st1s1");
+
+  run("SELECT TBNAME, tag1, tag2 FROM st1s1");
+}
+
TEST_F(PlanSuperTableTest, orderBy) {
  useDb("root", "test");
@@ -111,12 +111,10 @@ target_link_libraries (pushServer
)

-if(NOT TD_WINDOWS)
-  add_test(
-    NAME transUT
-    COMMAND transUT
-  )
-endif(NOT TD_WINDOWS)
+add_test(
+  NAME transUT
+  COMMAND transUT
+)

add_test(
  NAME transUtilUt
  COMMAND transportTest
@@ -252,6 +252,7 @@ endi
print ======== step8
sql alter table tb add column h binary(10)
+sql select * from tb
sql describe tb
if $data00 != ts then
  return -1
@@ -304,7 +305,7 @@ endi
if $data80 != h then
  return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
  return -1
endi
if $data82 != 10 then
@@ -371,7 +372,7 @@ endi
if $data80 != h then
  return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
  return -1
endi
if $data82 != 10 then
@@ -447,7 +448,7 @@ endi
if $data70 != h then
  return -1
endi
-if $data71 != BINARY then
+if $data71 != VARCHAR then
  return -1
endi
if $data72 != 10 then
@@ -496,7 +497,7 @@ endi
if $data60 != h then
  return -1
endi
-if $data61 != BINARY then
+if $data61 != VARCHAR then
  return -1
endi
if $data62 != 10 then
@@ -539,7 +540,7 @@ endi
if $data50 != h then
  return -1
endi
-if $data51 != BINARY then
+if $data51 != VARCHAR then
  return -1
endi
if $data52 != 10 then
@@ -576,7 +577,7 @@ endi
if $data40 != h then
  return -1
endi
-if $data41 != BINARY then
+if $data41 != VARCHAR then
  return -1
endi
if $data42 != 10 then
@@ -607,7 +608,7 @@ endi
if $data30 != h then
  return -1
endi
-if $data31 != BINARY then
+if $data31 != VARCHAR then
  return -1
endi
if $data32 != 10 then
@@ -632,7 +633,7 @@ endi
if $data20 != h then
  return -1
endi
-if $data21 != BINARY then
+if $data21 != VARCHAR then
  return -1
endi
if $data22 != 10 then
@@ -104,6 +104,10 @@
./test.sh -f tsim/stable/tag_modify.sim
./test.sh -f tsim/stable/tag_rename.sim
./test.sh -f tsim/stable/alter_comment.sim
+./test.sh -f tsim/stable/alter_count.sim
+./test.sh -f tsim/stable/alter_insert1.sim
+./test.sh -f tsim/stable/alter_insert2.sim
+./test.sh -f tsim/stable/alter_import.sim

# --- for multi process mode
./test.sh -f tsim/user/basic1.sim -m
@@ -29,14 +29,14 @@ if $data00 != 3 then
endi

print ========= step3
-sql import into tb values(now-23d, -23, 0)
-sql import into tb values(now-21d, -21, 0)
+sql insert into tb values(now-23d, -23, 0)
+sql insert into tb values(now-21d, -21, 0)
sql select count(b) from tb
if $data00 != 5 then
  return -1
endi

-sql import into tb values(now-29d, -29, 0)
+sql insert into tb values(now-29d, -29, 0)
sql select count(b) from tb
if $data00 != 6 then
  return -1
@@ -347,7 +347,7 @@ endi
if $data80 != h then
  return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
  return -1
endi
if $data82 != 10 then
@@ -363,9 +363,8 @@ endi
print ======== step9
print ======== step10
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 3000
system sh/exec.sh -n dnode1 -s start
-sleep 3000
+sql connect

sql use d2
sql describe tb
@@ -420,7 +419,7 @@ endi
if $data80 != h then
  return -1
endi
-if $data81 != BINARY then
+if $data81 != VARCHAR then
  return -1
endi
if $data82 != 10 then
@@ -502,7 +501,7 @@ endi
if $data70 != h then
  return -1
endi
-if $data71 != BINARY then
+if $data71 != VARCHAR then
  return -1
endi
if $data72 != 10 then
@@ -557,7 +556,7 @@ endi
if $data60 != h then
  return -1
endi
-if $data61 != BINARY then
+if $data61 != VARCHAR then
  return -1
endi
if $data62 != 10 then
@@ -606,7 +605,7 @@ endi
if $data50 != h then
  return -1
endi
-if $data51 != BINARY then
+if $data51 != VARCHAR then
  return -1
endi
if $data52 != 10 then
@@ -649,7 +648,7 @@ endi
if $data40 != h then
  return -1
endi
-if $data41 != BINARY then
+if $data41 != VARCHAR then
  return -1
endi
if $data42 != 10 then
@@ -686,7 +685,7 @@ endi
if $data30 != h then
  return -1
endi
-if $data31 != BINARY then
+if $data31 != VARCHAR then
  return -1
endi
if $data32 != 10 then
@@ -717,7 +716,7 @@ endi
if $data20 != h then
  return -1
endi
-if $data21 != BINARY then
+if $data21 != VARCHAR then
  return -1
endi
if $data22 != 10 then
@@ -758,7 +757,7 @@ endi
print ======= over
sql drop database d2
sql show databases
-if $rows != 0 then
+if $rows != 2 then
  return -1
endi
@@ -79,28 +79,31 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
sql connect

+sql select * from db.ctb
+sql select * from db.stb
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
if $rows != 2 then
  return -1
endi
-#if $data[0][1] != 1 then
-#  return -1
-#endi
-#if $data[0][2] != 1234 then
-#  return -1
-#endi
-#if $data[0][3] != 101 then
-#  return -1
-#endi
-#if $data[1][1] != 1 then
-#  return -1
-#endi
-#if $data[1][2] != 12345 then
-#  return -1
-#endi
-#if $data[1][3] != 101 then
-#  return -1
-#endi
+if $data[0][1] != 1 then
+  return -1
+endi
+if $data[0][2] != 1234 then
+  return -1
+endi
+if $data[0][3] != 101 then
+  return -1
+endi
+if $data[1][1] != 1 then
+  return -1
+endi
+if $data[1][2] != 12345 then
+  return -1
+endi
+if $data[1][3] != 101 then
+  return -1
+endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
@@ -49,10 +49,9 @@ if $data00 != $totalNum then
  return -1
endi

-sleep 1000
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
system sh/exec.sh -n dnode1 -s start
+sql connect

sql use $db
sql show vgroups
@@ -93,9 +93,6 @@ $i = 2
$tb = $tbPrefix . $i
sql insert into $tb values (now + 1m , 1 )

-print sleep 2000
-sleep 2000
-
print =============== step6
# sql select * from $mt
@@ -382,6 +382,7 @@ class TDTestCase:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

+        time.sleep(15)
        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 10 end ...... ")
@@ -453,6 +454,7 @@ class TDTestCase:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

+        time.sleep(15)
        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 11 end ...... ")
@@ -23,20 +23,18 @@ target_link_libraries(
  PUBLIC os
)

-if(NOT TD_WINDOWS)
-  add_executable(sdbDump sdbDump.c)
-  target_link_libraries(
-    sdbDump
-    PUBLIC dnode
-    PUBLIC mnode
-    PUBLIC sdb
-    PUBLIC os
-  )
-  target_include_directories(
-    sdbDump
-    PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
-    PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc"
-    PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc"
-    PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc"
-  )
-ENDIF ()
\ No newline at end of file
+add_executable(sdbDump sdbDump.c)
+target_link_libraries(
+  sdbDump
+  PUBLIC dnode
+  PUBLIC mnode
+  PUBLIC sdb
+  PUBLIC os
+)
+target_include_directories(
+  sdbDump
+  PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode"
+  PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/impl/inc"
+  PRIVATE "${TD_SOURCE_DIR}/source/dnode/mnode/sdb/inc"
+  PRIVATE "${TD_SOURCE_DIR}/source/dnode/mgmt/node_mgmt/inc"
+)
\ No newline at end of file
@@ -21,12 +21,12 @@
#include "tjson.h"

#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb"
-#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb/mnode"
-#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb/mnode/data"
-#define TMP_SDB_SYNC_DIR TD_TMP_DIR_PATH "dumpsdb/mnode/sync"
-#define TMP_SDB_DATA_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/data/sdb.data"
-#define TMP_SDB_RAFT_CFG_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_config.json"
-#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_store.json"
+#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode"
+#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data"
+#define TMP_SDB_SYNC_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync"
+#define TMP_SDB_DATA_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data"
+#define TMP_SDB_RAFT_CFG_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json"
+#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json"

void reportStartup(const char *name, const char *desc) {}
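The macro rewrite above leans on C's compile-time concatenation of adjacent string literals: once `TD_DIRSEP` expands to the platform's separator, every path constant is assembled without any runtime joining. A minimal sketch, assuming simplified stand-in definitions for `TD_DIRSEP` and `TD_TMP_DIR_PATH` rather than the real TDengine headers:

```cpp
#include <cstdio>

// Stand-in definitions for illustration only; TDengine's own headers
// define TD_DIRSEP and TD_TMP_DIR_PATH per platform.
#ifdef _WIN32
#define TD_DIRSEP       "\\"
#define TD_TMP_DIR_PATH "C:\\Windows\\Temp\\"
#else
#define TD_DIRSEP       "/"
#define TD_TMP_DIR_PATH "/tmp/"
#endif

// Adjacent string literals merge at compile time into one constant,
// so the whole path comes out correct for the target platform.
#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode"

int main() {
  std::printf("%s\n", TMP_MNODE_DIR);  // e.g. /tmp/dumpsdb/mnode on POSIX
  return 0;
}
```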
@@ -412,13 +412,23 @@ int32_t parseArgs(int32_t argc, char *argv[]) {
  char dataFile[PATH_MAX] = {0};
  char raftCfgFile[PATH_MAX] = {0};
  char raftStoreFile[PATH_MAX] = {0};
-  snprintf(dataFile, PATH_MAX, "%s/mnode/data/sdb.data", tsDataDir);
-  snprintf(raftCfgFile, PATH_MAX, "%s/mnode/sync/raft_config.json", tsDataDir);
-  snprintf(raftStoreFile, PATH_MAX, "%s/mnode/sync/raft_store.json", tsDataDir);
+  snprintf(dataFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "data" TD_DIRSEP "sdb.data", tsDataDir);
+  snprintf(raftCfgFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_config.json", tsDataDir);
+  snprintf(raftStoreFile, PATH_MAX, "%s" TD_DIRSEP "mnode" TD_DIRSEP "sync" TD_DIRSEP "raft_store.json", tsDataDir);

  char cmd[PATH_MAX * 2] = {0};
  snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR);
  system(cmd);
+#ifdef WINDOWS
+  taosMulMkDir(TMP_SDB_DATA_DIR);
+  taosMulMkDir(TMP_SDB_SYNC_DIR);
+  snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", dataFile, TMP_SDB_DATA_FILE);
+  system(cmd);
+  snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftCfgFile, TMP_SDB_RAFT_CFG_FILE);
+  system(cmd);
+  snprintf(cmd, sizeof(cmd), "cp %s %s 2>nul", raftStoreFile, TMP_SDB_RAFT_STORE_FILE);
+  system(cmd);
+#else
  snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_DATA_DIR);
  system(cmd);
  snprintf(cmd, sizeof(cmd), "mkdir -p %s", TMP_SDB_SYNC_DIR);
@@ -429,6 +439,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) {
  system(cmd);
  snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", raftStoreFile, TMP_SDB_RAFT_STORE_FILE);
  system(cmd);
+#endif

  strcpy(tsDataDir, TMP_DNODE_DIR);
  return 0;
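The new `#ifdef WINDOWS` branch replaces shell `mkdir -p` with `taosMulMkDir()` and redirects errors to `nul` instead of `/dev/null`; note it still shells out to `cp`, which assumes a POSIX-like tool on the PATH (stock `cmd.exe` would use `copy`). A minimal sketch of the same compile-time dispatch, using the compiler's standard `_WIN32` macro instead of TDengine's `WINDOWS` build flag, with hypothetical paths:

```cpp
#include <cstdio>
#include <cstdlib>

// Illustration only: pick the platform's shell idiom at compile time.
// The source and destination paths here are hypothetical.
static int copyQuietly(const char* src, const char* dst) {
  char cmd[1024] = {0};
#ifdef _WIN32
  std::snprintf(cmd, sizeof(cmd), "copy %s %s 2>nul", src, dst);      // cmd.exe builtin
#else
  std::snprintf(cmd, sizeof(cmd), "cp %s %s 2>/dev/null", src, dst);  // POSIX shell
#endif
  return std::system(cmd);
}

int main() {
  return copyQuietly("sdb.data", "sdb.data.bak") == 0 ? 0 : 1;
}
```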