diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 1920d8da173cc0cf24d70137e1f6f3e1ac35ca5f..54234cc54789f23c000538b5669841c956992efe 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -430,7 +430,7 @@ pipeline { date rm -rf ${WKC}/debug cd ${WKC}/tests/parallel_test - time ./container_build.sh -w ${WKDIR} -t 10 -e + time ./container_build.sh -w ${WKDIR} -e ''' def extra_param = "" def log_server_file = "/home/log_server.json" diff --git a/cmake/cmake.version b/cmake/cmake.version index ba85a3d99b8280a49d9d6e6475cbeb3807090d28..a4c783b6c8cfa4c3c1bea4eb7f4ac40b165efe87 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.2.2") + SET(TD_VER_NUMBER "3.0.2.4") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index 3e2e879e389fc7f6686949efab43bc5fada33f3a..13b247770ea7eef6b64209ca98787ff6d733bf85 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG a2e9920 + GIT_TAG 213f8b3 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 115e2fc674c982c755397a8dd745057e14e0ac50..13a81f88eab42c64be7ea0cf759da21ddce7a456 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 4776778 + GIT_TAG 0cd564a SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx index 3c088602602301fafa824bc256f1f2caca128abd..fc5644850cbedcb91de2aebca29070dc3c021551 100644 --- a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx +++ b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx @@ -38,7 +38,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0 - Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double. - Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h). - The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored. -- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. 
Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3) +- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3, smlDataFormat is discarded since 3.0.3.0) ::: For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol) diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index 17b3f5caa062eaacb4216b7153e899040e702cc1..92db7d4cbf4bcb35b3f30483e38be4c627a6b821 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -94,22 +94,21 @@ void close() throws SQLException; ```python -class TaosConsumer(): - def __init__(self, *topics, **configs) +class Consumer: + def subscribe(self, topics): + pass - def __iter__(self) + def unsubscribe(self): + pass - def __next__(self) + def poll(self, timeout: float = 1.0): + pass - def sync_next(self) - - def subscription(self) + def close(self): + pass - def unsubscribe(self) - - def close(self) - - def __del__(self) + def commit(self, message): + pass ``` @@ -117,19 +116,22 @@ class TaosConsumer(): ```go -func NewConsumer(conf *Config) (*Consumer, error) - -func (c *Consumer) Close() error +func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error) -func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error +// rebalanceCb is reserved for compatibility purpose +func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error -func (c *Consumer) FreeMessage(message unsafe.Pointer) +// rebalanceCb is reserved for compatibility purpose +func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error -func (c *Consumer) Poll(timeout time.Duration) (*Result, error) +func (c *Consumer) Poll(timeoutMs int) tmq.Event -func (c *Consumer) Subscribe(topics []string) error +// tmq.TopicPartition is reserved for compatibility purpose +func (c *Consumer) Commit() ([]tmq.TopicPartition, error) func (c *Consumer) Unsubscribe() error + +func (c *Consumer) Close() error ``` @@ -357,50 +359,20 @@ public class MetersDeserializer extends ReferenceDeserializer { ```go -config := tmq.NewConfig() -defer config.Destroy() -err = config.SetGroupID("test") -if err != nil { - panic(err) -} -err = config.SetAutoOffsetReset("earliest") -if err != nil { - panic(err) -} -err = config.SetConnectIP("127.0.0.1") -if err != nil { - panic(err) -} -err = config.SetConnectUser("root") -if err != nil { - panic(err) -} -err = config.SetConnectPass("taosdata") -if err != nil { - panic(err) -} -err = config.SetConnectPort("6030") -if err != nil { - panic(err) -} -err = config.SetMsgWithTableName(true) -if err != nil { - panic(err) -} -err = config.EnableHeartBeat() -if err != nil { - panic(err) -} -err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { - if result.ErrCode != 0 { - errStr := wrapper.TMQErr2Str(result.ErrCode) - err := errors.NewError(int(result.ErrCode), errStr) - panic(err) - } -}) -if err != nil { - panic(err) +conf := &tmq.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + 
"td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_c", + "enable.auto.commit": "false", + "enable.heartbeat.background": "true", + "experimental.snapshot.enable": "true", + "msg.with.table.name": "true", } +consumer, err := NewConsumer(conf) ``` @@ -422,23 +394,31 @@ let mut consumer = tmq.build()?; +```python +from taos.tmq import Consumer + +# Syntax: `consumer = Consumer(configs)` +# +# Example: +consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) +``` + Python programs use the following parameters: -| Parameter | Type | Description | Remarks | -| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- | -| `td_connect_ip` | string | Used in establishing a connection; same as `taos_connect` | | -| `td_connect_user` | string | Used in establishing a connection; same as `taos_connect` | | -| `td_connect_pass` | string | Used in establishing a connection; same as `taos_connect` | | -| `td_connect_port` | string | Used in establishing a connection; same as `taos_connect` | | -| `group_id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | -| `client_id` | string | Client ID | Maximum length: 192. | -| `auto_offset_reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | -| `enable_auto_commit` | string | Commit automatically | Specify `true` or `false`. | -| `auto_commit_interval_ms` | string | Interval for automatic commits, in milliseconds | -| `enable_heartbeat_background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false`. | -| `experimental_snapshot_enable` | string | Specify whether to consume messages from the WAL or from TSBS | Specify `true` or `false`. | -| `msg_with_table_name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false`. -| `timeout` | int | Consumer pull timeout | | +| Parameter | Type | Description | Remarks | +|:---------:|:----:|:-----------:|:-------:| +| `td.connect.ip` | string | Used in establishing a connection|| +| `td.connect.user` | string | Used in establishing a connection|| +| `td.connect.pass` | string | Used in establishing a connection|| +| `td.connect.port` | string | Used in establishing a connection|| +| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. 
Maximum length: 192 | +| `client.id` | string | Client ID | Maximum length: 192 | +| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false` | +| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` | +| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | | +| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none` (default) | +| `experimental.snapshot.enable` | string | Specify whether to consume messages from the WAL or from TSDB | Specify `true` or `false` | +| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` | @@ -523,11 +503,7 @@ consumer.subscribe(topics); ```go -consumer, err := tmq.NewConsumer(config) -if err != nil { - panic(err) -} -err = consumer.Subscribe([]string{"example_tmq_topic"}) +err = consumer.Subscribe("example_tmq_topic", nil) if err != nil { panic(err) } @@ -545,7 +521,7 @@ consumer.subscribe(["tmq_meters"]).await?; ```python -consumer = TaosConsumer('topic_ctb_column', group_id='vg2') +consumer.subscribe(['topic1', 'topic2']) ``` @@ -611,13 +587,17 @@ while(running){ ```go for { - result, err := consumer.Poll(time.Second) - if err != nil { - panic(err) + ev := consumer.Poll(0) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Println(e.Value()) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() } - fmt.Println(result) - consumer.Commit(context.Background(), result.Message) - consumer.FreeMessage(result.Message) } ``` @@ -660,9 +640,17 @@ for { ```python -for msg in consumer: - for row in msg: - print(row) +while True: + res = consumer.poll(100) + if not res: + continue + err = res.error() + if err is not None: + raise err + val = res.value() + + for block in val: + print(block.fetchall()) ``` @@ -729,7 +717,11 @@ consumer.close(); ```go -consumer.Close() +/* Unsubscribe */ +_ = consumer.Unsubscribe() + +/* Close consumer */ +_ = consumer.Close() ``` diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md index a4fa68100078efe85fff5e1b078ebd07e5337d5a..90baa5f4459c6d9d01530d5e0c36477d79aa5ab9 100644 --- a/docs/en/10-deployment/05-helm.md +++ b/docs/en/10-deployment/05-helm.md @@ -22,7 +22,7 @@ Helm uses the kubectl and kubeconfig configurations to perform Kubernetes operat To use TDengine Chart, download it from GitHub: ```bash -wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz +wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz ``` @@ -38,7 +38,7 @@ With minikube, the default value is standard. Use Helm commands to install TDengine: ```bash -helm install tdengine tdengine-3.0.0.tgz \ +helm install tdengine tdengine-3.0.2.tgz \ --set storage.className= ``` @@ -46,7 +46,7 @@ helm install tdengine tdengine-3.0.0.tgz \ You can configure a small storage size in minikube to ensure that your deployment does not exceed your available disk space. ```bash -helm install tdengine tdengine-3.0.0.tgz \ +helm install tdengine tdengine-3.0.2.tgz \ --set storage.className=standard \ --set storage.dataSize=2Gi \ --set storage.logSize=10Mi @@ -83,14 +83,14 @@ You can configure custom parameters in TDengine with the `values.yaml` file.
Run the `helm show values` command to see all parameters supported by TDengine Chart. ```bash -helm show values tdengine-3.0.0.tgz +helm show values tdengine-3.0.2.tgz ``` Save the output of this command as `values.yaml`. Then you can modify this file with your desired values and use it to deploy a TDengine cluster: ```bash -helm install tdengine tdengine-3.0.0.tgz -f values.yaml +helm install tdengine tdengine-3.0.2.tgz -f values.yaml ``` @@ -107,7 +107,7 @@ image: prefix: tdengine/tdengine #pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. -# tag: "3.0.0.0" +# tag: "3.0.2.0" service: # ClusterIP is the default service type, use NodeIP only if you know what you are doing. @@ -155,15 +155,15 @@ clusterDomainSuffix: "" # See the [Configuration Variables](../../reference/config) # # Note: -# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up. -# 2. serverPort: should not be setted, we'll use the default 6030 in many places. -# 3. fqdn: will be auto generated in kubenetes, user should not care about it. +# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up. +# 2. serverPort: should not be set, we'll use the default 6030 in many places. +# 3. fqdn: will be auto generated in kubernetes, user should not care about it. # 4. role: currently role is not supported - every node is able to be mnode and vnode. # # Btw, keep quotes "" around the value like below, even the value will be number or not. taoscfg: # Starts as cluster or not, must be 0 or 1. - # 0: all pods will start as a seperate TDengine server + # 0: all pods will start as a separate TDengine server # 1: pods will start as TDengine server cluster. [default] CLUSTER: "1" diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index c3c7e5928bb6705939dfae7d4e0096b202025520..059f124ea5b5f380cd72ba55a20b6cf3a80b035e 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -30,8 +30,10 @@ database_option: { | WAL_LEVEL {1 | 2} | VGROUPS value | SINGLE_STABLE {0 | 1} + | STT_TRIGGER value | TABLE_PREFIX value | TABLE_SUFFIX value + | TSDB_PAGESIZE value | WAL_RETENTION_PERIOD value | WAL_ROLL_PERIOD value | WAL_RETENTION_SIZE value @@ -56,7 +58,7 @@ database_option: { - WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk. - MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096. - MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100. -- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. +- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. 
TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; the Community Edition does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP). - PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB. - PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384. - PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms. @@ -69,8 +71,10 @@ database_option: { - SINGLE_STABLE: specifies whether the database can contain more than one supertable. - 0: The database can contain multiple supertables. - 1: The database can contain only one supertable. +- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value. - TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name. - TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name. +- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB. - WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days. - WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1. - WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day. 
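For readers of the option list above, a minimal sketch of how the newly documented STT_TRIGGER and TSDB_PAGESIZE options might be combined with KEEP when creating a database is shown below. The database name `db1` and the specific values are illustrative assumptions, not recommendations.

```sql
-- Illustrative sketch only: 8 KB TSDB pages, trigger a file merge after
-- 4 flushed files, and retain data for 365 days.
CREATE DATABASE db1 TSDB_PAGESIZE 8 STT_TRIGGER 4 KEEP 365d;

-- STT_TRIGGER can also be changed later; see the alterable options below.
ALTER DATABASE db1 STT_TRIGGER 8;
```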
@@ -112,6 +116,10 @@ alter_database_options: alter_database_option: { CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} | CACHESIZE value + | BUFFER value + | PAGES value + | REPLICA value + | STT_TRIGGER value | WAL_LEVEL value | WAL_FSYNC_PERIOD value | KEEP value @@ -154,3 +162,19 @@ TRIM DATABASE db_name; ``` The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration. + +## Redistribute Vgroup + +```sql +REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3] +``` + +Adjusts the distribution of vnodes in the vgroup according to the given list of dnodes. + +## Balance Vgroup + +```sql +BALANCE VGROUP +``` + +Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level. diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md index c087a9e9fb2f0af921aa031d41d124c66fbb0ae7..ee06a7be2d3172210bf35302d5bffbf7a49adabc 100644 --- a/docs/en/12-taos-sql/06-select.md +++ b/docs/en/12-taos-sql/06-select.md @@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F ## JOIN -TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table. +TDengine supports `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as this timestamp-based primary key requirement is met, `INNER JOIN` can be performed between normal tables, subtables, supertables, and subqueries in any combination, and there is no limit on the number of tables. -For standard tables, only the timestamp (primary key) can be used in join operations. For example: +For standard tables: ```sql SELECT * @@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2 WHERE t1.ts = t2.ts ``` -For supertables, tags as well as timestamps can be used in join operations. For example: +For supertables: ```sql SELECT * @@ -368,20 +368,15 @@ FROM temp_stable t1, temp_stable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similarly, join operations can be performed on the result sets of multiple subqueries. - -:::note - -The following restriction apply to JOIN statements: +For a subtable and a supertable: -- The number of tables or supertables in a single join operation cannot exceed 10. -- `FILL` cannot be used in a JOIN statement. -- Arithmetic operations cannot be performed on the result sets of join operation. -- `GROUP BY` is not allowed on a segment of the tables that participate in a join operation. -- `OR` cannot be used in the conditions for join operation -- Join operation can be performed only on tags or timestamps. You cannot perform a join operation on data columns. +```sql +SELECT * +FROM temp_ctable t1, temp_stable t2 +WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; +``` -::: +Similarly, join operations can be performed on the result sets of multiple subqueries.
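The last sentence above notes that subquery result sets can also be joined, but no example accompanies that case. The following minimal sketch assumes two subtables, `d1001` and `d1002`, whose schemas include a `current` column; the table names and column are illustrative assumptions, and, as in the other examples, the join condition must contain the timestamp primary key.

```sql
-- Illustrative sketch only: join the result sets of two subqueries on the
-- timestamp primary key.
SELECT t1.ts, t1.current, t2.current
FROM (SELECT ts, current FROM d1001) t1,
     (SELECT ts, current FROM d1002) t2
WHERE t1.ts = t2.ts;
```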
## Nested Query diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 30422ca20cf44af4e1808eae2912e1591502d4c8..802eb259bf72687f9c75cdb34e3520040d3c9010 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -877,7 +877,7 @@ INTERP(expr) - Interpolation is performed based on `FILL` parameter. - `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable. - Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4). -- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.2.1). +- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.2.3). ### LAST diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index 2db3e7cb31463e20f024f48e62d06422519ba0e7..f70d86570e5fe9cf4f9eb6e58dd1908c62adcc89 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -363,7 +363,7 @@ Shows information about all vgroups in the system or about the vgroups for a spe ## SHOW VNODES ```sql -SHOW VNODES [dnode_name]; +SHOW VNODES {dnode_id | dnode_endpoint}; ``` Shows information about all vnodes in the system or about the vnodes for a specified dnode. diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md index 78b6d5fc05b9b03e1e8b3af268bc357dfaa401bc..f288cd7545c1de25f6cf8b003e084ba3622524a7 100644 --- a/docs/en/12-taos-sql/29-changes.md +++ b/docs/en/12-taos-sql/29-changes.md @@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables. | 27 | GRANT | Added | Grants permissions to a user. | 28 | KILL TRANSACTION | Added | Terminates an mnode transaction. | 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature. -| 30 | MERGE VGROUP | Added | Merges vgroups. | 31 | REVOKE | Added | Revokes permissions from a user. | 32 | SELECT | Modified |
  • SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause.
  • DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY.
  • JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables.
  • Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated.
  • All scalar functions can be used after WHERE.
  • GROUP BY is enhanced. You can group by any scalar expression or combination thereof.
  • SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline.
  • STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline.
  • ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used.
  • Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags.
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported." @@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables. | 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system. | 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode. | 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported. -| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups. -| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration. +| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration. +| 53 | REDISTRIBUTE VGROUP | Added | Adjust the distribution of VNODES in VGROUP. +| 54 | BALANCE VGROUP | Added | Auto adjust the distribution of VNODES in VGROUP. ## SQL Functions diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx index df5b129cea552144d5833190d46e8a78f2fd2fa5..60407c0735bf9bcb42ae54bddcc9afa639a02fcc 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -355,26 +355,29 @@ The `af` package encapsulates TDengine advanced functions such as connection man #### Subscribe -* `func NewConsumer(conf *Config) (*Consumer, error)` +* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` Creates consumer group. -* `func (c *Consumer) Subscribe(topics []string) error` +* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` +Note: `rebalanceCb` is reserved for compatibility purpose + +Subscribes a topic. + +* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` +Note: `rebalanceCb` is reserved for compatibility purpose Subscribes to topics. -* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` +* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` Polling information. -* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error` +* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` +Note: `tmq.TopicPartition` is reserved for compatibility purpose Commit information. -* `func (c *Consumer) FreeMessage(message unsafe.Pointer)` - -Free information. - * `func (c *Consumer) Unsubscribe() error` Unsubscribe. @@ -441,25 +444,36 @@ Close consumer. ### Subscribe via WebSocket -* `func NewConsumer(config *Config) (*Consumer, error)` +* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - Creates consumer group. +Creates consumer group. + +* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` +Note: `rebalanceCb` is reserved for compatibility purpose -* `func (c *Consumer) Subscribe(topic []string) error` +Subscribes a topic. - Subscribes to topics. +* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` +Note: `rebalanceCb` is reserved for compatibility purpose -* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` +Subscribes to topics. - Polling information. +* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` -* `func (c *Consumer) Commit(messageID uint64) error` +Polling information. - Commit information. +* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` +Note: `tmq.TopicPartition` is reserved for compatibility purpose + +Commit information. 
+ +* `func (c *Consumer) Unsubscribe() error` + +Unsubscribe. * `func (c *Consumer) Close() error` - Close consumer. +Close consumer. For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go) diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 25e6b2188a64928e35b8e8c45988a426802eb9f3..d593c3f133dafa3b5f8083577f8f0d4d75cb3d8b 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of ### Preparation -1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it. +1. Install Python. The recent taospy package requires Python 3.6+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it. 2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it. If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI. @@ -78,6 +78,22 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
+#### Install `taos-ws-py` (Optional) + +The taos-ws-py package provides the way to access TDengine via WebSocket. + +##### Install taos-ws-py with taospy + +```bash +pip3 install taospy[ws] +``` + +##### Install taos-ws-py only + +```bash +pip3 install taos-ws-py +``` + ### Verify diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx index 85514f58ac1a19c7ae1a725e9b055f10280ebbb6..756e948bd293477c37439b624bab9af86191e232 100644 --- a/docs/en/14-reference/03-connector/09-csharp.mdx +++ b/docs/en/14-reference/03-connector/09-csharp.mdx @@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data. -The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc. +The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket from v3.0.1 and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc. This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying. @@ -66,31 +66,43 @@ Please refer to [version support list](/reference/connector#version-support) * [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation) * Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details -### Install via dotnet CLI +### Install `TDengine.Connector` - + -You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` command under the path of the existing .NET project. +You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` CLI under the path of the existing .NET project. ``` bash dotnet add package TDengine.Connector ``` - - - -You can [download the source code](https://github.com/taosdata/taos-connector-dotnet/tree/3.0) and directly reference the latest version of the TDengine.Connector library. +You may also modify the current.NET project file. You can include the following 'ItemGroup' in your project file (.csproj). -```bash -git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git -cd taos-connector-dotnet -cp -r src/ myProject +``` XML + + + +``` -cd myProject -dotnet add exmaple.csproj reference src/TDengine.csproj + + + +In this scenario, modifying your project file is required in order to copy the WebSocket dependency dynamic library from the nuget package into your project. +```XML + + + + + + + + + ``` +Notice: `TDengine.Connector` only version>= 3.0.2 includes the dynamic library for WebSocket. 
+ @@ -252,19 +264,20 @@ ws://localhost:6041/test |Sample program |Sample program description | |--------------------------------------------------------------------------------------------------------------------|--------------------------------------------| -| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector | -| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector | -| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector | -| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector | -| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector | -| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector | -| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector | -| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example | +| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector | +| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector | +| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector | +| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector | +| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector | +| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector | +| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector | +| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example | ## Important update records | TDengine.Connector | Description | |--------------------|--------------------------------| +| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.| | 3.0.1 | Support WebSocket and Cloud,With function query, insert, and parameter binding| | 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. | | 1.0.7 | Fixed TDengine.Query() memory leak. 
| diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx index ba8dbb85d4982c5d6c89f5dbe6157bd88a8c00a4..da3aae8309a9282a37597a3ac4bb623e6dfd1b79 100644 --- a/docs/en/14-reference/03-connector/index.mdx +++ b/docs/en/14-reference/03-connector/index.mdx @@ -59,11 +59,11 @@ The different database framework specifications for various programming language | -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- | | **Connection Management** | Support | Support | Support | Support | Support | Support | | **Regular Query** | Support | Support | Support | Support | Support | Support | -| **Parameter Binding** | Not supported | Not supported | support | Support | Not supported | Support | -| **Subscription (TMQ) ** | Not supported | Not supported | support | Not supported | Not supported | Support | -| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported | -| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | support | Support | Support | -| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported | +| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support | +| **Subscription (TMQ) ** | Not Supported | Support | Support | Not Supported | Not Supported | Support | +| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | +| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support | +| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported | :::warning diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index 19feeb674060cbe0e7ec13ed4e47bb3fd85836cc..4017b12be923a9bcb5696c8b4b57c2d67b5c1378 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -92,7 +92,7 @@ taosBenchmark -f -## Command-line argument in detailed +## Command-line argument in detail - **-f/--file ** : specify the configuration file to use. This file includes All parameters. Users should not use this parameter with other parameters on the command-line. There is no default value. @@ -198,7 +198,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **-R/--disorder-range ** : Specify the timestamp range for the disordered data. It leads the resulting disorder timestamp as the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. -- **-F/--prepare_rand ** : +- **-F/--prepared_rand ** : Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000. - **-a/--replica ** : @@ -216,7 +216,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **-? /--help** : Show help information and exit. Users should not use it with other parameters. -## Configuration file parameters in detailed +## Configuration file parameters in detail ### General configuration parameters @@ -380,7 +380,7 @@ The configuration parameters for specifying super table tag columns and data col - **num_of_records_per_req** : Writing the number of rows of records per request to TDengine, the default value is 30000. 
When it is set too large, the TDengine client driver will return the corresponding error message, so you need to lower the setting of this parameter to meet the writing requirements. -- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000. +- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000. ### Query scenario configuration parameters diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index b6bfa4bc7d57a9139992a0f1aab528b267e5bd03..9e56a0b0bff931c3b10103c5d63f9134baf280a1 100644 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -142,6 +142,15 @@ The parameters described in this document by the effect that they have on the sy | Meaning | Switch for allowing TDengine to collect and report service usage information | | Value Range | 0: Not allowed; 1: Allowed | | Default Value | 1 | +### crashReporting + +| Attribute | Description | +| -------- | -------------------------------------------- | +| Applicable | Server Only | +| Meaning | Switch for allowing TDengine to collect and report crash-related information | +| Value Range | 0: Not allowed; 1: Allowed | +| Default Value | 1 | + ## Query Parameters @@ -314,6 +323,7 @@ The charset that takes effect is UTF-8. | Applicable | Server Only | | Meaning | All data files are stored in this directory | | Default Value | /var/lib/taos | +| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter | ### tempDir @@ -594,7 +604,7 @@ The charset that takes effect is UTF-8. | Attribute | Description | | -------- | ----------------------------- | | Applicable | Client only | -| Meaning | Whether schemaless columns are consistently ordered | +| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 | | Value Range | 0: not consistent; 1: consistent. | | Default | 1 | @@ -656,7 +666,7 @@ The charset that takes effect is UTF-8. | 20 | minimalTmpDirGB | Yes | Yes | | | 21 | smlChildTableName | Yes | Yes | | | 22 | smlTagName | Yes | Yes | | -| 23 | smlDataFormat | No | Yes | | +| 23 | smlDataFormat | No | Yes (discarded since 3.0.3.0) | | | 24 | statusInterval | Yes | Yes | | | 25 | logDir | Yes | Yes | | | 26 | minimalLogDirGB | Yes | Yes | | diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md index 10321ab083e6e654e66cb73f1bc21f9fbd678fda..a97a54af02601ddc2dde43614ac7ba8a6c8d4009 100644 --- a/docs/en/14-reference/13-schemaless/13-schemaless.md +++ b/docs/en/14-reference/13-schemaless/13-schemaless.md @@ -80,7 +80,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam NULL. 6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data. 7. Errors encountered throughout the processing will interrupt the writing process and return an error code. -8.
It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3) +8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3, discarded since 3.0.3.0) :::tip All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index 82e98b0d980c16acad0783abd62525cc6bde06ec..7650e9736556d3b9887e0c2fc8f9e94091c58c2d 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -33,7 +33,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo 4. Install TDengine 3.0. 5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support). -### 4. How can I resolve the "Unable to establish connection" error? +### 2. How can I resolve the "Unable to establish connection" error? This error indicates that the client could not connect to the server. Perform the following troubleshooting steps: @@ -68,7 +68,7 @@ This error indicates that the client could not connect to the server. Perform th 11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/). -### 5. How can I resolve the "Unable to resolve FQDN" error? +### 3. How can I resolve the "Unable to resolve FQDN" error? Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows: @@ -79,15 +79,15 @@ Clients and dnodes must be able to resolve the FQDN of each required node. You c 5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine. 6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`. -### 6. What is the most effective way to write data to TDengine? +### 4. What is the most effective way to write data to TDengine? Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement. -### 9. Why are table names not fully displayed? +### 5. Why are table names not fully displayed? The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation. -### 10. 
How can I migrate data? +### 6. How can I migrate data? In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `host name` as the original machine. @@ -97,7 +97,7 @@ The data structure of previous versions of TDengine is not compatible with versi ::: -### 11. How can I temporary change the log level from the TDengine Client? +### 7. How can I temporary change the log level from the TDengine Client? To change the log level for debugging purposes, you can use the following command: @@ -118,14 +118,14 @@ Use `resetlog` to remove all logs generated on the local client. Use the other p For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace). -### Why do TDengine components written in Go fail to compile? +### 8. Why do TDengine components written in Go fail to compile? TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf. When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it. TDengine Go components require Go version 1.14 or later. -### 13. How can I query the storage space being used by my data? +### 9. How can I query the storage space being used by my data? The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`. @@ -133,7 +133,7 @@ To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode -- If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID. -### 15. How is timezone information processed for timestamps? +### 10. How is timezone information processed for timestamps? TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time. @@ -144,13 +144,13 @@ Timestamps are processed as follows: 3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL. 4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method. -### 16. Which network ports are required by TDengine? +### 11. Which network ports are required by TDengine? See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters. Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well. -### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API? +### 12. Why do applications such as Grafana fail to connect to TDengine over the REST API? In TDengine, the REST API is provided by taosAdapter. 
Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service. @@ -158,7 +158,7 @@ Note that the log path for taosAdapter must be configured separately. The defaul For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/). -### 18. How can I resolve out-of-memory (OOM) errors? +### 13. How can I resolve out-of-memory (OOM) errors? OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient. diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 9b83c5fd65a5aa364892b608ffb8b25bd56f66b4..83ea3eb5e6d3edd69d65774a9761324a8f77dcd9 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.2.4 + + + +## 3.0.2.3 + + + ## 3.0.2.2 diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md index dd44e43ab63a7ea227665706f7f7e593a3b81182..97fed654f2f25c782a036501c39204be5750d93e 100644 --- a/docs/en/28-releases/02-tools.md +++ b/docs/en/28-releases/02-tools.md @@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat import Release from "/components/ReleaseV3"; +## 2.4.2 + + + +## 2.4.1 + + + ## 2.4.0 diff --git a/docs/examples/csharp/asyncQuery/asyncquery.csproj b/docs/examples/csharp/asyncQuery/asyncquery.csproj index 23e590cd25aa88e58cabf81717a6baf320f447bc..7c5b693f28dfa8832ae08bbaae9aa8a367951c70 100644 --- a/docs/examples/csharp/asyncQuery/asyncquery.csproj +++ b/docs/examples/csharp/asyncQuery/asyncquery.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/connect/connect.csproj b/docs/examples/csharp/connect/connect.csproj index 3a912f8987ace6ae540726886d901c8d32a7b81b..a08e86d4b42199be44a6551e37da11efb6e06a34 100644 --- a/docs/examples/csharp/connect/connect.csproj +++ b/docs/examples/csharp/connect/connect.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/influxdbLine/influxdbline.csproj b/docs/examples/csharp/influxdbLine/influxdbline.csproj index 58bca485088e409fe1d387c6020418bbc2bf871b..4889f8fde9dc0eb75c0547e32355929d1cceb138 100644 --- a/docs/examples/csharp/influxdbLine/influxdbline.csproj +++ b/docs/examples/csharp/influxdbLine/influxdbline.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/optsJSON/optsJSON.csproj b/docs/examples/csharp/optsJSON/optsJSON.csproj index da16025dcd45f8e5c4ba6e242524c2e56191e93c..208f04c82d19f83f2746871b64a6dfdf0dcf3eae 100644 --- a/docs/examples/csharp/optsJSON/optsJSON.csproj +++ b/docs/examples/csharp/optsJSON/optsJSON.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/optsTelnet/optstelnet.csproj b/docs/examples/csharp/optsTelnet/optstelnet.csproj index 194de21bcc74653a2267b29681ece6243fd401fc..32c76ec4184b82e943897a36bc3bcbbd9ec85149 100644 --- a/docs/examples/csharp/optsTelnet/optstelnet.csproj +++ b/docs/examples/csharp/optsTelnet/optstelnet.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/query/query.csproj 
b/docs/examples/csharp/query/query.csproj index c97dbd3051e1a415b192e73d6753266b0b41b07d..360d73b2c096ef86df59876d0629fd0c4b6a239b 100644 --- a/docs/examples/csharp/query/query.csproj +++ b/docs/examples/csharp/query/query.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/sqlInsert/sqlinsert.csproj b/docs/examples/csharp/sqlInsert/sqlinsert.csproj index ab0e5e717a78faad07c949b434b0d0b8a26c7211..1b6f745c82437e9796da4c48fc720600dbe99cb5 100644 --- a/docs/examples/csharp/sqlInsert/sqlinsert.csproj +++ b/docs/examples/csharp/sqlInsert/sqlinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/stmtInsert/Program.cs b/docs/examples/csharp/stmtInsert/Program.cs index 87e1971feb8499c515206f05a1e916070ac57f4c..80cadb2ff8b596a0484d05ff15aeaa50f22ff859 100644 --- a/docs/examples/csharp/stmtInsert/Program.cs +++ b/docs/examples/csharp/stmtInsert/Program.cs @@ -42,7 +42,7 @@ namespace TDengineExample // 5. execute res = TDengine.StmtExecute(stmt); - CheckStmtRes(res, "faild to execute"); + CheckStmtRes(res, "failed to execute"); // 6. free TaosMultiBind.FreeTaosBind(tags); @@ -92,7 +92,7 @@ namespace TDengineExample int code = TDengine.StmtClose(stmt); if (code != 0) { - throw new Exception($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} "); + throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} "); } } } diff --git a/docs/examples/csharp/stmtInsert/stmtinsert.csproj b/docs/examples/csharp/stmtInsert/stmtinsert.csproj index 3d459fbeda02ab03dc40dac2ecae290724cccbcc..f5b2b673971c3822e6f6c9b65b8f02bc9d4dc80e 100644 --- a/docs/examples/csharp/stmtInsert/stmtinsert.csproj +++ b/docs/examples/csharp/stmtInsert/stmtinsert.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/subscribe/subscribe.csproj b/docs/examples/csharp/subscribe/subscribe.csproj index 8ae1cf6bc6023558c28797a0d9fcccb2f2e87653..191b3f9e9bb07dc72c9bb452ad19e30e42af922a 100644 --- a/docs/examples/csharp/subscribe/subscribe.csproj +++ b/docs/examples/csharp/subscribe/subscribe.csproj @@ -9,7 +9,7 @@ - + diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..6d78be6e7a112475523d449b1ba308923bf13add 100644 --- a/docs/examples/csharp/wsConnect/wsConnect.csproj +++ b/docs/examples/csharp/wsConnect/wsConnect.csproj @@ -3,11 +3,16 @@ Exe net5.0 - enable - + + + + + + + diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..95bfbdea3df6d1f8047c082a31a43dad958edce0 100644 --- a/docs/examples/csharp/wsInsert/wsInsert.csproj +++ b/docs/examples/csharp/wsInsert/wsInsert.csproj @@ -5,9 +5,13 @@ net5.0 enable - - + - + + + + + + diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..e5c2cf767cf4a427d11a72298e932940706fb2f4 100644 --- a/docs/examples/csharp/wsQuery/wsQuery.csproj +++ b/docs/examples/csharp/wsQuery/wsQuery.csproj @@ -7,7 +7,13 @@ - + + + + + + + diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj index 34951dc761903e5a4b7a4bec5dfe55a965ab88be..e5c2cf767cf4a427d11a72298e932940706fb2f4 100644 --- a/docs/examples/csharp/wsStmt/wsStmt.csproj +++ b/docs/examples/csharp/wsStmt/wsStmt.csproj @@ -7,7 +7,13 @@ - + + + + + + + diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod index 
2bc1a74cb6ef14221fa384701773dc73fe3b161d..716a0ef5dc91b4d3598c8af304204edb99e9b584 100644 --- a/docs/examples/go/go.mod +++ b/docs/examples/go/go.mod @@ -2,5 +2,5 @@ module goexample go 1.17 -require github.com/taosdata/driver-go/v3 3.0 +require github.com/taosdata/driver-go/v3 v3.1.0 diff --git a/docs/examples/go/go.sum b/docs/examples/go/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..13e13adaa189053696320a6eb9740daa319a98b7 --- /dev/null +++ b/docs/examples/go/go.sum @@ -0,0 +1,15 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/taosdata/driver-go/v3 v3.1.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index a13d394a2c5009c1ad88684109b6f16b4d8a0540..1f7218936fbe457615562ded1b938daca95225cb 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -1,17 +1,12 @@ package main import ( - "context" - "encoding/json" "fmt" - "strconv" - "time" + "os" "github.com/taosdata/driver-go/v3/af" "github.com/taosdata/driver-go/v3/af/tmq" - "github.com/taosdata/driver-go/v3/common" - "github.com/taosdata/driver-go/v3/errors" - "github.com/taosdata/driver-go/v3/wrapper" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" ) func main() { @@ -28,79 +23,56 @@ func main() { if err != nil { panic(err) } - config := tmq.NewConfig() - defer config.Destroy() - err = config.SetGroupID("test") if err != nil { panic(err) } - err = config.SetAutoOffsetReset("earliest") - if err != nil { - panic(err) - } - err = config.SetConnectIP("127.0.0.1") - if err != nil { - panic(err) - } - err = config.SetConnectUser("root") - if err != nil { - panic(err) - } - err = config.SetConnectPass("taosdata") + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "enable.heartbeat.background": "true", + "experimental.snapshot.enable": "true", + "msg.with.table.name": "true", + }) if err != nil { panic(err) } - err = config.SetConnectPort("6030") + err = 
consumer.Subscribe("example_tmq_topic", nil) if err != nil { panic(err) } - err = config.SetMsgWithTableName(true) + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") if err != nil { panic(err) } - err = config.EnableHeartBeat() + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") if err != nil { panic(err) } - err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { - if result.ErrCode != 0 { - errStr := wrapper.TMQErr2Str(result.ErrCode) - err := errors.NewError(int(result.ErrCode), errStr) - panic(err) + for i := 0; i < 5; i++ { + ev := consumer.Poll(0) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Println(e.String()) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() } - }) - if err != nil { - panic(err) } - consumer, err := tmq.NewConsumer(config) + err = consumer.Unsubscribe() if err != nil { panic(err) } - err = consumer.Subscribe([]string{"example_tmq_topic"}) + err = consumer.Close() if err != nil { panic(err) } - _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") - if err != nil { - panic(err) - } - _, err = db.Exec("insert into example_tmq.t1 values(now,1)") - if err != nil { - panic(err) - } - for { - result, err := consumer.Poll(time.Second) - if err != nil { - panic(err) - } - if result.Type != common.TMQ_RES_DATA { - panic("want message type 1 got " + strconv.Itoa(int(result.Type))) - } - data, _ := json.Marshal(result.Data) - fmt.Println(string(data)) - consumer.Commit(context.Background(), result.Message) - consumer.FreeMessage(result.Message) - break - } - consumer.Close() } diff --git a/docs/examples/python/conn_native_pandas.py b/docs/examples/python/conn_native_pandas.py index 56942ef57085766cd128b03cabb7a357587eab16..f3bab15efbe6669a88828fb194682dbfedb382df 100644 --- a/docs/examples/python/conn_native_pandas.py +++ b/docs/examples/python/conn_native_pandas.py @@ -1,8 +1,11 @@ import pandas -from sqlalchemy import create_engine +from sqlalchemy import create_engine, text engine = create_engine("taos://root:taosdata@localhost:6030/power") -df = pandas.read_sql("SELECT * FROM meters", engine) +conn = engine.connect() +df = pandas.read_sql(text("SELECT * FROM power.meters"), conn) +conn.close() + # print index print(df.index) diff --git a/docs/examples/python/conn_rest_pandas.py b/docs/examples/python/conn_rest_pandas.py index 0164080cd5a05e72dce40b1d111ea423623ff9b2..1b207d6ff10a353f3473116ce807cc8daf362ca7 100644 --- a/docs/examples/python/conn_rest_pandas.py +++ b/docs/examples/python/conn_rest_pandas.py @@ -1,8 +1,10 @@ import pandas -from sqlalchemy import create_engine +from sqlalchemy import create_engine, text engine = create_engine("taosrest://root:taosdata@localhost:6041") -df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine) +conn = engine.connect() +df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn) +conn.close() # print index print(df.index) diff --git a/docs/examples/python/connect_rest_examples.py b/docs/examples/python/connect_rest_examples.py index 900ec1022ec81ac2db761d918d1ec11c9bb26852..0f8625ae5387a275f7b84948ad80191b8e443862 100644 --- a/docs/examples/python/connect_rest_examples.py +++ b/docs/examples/python/connect_rest_examples.py @@ -1,24 +1,25 @@ # ANCHOR: connect from taosrest import connect, TaosRestConnection, TaosRestCursor -conn: TaosRestConnection = connect(url="http://localhost:6041", - user="root", - 
password="taosdata", - timeout=30) +conn = connect(url="http://localhost:6041", + user="root", + password="taosdata", + timeout=30) # ANCHOR_END: connect # ANCHOR: basic # create STable -cursor: TaosRestCursor = conn.cursor() +cursor = conn.cursor() cursor.execute("DROP DATABASE IF EXISTS power") cursor.execute("CREATE DATABASE power") -cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +cursor.execute( + "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") # insert data -cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") print("inserted row count:", cursor.rowcount) # query data @@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount) # get column names from cursor column_names = [meta[0] for meta in cursor.description] # get rows -data: list[tuple] = cursor.fetchall() +data = cursor.fetchall() print(column_names) for row in data: print(row) diff --git a/docs/examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py index a7179b4cf859eb440b535a797eeb8e2be1e33589..8b754ec7226e8fd25dbdeb27b28faebdcf612049 100644 --- a/docs/examples/python/connection_usage_native_reference.py +++ b/docs/examples/python/connection_usage_native_reference.py @@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test") # change database. same as execute "USE db" conn.select_db("test") conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)") -affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)") +affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)") print("affected_row", affected_row) # output: # affected_row 3 @@ -16,10 +16,10 @@ print("affected_row", affected_row) # ANCHOR: query # Execute a sql and get its result set. 
It's useful for SELECT statement -result: taos.TaosResult = conn.query("SELECT * from weather") +result = conn.query("SELECT * from weather") # Get fields from result -fields: taos.field.TaosFields = result.fields +fields = result.fields for field in fields: print(field) # {name: ts, type: 9, bytes: 8} diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py index c9d606388fdecd85f1468f24cc497ecc5941f035..626e3310b120b9415952614b4b110ed29f787582 100644 --- a/docs/examples/python/fast_write_example.py +++ b/docs/examples/python/fast_write_example.py @@ -1,15 +1,14 @@ # install dependencies: # recommend python >= 3.8 -# pip3 install faster-fifo # import logging import math +import multiprocessing import sys import time import os -from multiprocessing import Process -from faster_fifo import Queue +from multiprocessing import Process, Queue from mockdatasource import MockDataSource from queue import Empty from typing import List @@ -22,8 +21,7 @@ TABLE_COUNT = 1000 QUEUE_SIZE = 1000000 MAX_BATCH_SIZE = 3000 -read_processes = [] -write_processes = [] +_DONE_MESSAGE = '__DONE__' def get_connection(): @@ -44,41 +42,64 @@ def get_connection(): # ANCHOR: read -def run_read_task(task_id: int, task_queues: List[Queue]): +def run_read_task(task_id: int, task_queues: List[Queue], infinity): table_count_per_task = TABLE_COUNT // READ_TASK_COUNT - data_source = MockDataSource(f"tb{task_id}", table_count_per_task) + data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity) try: for batch in data_source: + if isinstance(batch, tuple): + batch = [batch] for table_id, rows in batch: # hash data to different queue i = table_id % len(task_queues) # block putting forever when the queue is full - task_queues[i].put_many(rows, block=True, timeout=-1) + for row in rows: + task_queues[i].put(row) + if not infinity: + for queue in task_queues: + queue.put(_DONE_MESSAGE) except KeyboardInterrupt: pass + finally: + logging.info('read task over') # ANCHOR_END: read + # ANCHOR: write -def run_write_task(task_id: int, queue: Queue): +def run_write_task(task_id: int, queue: Queue, done_queue: Queue): from sql_writer import SQLWriter log = logging.getLogger(f"WriteTask-{task_id}") writer = SQLWriter(get_connection) lines = None try: while True: - try: - # get as many as possible - lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE) + over = False + lines = [] + for _ in range(MAX_BATCH_SIZE): + try: + line = queue.get_nowait() + if line == _DONE_MESSAGE: + over = True + break + if line: + lines.append(line) + except Empty: + time.sleep(0.1) + if len(lines) > 0: writer.process_lines(lines) - except Empty: - time.sleep(0.01) + if over: + done_queue.put(_DONE_MESSAGE) + break except KeyboardInterrupt: pass except BaseException as e: log.debug(f"lines={lines}") raise e + finally: + writer.close() + log.debug('write task over') # ANCHOR_END: write @@ -103,47 +124,64 @@ def set_global_config(): # ANCHOR: monitor -def run_monitor_process(): +def run_monitor_process(done_queue: Queue): log = logging.getLogger("DataBaseMonitor") - conn = get_connection() - conn.execute("DROP DATABASE IF EXISTS test") - conn.execute("CREATE DATABASE test") - conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " - "TAGS (location BINARY(64), groupId INT)") + conn = None + try: + conn = get_connection() - def get_count(): - res = conn.query("SELECT count(*) FROM test.meters") - rows = res.fetch_all() - return rows[0][0] if 
rows else 0 + def get_count(): + res = conn.query("SELECT count(*) FROM test.meters") + rows = res.fetch_all() + return rows[0][0] if rows else 0 - last_count = 0 - while True: - time.sleep(10) - count = get_count() - log.info(f"count={count} speed={(count - last_count) / 10}") - last_count = count + last_count = 0 + while True: + try: + done = done_queue.get_nowait() + if done == _DONE_MESSAGE: + break + except Empty: + pass + time.sleep(10) + count = get_count() + log.info(f"count={count} speed={(count - last_count) / 10}") + last_count = count + finally: + conn.close() # ANCHOR_END: monitor # ANCHOR: main -def main(): +def main(infinity): set_global_config() logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, " f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}") - monitor_process = Process(target=run_monitor_process) + conn = get_connection() + conn.execute("DROP DATABASE IF EXISTS test") + conn.execute("CREATE DATABASE IF NOT EXISTS test") + conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " + "TAGS (location BINARY(64), groupId INT)") + conn.close() + + done_queue = Queue() + monitor_process = Process(target=run_monitor_process, args=(done_queue,)) monitor_process.start() - time.sleep(3) # waiting for database ready. + logging.debug(f"monitor task started with pid {monitor_process.pid}") task_queues: List[Queue] = [] + write_processes = [] + read_processes = [] + # create task queues for i in range(WRITE_TASK_COUNT): - queue = Queue(max_size_bytes=QUEUE_SIZE) + queue = Queue() task_queues.append(queue) # create write processes for i in range(WRITE_TASK_COUNT): - p = Process(target=run_write_task, args=(i, task_queues[i])) + p = Process(target=run_write_task, args=(i, task_queues[i], done_queue)) p.start() logging.debug(f"WriteTask-{i} started with pid {p.pid}") write_processes.append(p) @@ -151,13 +189,19 @@ def main(): # create read processes for i in range(READ_TASK_COUNT): queues = assign_queues(i, task_queues) - p = Process(target=run_read_task, args=(i, queues)) + p = Process(target=run_read_task, args=(i, queues, infinity)) p.start() logging.debug(f"ReadTask-{i} started with pid {p.pid}") read_processes.append(p) try: monitor_process.join() + for p in read_processes: + p.join() + for p in write_processes: + p.join() + time.sleep(1) + return except KeyboardInterrupt: monitor_process.terminate() [p.terminate() for p in read_processes] @@ -176,5 +220,6 @@ def assign_queues(read_task_id, task_queues): if __name__ == '__main__': - main() + multiprocessing.set_start_method('spawn') + main(False) # ANCHOR_END: main diff --git a/docs/examples/python/kafka_example.py b/docs/examples/python/kafka_example.py index 735059eec0f3dcf5094810916e66a39db5682560..43f9183f7e25b680827aef836363ef5f0549468b 100644 --- a/docs/examples/python/kafka_example.py +++ b/docs/examples/python/kafka_example.py @@ -26,7 +26,8 @@ class Consumer(object): 'bath_consume': True, 'batch_size': 1000, 'async_model': True, - 'workers': 10 + 'workers': 10, + 'testing': False } LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose', @@ -46,11 +47,12 @@ class Consumer(object): def __init__(self, **configs): self.config: dict = self.DEFAULT_CONFIGS self.config.update(configs) - self.consumer = KafkaConsumer( - self.config.get('kafka_topic'), # topic - bootstrap_servers=self.config.get('kafka_brokers'), - 
group_id=self.config.get('kafka_group_id'), - ) + if not self.config.get('testing'): + self.consumer = KafkaConsumer( + self.config.get('kafka_topic'), # topic + bootstrap_servers=self.config.get('kafka_brokers'), + group_id=self.config.get('kafka_group_id'), + ) self.taos = taos.connect( host=self.config.get('taos_host'), user=self.config.get('taos_user'), @@ -60,7 +62,7 @@ class Consumer(object): ) if self.config.get('async_model'): self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers')) - self.tasks: list[Future] = [] + self.tasks = [] # tags and table mapping # key: {location}_{groupId} value: self.tag_table_mapping = {} i = 0 @@ -115,14 +117,14 @@ class Consumer(object): if self.taos is not None: self.taos.close() - def _run(self, f: Callable[[ConsumerRecord], bool]): + def _run(self, f): for message in self.consumer: if self.config.get('async_model'): self.pool.submit(f(message)) else: f(message) - def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]): + def _run_batch(self, f): while True: messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size')) if messages: @@ -140,7 +142,7 @@ class Consumer(object): logging.info('## insert sql %s', sql) return self.taos.execute(sql=sql) == 1 - def _to_taos_batch(self, messages: list[list[ConsumerRecord]]): + def _to_taos_batch(self, messages): sql = self._build_sql_batch(messages=messages) if len(sql) == 0: # decode error, skip return @@ -162,7 +164,7 @@ class Consumer(object): table_name = self._get_table_name(location=location, group_id=group_id) return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase) - def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str: + def _build_sql_batch(self, messages) -> str: sql_list = [] for partition_messages in messages: for message in partition_messages: @@ -186,7 +188,54 @@ def _get_location_and_group(key: str) -> (str, int): return fields[0], fields[1] +def test_to_taos(consumer: Consumer): + msg = { + 'location': 'California.SanFrancisco', + 'groupId': 1, + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, + } + record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1, + topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None) + assert consumer._to_taos(message=record) + + +def test_to_taos_batch(consumer: Consumer): + records = [ + [ + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'location': 'California.SanFrancisco', + 'groupId': 1, + 'ts': '2022-12-06 15:13:38.643', + 'current': 3.41, + 'voltage': 105, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ConsumerRecord(checksum=None, headers=None, offset=1, key=None, + value=json.dumps({'location': 'California.LosAngles', + 'groupId': 2, + 'ts': '2022-12-06 15:13:39.643', + 'current': 3.41, + 'voltage': 102, + 'phase': 0.02027, }), + partition=1, topic='test', serialized_key_size=None, serialized_header_size=None, + serialized_value_size=None, timestamp=time.time(), timestamp_type=None), + ] + ] + + consumer._to_taos_batch(messages=records) + + if __name__ == '__main__': - consumer = Consumer(async_model=True) + consumer = Consumer(async_model=True, testing=True) + # init env consumer.init_env() - consumer.consume() 
\ No newline at end of file + # consumer.consume() + # test build sql + # test build sql batch + test_to_taos(consumer) + test_to_taos_batch(consumer) diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py index 1c516a800e007934f8e6815f82024a53fea70073..9c702936ea6f1bdff3f604d376fd1925b4dc118e 100644 --- a/docs/examples/python/mockdatasource.py +++ b/docs/examples/python/mockdatasource.py @@ -10,13 +10,14 @@ class MockDataSource: "9.4,118,0.141,California.SanFrancisco,4" ] - def __init__(self, tb_name_prefix, table_count): + def __init__(self, tb_name_prefix, table_count, infinity=True): self.table_name_prefix = tb_name_prefix + "_" self.table_count = table_count self.max_rows = 10000000 self.current_ts = round(time.time() * 1000) - self.max_rows * 100 # [(tableId, tableName, values),] self.data = self._init_data() + self.infinity = infinity def _init_data(self): lines = self.samples * (self.table_count // 5 + 1) @@ -28,14 +29,19 @@ class MockDataSource: def __iter__(self): self.row = 0 - return self + if not self.infinity: + return iter(self._iter_data()) + else: + return self def __next__(self): """ next 1000 rows for each table. return: {tableId:[row,...]} """ - # generate 1000 timestamps + return self._iter_data() + + def _iter_data(self): ts = [] for _ in range(1000): self.current_ts += 100 @@ -47,3 +53,9 @@ class MockDataSource: rows = [table_name + ',' + t + ',' + values for t in ts] result.append((table_id, rows)) return result + + +if __name__ == '__main__': + datasource = MockDataSource('t', 10, False) + for data in datasource: + print(data) diff --git a/docs/examples/python/native_insert_example.py b/docs/examples/python/native_insert_example.py index 94fd00a6e9d1dcd2119693c4b5c862d36c219a3d..cdde7d23d24d12e11c67b6c6acc0e0b089fb5335 100644 --- a/docs/examples/python/native_insert_example.py +++ b/docs/examples/python/native_insert_example.py @@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection): # The generated SQL is: -# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) +# INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +# d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +# d1003 USING meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +# d1004 USING meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) def get_sql(): global lines diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py index 
758167376b009f21afc701be7d89c1bfbabdeb9f..3456981a7b9a174e38f8795ff7251ab3c675174b 100644 --- a/docs/examples/python/sql_writer.py +++ b/docs/examples/python/sql_writer.py @@ -10,6 +10,7 @@ class SQLWriter: self._tb_tags = {} self._conn = get_connection_func() self._max_sql_length = self.get_max_sql_length() + self._conn.execute("create database if not exists test") self._conn.execute("USE test") def get_max_sql_length(self): @@ -20,7 +21,7 @@ class SQLWriter: return int(r[1]) return 1024 * 1024 - def process_lines(self, lines: str): + def process_lines(self, lines: [str]): """ :param lines: [[tbName,ts,current,voltage,phase,location,groupId]] """ @@ -60,6 +61,7 @@ class SQLWriter: buf.append(q) sql_len += len(q) sql += " ".join(buf) + self.create_tables() self.execute_sql(sql) self._tb_values.clear() @@ -88,3 +90,22 @@ class SQLWriter: except BaseException as e: self.log.error("Execute SQL: %s", sql) raise e + + def close(self): + if self._conn: + self._conn.close() + + +if __name__ == '__main__': + def get_connection_func(): + conn = taos.connect() + return conn + + + writer = SQLWriter(get_connection_func=get_connection_func) + writer.execute_sql( + "create stable if not exists meters (ts timestamp, current float, voltage int, phase float) " + "tags (location binary(64), groupId int)") + writer.execute_sql( + "INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) " + "VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)") diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py index a4625ca11accfbf7d263f4c1993f712987a136cb..6f7fb87c89ce4cb96793d09a837f60ad54ae69bc 100644 --- a/docs/examples/python/tmq_example.py +++ b/docs/examples/python/tmq_example.py @@ -1,58 +1,55 @@ +from taos.tmq import Consumer import taos -from taos.tmq import * -conn = taos.connect() -print("init") -conn.execute("drop topic if exists topic_ctb_column") -conn.execute("drop database if exists py_tmq") -conn.execute("create database if not exists py_tmq vgroups 2") -conn.select_db("py_tmq") -conn.execute( - "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)" -) -conn.execute("create table if not exists tb1 using stb1 tags(1)") -conn.execute("create table if not exists tb2 using stb1 tags(2)") -conn.execute("create table if not exists tb3 using stb1 tags(3)") - -print("create topic") -conn.execute( - "create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1" -) - -print("build consumer") -conf = TaosTmqConf() -conf.set("group.id", "tg2") -conf.set("td.connect.user", "root") -conf.set("td.connect.pass", "taosdata") -conf.set("enable.auto.commit", "true") - - -def tmq_commit_cb_print(tmq, resp, offset, param=None): - print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") - - -conf.set_auto_commit_cb(tmq_commit_cb_print, None) -tmq = conf.new_consumer() - -print("build topic list") - -topic_list = TaosTmqList() -topic_list.append("topic_ctb_column") - -print("basic consume loop") -tmq.subscribe(topic_list) - -sub_list = tmq.subscription() - -print("subscribed topics: ", sub_list) - -while 1: - res = tmq.poll(1000) - if res: - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) +def init_tmq_env(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) + conn.execute("create database if not 
exists {}".format(db)) + conn.select_db(db) + conn.execute( + "create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))") + conn.execute("create table if not exists tb1 using stb1 tags(1, 't1')") + conn.execute("create table if not exists tb2 using stb1 tags(2, 't2')") + conn.execute("create table if not exists tb3 using stb1 tags(3, 't3')") + conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic)) + conn.execute("insert into tb1 values (now, 1, 1.0, 'tmq test')") + conn.execute("insert into tb2 values (now, 2, 2.0, 'tmq test')") + conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')") + + +def cleanup(db, topic): + conn = taos.connect() + conn.execute("drop topic if exists {}".format(topic)) + conn.execute("drop database if exists {}".format(db)) + + +if __name__ == '__main__': + init_tmq_env("tmq_test", "tmq_test_topic") # init env + consumer = Consumer( + { + "group.id": "tg2", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "enable.auto.commit": "true", + } + ) + consumer.subscribe(["tmq_test_topic"]) + + try: + while True: + res = consumer.poll(1) + if not res: + break + err = res.error() + if err is not None: + raise err + val = res.value() + + for block in val: + print(block.fetchall()) + finally: + consumer.unsubscribe() + consumer.close() + cleanup("tmq_test", "tmq_test_topic") diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py new file mode 100644 index 0000000000000000000000000000000000000000..e1dcb0086a995c0c20a5d079ed6d8f4d18ea0356 --- /dev/null +++ b/docs/examples/python/tmq_websocket_example.py @@ -0,0 +1,31 @@ +#!/usr/bin/python3 +from taosws import Consumer + +conf = { + "td.connect.websocket.scheme": "ws", + "group.id": "0", +} +consumer = Consumer(conf) + +consumer.subscribe(["test"]) + +while True: + message = consumer.poll(timeout=1.0) + if message: + id = message.vgroup() + topic = message.topic() + database = message.database() + + for block in message: + nrows = block.nrows() + ncols = block.ncols() + for row in block: + print(row) + values = block.fetchall() + print(nrows, ncols) + + # consumer.commit(message) + else: + break + +consumer.close() diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index 832310aa7c677940c7e4ca13be5f31c2d98a64dc..e144c563b97304f6257d3a1989d7caf85d3789aa 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -4,6 +4,7 @@ description: '快速设置 TDengine 环境并体验其高效写入和查询' --- import xiaot from './xiaot.webp' +import xiaot_new from './xiaot-new.webp' import channel from './channel.webp' import official_account from './official-account.webp' @@ -35,13 +36,13 @@ TDengine 知识地图中涵盖了 TDengine 的各种知识点,揭示了各概 - + - - - + + +
小 T 的二维码小 T 的二维码 TDengine 微信视频号 TDengine 微信公众号
加入“物联网大数据技术前沿群”
与大家进行技术交流
关注 TDengine 微信视频号
收看技术直播与教学视频
关注 TDengine 微信公众号
阅读核心技术与行业案例文章
加入“物联网大数据技术群”
与大家进行技术交流
关注 TDengine 视频号
收看技术直播与教学视频
关注 TDengine 公众号
阅读技术文章与行业案例
diff --git a/docs/zh/05-get-started/xiaot-new.webp b/docs/zh/05-get-started/xiaot-new.webp new file mode 100644 index 0000000000000000000000000000000000000000..483b54d2ef3d8894527aa154a42cf6cd2463c579 Binary files /dev/null and b/docs/zh/05-get-started/xiaot-new.webp differ diff --git a/docs/zh/07-develop/03-insert-data/30-influxdb-line.mdx b/docs/zh/07-develop/03-insert-data/30-influxdb-line.mdx index afe73af8dbc3a768a3e7640a9de5f11dbe18653c..876f123fe13776b5ccb045fc390182e8bc8ecf8e 100644 --- a/docs/zh/07-develop/03-insert-data/30-influxdb-line.mdx +++ b/docs/zh/07-develop/03-insert-data/30-influxdb-line.mdx @@ -37,7 +37,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0 - tag_set 中的所有的数据自动转化为 NCHAR 数据类型; - field_set 中的每个数据项都需要对自身的数据类型进行描述, 比如 1.2f32 代表 FLOAT 类型的数值 1.2, 如果不带类型后缀会被当作 DOUBLE 处理; - timestamp 支持多种时间精度。写入数据的时候需要用参数指定时间精度,支持从小时到纳秒的 6 种时间精度。 -- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3 之后的版本 smlDataFormat 默认为 false) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议) +- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3 之后的版本 smlDataFormat 默认为 false,从3.0.3.0开始,该配置废弃) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议) - 默认产生的子表名是根据规则生成的唯一 ID 值。用户也可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议) ::: diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index 1f5a089aaa2a051e238eedc0315c37cad643b33f..fb171042d973ec8201ca0ae5f016c5d5b4324e85 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -92,22 +92,21 @@ void close() throws SQLException; ```python -class TaosConsumer(): - def __init__(self, *topics, **configs) +class Consumer: + def subscribe(self, topics): + pass - def __iter__(self) + def unsubscribe(self): + pass - def __next__(self) + def poll(self, timeout: float = 1.0): + pass - def sync_next(self) - - def subscription(self) + def close(self): + pass - def unsubscribe(self) - - def close(self) - - def __del__(self) + def commit(self, message): + pass ``` @@ -115,19 +114,22 @@ class TaosConsumer(): ```go -func NewConsumer(conf *Config) (*Consumer, error) - -func (c *Consumer) Close() error +func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error) -func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error +// 出于兼容目的保留 rebalanceCb 参数,当前未使用 +func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error -func (c *Consumer) FreeMessage(message unsafe.Pointer) +// 出于兼容目的保留 rebalanceCb 参数,当前未使用 +func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error -func (c *Consumer) Poll(timeout time.Duration) (*Result, error) +func (c *Consumer) Poll(timeoutMs int) tmq.Event -func (c *Consumer) Subscribe(topics []string) error +// 出于兼容目的保留 tmq.TopicPartition 参数,当前未使用 +func (c *Consumer) Commit() ([]tmq.TopicPartition, error) func (c *Consumer) Unsubscribe() error + +func (c *Consumer) Close() error ``` @@ -355,50 +357,20 @@ public class MetersDeserializer extends ReferenceDeserializer { ```go -config := tmq.NewConfig() -defer config.Destroy() -err = config.SetGroupID("test") -if err != nil { - panic(err) -} -err = 
config.SetAutoOffsetReset("earliest") -if err != nil { - panic(err) -} -err = config.SetConnectIP("127.0.0.1") -if err != nil { - panic(err) -} -err = config.SetConnectUser("root") -if err != nil { - panic(err) -} -err = config.SetConnectPass("taosdata") -if err != nil { - panic(err) -} -err = config.SetConnectPort("6030") -if err != nil { - panic(err) -} -err = config.SetMsgWithTableName(true) -if err != nil { - panic(err) -} -err = config.EnableHeartBeat() -if err != nil { - panic(err) -} -err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { - if result.ErrCode != 0 { - errStr := wrapper.TMQErr2Str(result.ErrCode) - err := errors.NewError(int(result.ErrCode), errStr) - panic(err) - } -}) -if err != nil { - panic(err) +conf := &tmq.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_c", + "enable.auto.commit": "false", + "enable.heartbeat.background": "true", + "experimental.snapshot.enable": "true", + "msg.with.table.name": "true", } +consumer, err := NewConsumer(conf) ``` @@ -420,34 +392,33 @@ let mut consumer = tmq.build()?; -Python 语言下引入 `taos` 库的 `TaosConsumer` 类,创建一个 Consumer 示例: +Python 语言下引入 `taos` 库的 `Consumer` 类,创建一个 Consumer 示例: ```python -from taos.tmq import TaosConsumer +from taos.tmq import Consumer -# Syntax: `consumer = TaosConsumer(*topics, **args)` +# Syntax: `consumer = Consumer(configs)` # # Example: -consumer = TaosConsumer('topic1', 'topic2', td_connect_ip = "127.0.0.1", group_id = "local") +consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -其中,元组类型参数被视为 *Topics*,字典类型参数用于以下订阅配置设置: +其中,`configs` 为 dict 类型,传递创建 Consumer 的参数。可以配置的参数有: -| 参数名称 | 类型 | 参数说明 | 备注 | -| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- | -| `td_connect_ip` | string | 用于创建连接,同 `taos_connect` | | -| `td_connect_user` | string | 用于创建连接,同 `taos_connect` | | -| `td_connect_pass` | string | 用于创建连接,同 `taos_connect` | | -| `td_connect_port` | string | 用于创建连接,同 `taos_connect` | | -| `group_id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 | -| `client_id` | string | 客户端 ID | 最大长度:192。 | -| `auto_offset_reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` | -| `enable_auto_commit` | string | 启用自动提交 | 合法值:`true`, `false`,默认为 true | -| `auto_commit_interval_ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms | -| `enable_heartbeat_background` | string | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 合法值:`true`, `false` | -| `experimental_snapshot_enable` | string | 是否允许从 TSDB 消费数据 | 合法值:`true`, `false` | -| `msg_with_table_name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` | -| `timeout` | int | 消费者拉取数据的超时时间 | | +| 参数名称 | 类型 | 参数说明 | 备注 | +|:------:|:----:|:-------:|:---:| +| `td.connect.ip` | string | 用于创建连接|| +| `td.connect.user` | string | 用于创建连接|| +| `td.connect.pass` | string | 用于创建连接|| +| `td.connect.port` | string | 用于创建连接|| +| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192 | +| `client.id` | string | 客户端 ID | 最大长度:192 | +| `msg.with.table.name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` | +| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` | +| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms | +| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` | +| `experimental.snapshot.enable` | string 
| 是否允许从 TSDB 消费数据 | 合法值:`true`, `false` | +| `enable.heartbeat.background` | string | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 合法值:`true`, `false` | @@ -532,11 +503,7 @@ consumer.subscribe(topics); ```go -consumer, err := tmq.NewConsumer(config) -if err != nil { - panic(err) -} -err = consumer.Subscribe([]string{"example_tmq_topic"}) +err = consumer.Subscribe("example_tmq_topic", nil) if err != nil { panic(err) } @@ -554,7 +521,7 @@ consumer.subscribe(["tmq_meters"]).await?; ```python -consumer = TaosConsumer('topic_ctb_column', group_id='vg2') +consumer.subscribe(['topic1', 'topic2']) ``` @@ -620,13 +587,17 @@ while(running){ ```go for { - result, err := consumer.Poll(time.Second) - if err != nil { - panic(err) + ev := consumer.Poll(0) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Println(e.Value()) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() } - fmt.Println(result) - consumer.Commit(context.Background(), result.Message) - consumer.FreeMessage(result.Message) } ``` @@ -669,9 +640,17 @@ for { ```python -for msg in consumer: - for row in msg: - print(row) +while True: + res = consumer.poll(100) + if not res: + continue + err = res.error() + if err is not None: + raise err + val = res.value() + + for block in val: + print(block.fetchall()) ``` @@ -738,7 +717,11 @@ consumer.close(); ```go -consumer.Close() +/* Unsubscribe */ +_ = consumer.Unsubscribe() + +/* Close consumer */ +_ = consumer.Close() ``` diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx index 0fc4007f6362697222b425c8c2c803b911b9ac8a..2aa1a58e49f34b412f12bd0d67586dc6e56cf0bc 100644 --- a/docs/zh/08-connector/20-go.mdx +++ b/docs/zh/08-connector/20-go.mdx @@ -15,7 +15,7 @@ import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx" import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx" import GoQuery from "../07-develop/04-query-data/_go.mdx" -`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 +`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言 [database/sql](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 `driver-go` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。另外一种是 **REST 连接**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 运行实例。REST 连接实现的功能特性集合和原生连接有少量不同。 @@ -112,6 +112,7 @@ REST 连接支持所有能运行 Go 的平台。 ```text username:password@protocol(address)/dbname?param=value ``` + ### 使用连接器进行连接 @@ -176,6 +177,7 @@ func main() { } } ``` + @@ -207,6 +209,7 @@ func main() { } } ``` +
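To tie the TMQ consumer parameter table and the subscription snippets earlier in this patch to one complete flow, here is a minimal Python sketch (not part of the patch itself) of the new dict-configured `Consumer`: create, subscribe, poll, commit, close. It assumes a recent `taospy` with TMQ support, a reachable TDengine server, and an existing topic; the topic name `topic1` and the bounded loop are illustrative.

```python
# Minimal sketch of the dict-configured TMQ consumer flow documented above.
# Assumes taospy with TMQ support and an existing topic named "topic1".
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "local",
    "td.connect.ip": "127.0.0.1",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
    "enable.auto.commit": "false",   # commit manually after processing
    "auto.offset.reset": "earliest",
})

consumer.subscribe(["topic1"])
try:
    for _ in range(10):              # bounded loop for illustration
        res = consumer.poll(1)       # timeout in seconds
        if not res:
            continue
        err = res.error()
        if err is not None:
            raise err
        for block in res.value():    # each block is a chunk of rows
            print(block.fetchall())
        consumer.commit(res)         # manual commit of the processed message
finally:
    consumer.unsubscribe()
    consumer.close()
```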
@@ -357,33 +360,32 @@ func main() { #### 订阅 -* `func NewConsumer(conf *Config) (*Consumer, error)` - -创建消费者。 +* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` -* `func (c *Consumer) Subscribe(topics []string) error` + 创建消费者。 -订阅主题。 +* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` +注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用 -* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` + 订阅单个主题。 -轮询消息。 +* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` +注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用 -* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error` + 订阅主题。 -提交消息。 +* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` -* `func (c *Consumer) FreeMessage(message unsafe.Pointer)` + 轮询消息。 -释放消息。 +* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` +注意:出于兼容目的保留 `tmq.TopicPartition` 参数,当前未使用 -* `func (c *Consumer) Unsubscribe() error` - -取消订阅。 + 提交消息。 * `func (c *Consumer) Close() error` -关闭消费者。 + 关闭连接。 #### schemaless @@ -443,25 +445,32 @@ func main() { ### 通过 WebSocket 订阅 -* `func NewConsumer(config *Config) (*Consumer, error)` +* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` + + 创建消费者。 + +* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` +注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用 - 创建消费者。 + 订阅单个主题。 -* `func (c *Consumer) Subscribe(topic []string) error` +* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` +注意:出于兼容目的保留 `rebalanceCb` 参数,当前未使用 - 订阅主题。 + 订阅主题。 -* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` +* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - 轮询消息。 + 轮询消息。 -* `func (c *Consumer) Commit(messageID uint64) error` +* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` +注意:出于兼容目的保留 `tmq.TopicPartition` 参数,当前未使用 - 提交消息。 + 提交消息。 * `func (c *Consumer) Close() error` - 关闭消费者。 + 关闭连接。 完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go) diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 2ca11800c8b34dd8fe815cad8e81fe7b6faf79ad..1962df9607eb82ffaed75f9472a0c97fbc9f0ba3 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -32,7 +32,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con ### 准备 -1. 安装 Python。建议使用 Python >= 3.7。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 +1. 安装 Python。新近版本 taospy 包要求 Python 3.6+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 3. 
如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 @@ -78,6 +78,23 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git +#### 安装 `taos-ws-py`(可选) + +taos-ws-py 包提供了通过 WebSocket 连接 TDengine 的能力,可选安装 taos-ws-py 以获得 WebSocket 连接 TDengine 的能力。 + + +##### 和 taospy 同时安装 + +```bash +pip3 install taospy[ws] +``` + +##### 单独安装 + +```bash +pip3 install taos-ws-py +``` + ### 安装验证 @@ -306,6 +323,30 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 +### 数据订阅 + +连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅](../../develop/tmq/)。 + + + + +`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API,相关 API 定义请参考 [数据订阅文档](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api)。 + +```python +{{#include docs/examples/python/tmq_example.py}} +``` + + + + +除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据。 + +```python +{{#include docs/examples/python/tmq_websocket_example.py}} +``` + + + ### 其它示例程序 | 示例程序链接 | 示例程序内容 | @@ -314,7 +355,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 | [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | 参数绑定,一次绑定一行 | | [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 | | [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 | -| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | tmq 订阅 | +| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | tmq 订阅 | ## 其它说明 diff --git a/docs/zh/08-connector/40-csharp.mdx b/docs/zh/08-connector/40-csharp.mdx index a1a161d4eea9e2534ebb3a573211dcfae5dbb21f..80a831bab9d7343eaa85242c2e0e5e85c9f0d864 100644 --- a/docs/zh/08-connector/40-csharp.mdx +++ b/docs/zh/08-connector/40-csharp.mdx @@ -17,7 +17,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。 +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 自 v3.0.1 起还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。 本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 @@ -67,30 +67,45 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx" * [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装) * 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动) -### 使用 dotnet CLI 安装 +### 安装 TDengine.Connector - + -可以在当前 .NET 项目的路径下,通过 dotnet 命令引用 Nuget 中发布的 `TDengine.Connector` 到当前项目。 +可以在当前 .NET 项目的路径下,通过 dotnet CLI 添加 Nuget package `TDengine.Connector` 到当前项目。 ``` bash dotnet add package TDengine.Connector ``` +也可以修改当前项目的 `.csproj` 文件,添加如下 ItemGroup。 + +``` XML + + + +``` + - -也可以[下载源码](https://github.com/taosdata/taos-connector-dotnet/tree/3.0),直接引用 TDengine.Connector 库 + -```bash -git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git -cd taos-connector-dotnet -cp -r src/ myProject +需要修改目标项目的 `.csproj` 项目文件,将 `.nupkg` 中的 `runtimes` 目录中的动态库复制到当前项目的 `$(OutDir)` 目录下。 -cd myProject -dotnet add exmaple.csproj reference src/TDengine.csproj +```XML + + + + + + + + + ``` + +注意:`TDengine.Connector` 自 version>= 
3.0.2 的 nuget package 中才会有动态库( taosws.dll,libtaows.so )。 + @@ -148,9 +163,9 @@ namespace TDengineExample 各部分意义见下表: -* **protocol**: 显示指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持http/ws)。 +* **protocol**: 显示指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持 http/ws )。 -* **username/password**: 用于创建连接的用户名及密码(默认`root/taosdata`)。 +* **username/password**: 用于创建连接的用户名及密码(默认 `root/taosdata` )。 * **host/port**: 指定创建连接的服务器及端口,WebSocket 连接默认为 `localhost:6041` 。 @@ -253,19 +268,20 @@ namespace TDengineExample |示例程序 | 示例程序描述 | |--------------------------------------------------------------------------------------------------------------------|--------------------------------------------| -| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 | -| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 | -| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 | -| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 | -| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 | -| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 | -| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 | -| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 | +| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 | +| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 | +| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 | +| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 | +| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 | +| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 | +| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 | +| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 | ## 重要更新记录 | TDengine.Connector | 说明 | |--------------------|--------------------------------| +| 3.0.2 | 支持 .NET Framework 4.5 及以上,支持 .NET standard 2.0。Nuget Package 包含 WebSocket 动态库。 | | 3.0.1 | 支持 WebSocket 和 Cloud,查询,插入,参数绑定。 | | 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口TDengine.Impl.GetData(),解析查询结果。 | | 1.0.7 | 修复 TDengine.Query()内存泄露。 | diff --git 
a/docs/zh/08-connector/index.md b/docs/zh/08-connector/index.md index a28a7cd66c1cc7598d473903eb95df5192d933d2..f3f0f23b34d0b75b69d8dd866566ac98f306f13f 100644 --- a/docs/zh/08-connector/index.md +++ b/docs/zh/08-connector/index.md @@ -60,7 +60,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器 | **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 | -| **数据订阅(TMQ)** | 暂不支持 | 暂不支持 | 支持 | 暂不支持 | 暂不支持 | 支持 | +| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 | | **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | | **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 | diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md index 9a3b21f09296e6f5a8dbd089225b6580b9567586..b2c405033f2814e3be5bcb298675e3bfb628babd 100644 --- a/docs/zh/10-deployment/05-helm.md +++ b/docs/zh/10-deployment/05-helm.md @@ -4,7 +4,7 @@ title: 使用 Helm 部署 TDengine 集群 description: 使用 Helm 部署 TDengine 集群的详细指南 --- -Helm 是 Kubernetes 的包管理器,上一节使用 Kubernets 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。 +Helm 是 Kubernetes 的包管理器,上一节使用 Kubernetes 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。 ## 安装 Helm @@ -23,7 +23,7 @@ Helm 会使用 kubectl 和 kubeconfig 的配置来操作 Kubernetes,可以参 TDengine Chart 尚未发布到 Helm 仓库,当前可以从 GitHub 直接下载: ```bash -wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz +wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz ``` @@ -39,7 +39,7 @@ kubectl get storageclass 之后,使用 helm 命令安装: ```bash -helm install tdengine tdengine-3.0.0.tgz \ +helm install tdengine tdengine-3.0.2.tgz \ --set storage.className= ``` @@ -47,7 +47,7 @@ helm install tdengine tdengine-3.0.0.tgz \ 在 minikube 环境下,可以设置一个较小的容量避免超出磁盘可用空间: ```bash -helm install tdengine tdengine-3.0.0.tgz \ +helm install tdengine tdengine-3.0.2.tgz \ --set storage.className=standard \ --set storage.dataSize=2Gi \ --set storage.logSize=10Mi @@ -84,14 +84,14 @@ TDengine 支持 `values.yaml` 自定义。 通过 helm show values 可以获取 TDengine Chart 支持的全部 values 列表: ```bash -helm show values tdengine-3.0.0.tgz +helm show values tdengine-3.0.2.tgz ``` 你可以将结果保存为 values.yaml,之后可以修改其中的各项参数,如 replica 数量,存储类名称,容量大小,TDengine 配置等,然后使用如下命令安装 TDengine 集群: ```bash -helm install tdengine tdengine-3.0.0.tgz -f values.yaml +helm install tdengine tdengine-3.0.2.tgz -f values.yaml ``` @@ -108,7 +108,7 @@ image: prefix: tdengine/tdengine #pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. -# tag: "3.0.0.0" +# tag: "3.0.2.0" service: # ClusterIP is the default service type, use NodeIP only if you know what you are doing. @@ -156,15 +156,15 @@ clusterDomainSuffix: "" # See the variable list at https://www.taosdata.com/cn/documentation/administrator . # # Note: -# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up. -# 2. serverPort: should not be setted, we'll use the default 6030 in many places. -# 3. fqdn: will be auto generated in kubenetes, user should not care about it. +# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up. +# 2. serverPort: should not be set, we'll use the default 6030 in many places. +# 3. fqdn: will be auto generated in kubernetes, user should not care about it. # 4. role: currently role is not supported - every node is able to be mnode and vnode. # # Btw, keep quotes "" around the value like below, even the value will be number or not. taoscfg: # Starts as cluster or not, must be 0 or 1. 
- # 0: all pods will start as a seperate TDengine server + # 0: all pods will start as a separate TDengine server # 1: pods will start as TDengine server cluster. [default] CLUSTER: "1" diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index df52a0890b6ff091b8d5cfb051618c88e8195cf0..fc35da863676943e152d5c113b70e5b5bd6b566e 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -30,8 +30,10 @@ database_option: { | WAL_LEVEL {1 | 2} | VGROUPS value | SINGLE_STABLE {0 | 1} + | STT_TRIGGER value | TABLE_PREFIX value | TABLE_SUFFIX value + | TSDB_PAGESIZE value | WAL_RETENTION_PERIOD value | WAL_ROLL_PERIOD value | WAL_RETENTION_SIZE value @@ -56,7 +58,7 @@ database_option: { - WAL_FSYNC_PERIOD:当 WAL 参数设置为 2 时,落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。 - MAXROWS:文件块中记录的最大条数,默认为 4096 条。 - MINROWS:文件块中记录的最小条数,默认为 100 条。 -- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。 +- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 <= keep 1 <= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。 - PAGES:一个 VNODE 中元数据存储引擎的缓存页个数,默认为 256,最小 64。一个 VNODE 元数据存储占用 PAGESIZE \* PAGES,默认情况下为 1MB 内存。 - PAGESIZE:一个 VNODE 中元数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB 到 16 MB。 - PRECISION:数据库的时间戳精度。ms 表示毫秒,us 表示微秒,ns 表示纳秒,默认 ms 毫秒。 @@ -69,8 +71,10 @@ database_option: { - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。 - 0:表示可以创建多张超级表。 - 1:表示只可以创建一张超级表。 +- STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。 - TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。 - TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。 +- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。 - WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。 - WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除。 - WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天。 @@ -112,6 +116,10 @@ alter_database_options: alter_database_option: { CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} | CACHESIZE value + | BUFFER value + | PAGES value + | REPLICA value + | STT_TRIGGER value | WAL_LEVEL value | WAL_FSYNC_PERIOD value | KEEP value @@ -154,3 +162,19 @@ TRIM DATABASE db_name; ``` 删除过期数据,并根据多级存储的配置归整数据。 + +## 调整VGROUP中VNODE的分布 + +```sql +REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3] +``` + +按照给定的dnode列表,调整vgroup中的vnode分布。因为副本数目最大为3,所以最多输入3个dnode。 + +## 自动调整VGROUP中VNODE的分布 + +```sql +BALANCE VGROUP +``` + +自动调整集群所有vgroup中的vnode分布,相当于在vnode级别对集群进行数据的负载均衡操作。 \ No newline at end of file diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 3b681f401c826a8b93ba446ad2804cb37c8c7bf7..8244c0f27a271347938e8b55cd5d0f4938df0ea7 100644 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F ## JOIN 子句 -TDengine 支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间” 
进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。 +TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制。 -在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如: +普通表与普通表之间的 JOIN 操作: ```sql SELECT * @@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2 WHERE t1.ts = t2.ts ``` -在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如: +超级表与超级表之间的 JOIN 操作: ```sql SELECT * @@ -368,20 +368,15 @@ FROM temp_stable t1, temp_stable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -类似地,也可以对多个子查询的查询结果进行 JOIN 操作。 - -:::note - -JOIN 语句存在如下限制要求: +子表与超级表之间的 JOIN 操作: -- 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。 -- 在包含 JOIN 操作的查询语句中不支持 FILL。 -- 暂不支持参与 JOIN 操作的表之间聚合后的四则运算。 -- 不支持只对其中一部分表做 GROUP BY。 -- JOIN 查询的不同表的过滤条件之间不能为 OR。 -- JOIN 查询要求连接条件不能是普通列,只能针对标签和主时间字段列(第一列)。 +```sql +SELECT * +FROM temp_ctable t1, temp_stable t2 +WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; +``` -::: +类似地,也可以对多个子查询的查询结果进行 JOIN 操作。 ## 嵌套查询 diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index cb99c83cc59aede3061ddc4a2c43278388961d8b..81fdb46f25a609f829401509aa87fb6c29350f90 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -880,7 +880,7 @@ INTERP(expr) - INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。 - INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。 - INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.1.4版本以后支持)。 -- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.1版本以后支持)。 +- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.3版本以后支持)。 ### LAST diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 2b875199b501136afef4ce0ba1cbcdd1e0abc933..0a326729f204f95f61b7f9fd1c62c8aa180359bd 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -306,7 +306,7 @@ SHOW [db_name.]VGROUPS; ## SHOW VNODES ```sql -SHOW VNODES [dnode_name]; +SHOW VNODES {dnode_id | dnode_endpoint}; ``` 显示当前系统中所有 VNODE 或某个 DNODE 的 VNODE 的信息。 diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md index 1e421f95e15f2465b0d71a48947f540a994baa71..9d67533cdea5c055c8d3ff58000fb80ac640e271 100644 --- a/docs/zh/12-taos-sql/29-changes.md +++ b/docs/zh/12-taos-sql/29-changes.md @@ -54,7 +54,6 @@ description: "TDengine 3.0 版本的语法变更说明" | 27 | GRANT | 新增 | 授予用户权限。 | 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。 | 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。 -| 30 | MERGE VGROUP | 新增 | 合并VGROUP。 | 31 | REVOKE | 新增 | 回收用户权限。 | 32 | SELECT | 调整 | | 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。 @@ -76,8 +75,9 @@ description: "TDengine 3.0 版本的语法变更说明" | 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。 | 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。 | 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。 -| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。 -| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。 +| 52 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。 +| 53 | REDISTRIBUTE VGROUP | 新增 | 调整VGROUP中VNODE的分布。 +| 54 | BALANCE VGROUP | 新增 | 自动调整VGROUP中VNODE的分布。 ## SQL 函数变更 diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index bc62a536e535b4de642d66849de7e6b10373a191..503f6927648ce77eedc6c2a5d160a0d997bf00d7 100644 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -134,6 +134,24 @@ taos --dump-config | 取值范围 | 1-200000 | | 缺省值 | 30 | +### telemetryReporting 
+ +| 属性 | 说明 | +| -------- | -------------------------------------------- | +| 适用范围 | 仅服务端适用 | +| 含义 |是否上传 telemetry | +| 取值范围 | 0,1 0: 不上传;1:上传 | +| 缺省值 | 1 | + +### crashReporting + +| 属性 | 说明 | +| -------- | -------------------------------------------- | +| 适用范围 | 仅服务端适用 | +| 含义 |是否上传 crash 信息 | +| 取值范围 | 0,1 0: 不上传;1:上传 | +| 缺省值 | 1 | + ## 查询相关 ### queryPolicy @@ -305,6 +323,7 @@ charset 的有效值是 UTF-8。 | 适用范围 | 仅服务端适用 | | 含义 | 数据文件目录,所有的数据文件都将写入该目录 | | 缺省值 | /var/lib/taos | +| 补充说明 | [多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8) 功能需要与 [KEEP](https://docs.taosdata.com/taos-sql/database/#%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E) 参数配合使用 | ### tempDir @@ -597,7 +616,7 @@ charset 的有效值是 UTF-8。 | 属性 | 说明 | | -------- | ----------------------------- | | 适用范围 | 仅客户端适用 | -| 含义 | schemaless 列数据是否顺序一致 | +| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 | | 值域 | 0:不一致;1: 一致 | | 缺省值 | 1 | @@ -657,7 +676,7 @@ charset 的有效值是 UTF-8。 | 20 | minimalTmpDirGB | 是 | 是 | | | 21 | smlChildTableName | 是 | 是 | | | 22 | smlTagName | 是 | 是 | | -| 23 | smlDataFormat | 否 | 是 | | +| 23 | smlDataFormat | 否 | 是(从3.0.3.0开始,该配置废弃) | | | 24 | statusInterval | 是 | 是 | | | 25 | logDir | 是 | 是 | | | 26 | minimalLogDirGB | 是 | 是 | | @@ -698,7 +717,7 @@ charset 的有效值是 UTF-8。 | 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 | | 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode | | 4 | vnodeBak | 是 | 否 | 3.0 行为未知 | -| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 | +| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 (暂不支持) | | 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 | | 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 | | 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 | diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md index 3aebd616a075394dbeddc9e608cd2a57bb8cf844..3d0bac25d20953733d842e8057f2c0c24deb831e 100644 --- a/docs/zh/14-reference/13-schemaless/13-schemaless.md +++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md @@ -83,7 +83,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 NULL。 6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 -8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。 +8. 
为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常,从3.0.3.0开始,自动检测顺序是否一致,该配置废弃。 :::tip 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 595b69b08b87ee33e27937fb89b84adc41c89d08..c6ecbe471a5c0863bac80ed6edfa6abd1e13c010 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -243,3 +243,8 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist ``` launchctl limit maxfiles ``` +### 19 建库时提示Out of dnode +该提示是创建db的vnode数量不够了,需要的vnode不能超过了dnode中vnode的上限。因为系统默认是一个dnode中有cpu核数两倍的vnode,也可以通过配置文件中的参数supportVnodes控制。 +正常调大taos.cfg种这个supportVnodes参数即可。 + + diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index e19b2133e4e1a1ff67d3d7e289055f58397c8ece..2ec1e6cc5434d640efdfce6d29f73a605137cd51 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,14 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.2.4 + + + +## 3.0.2.3 + + + ## 3.0.2.2 diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md index b2ead5b2640f3fcf221e19e66eb4971e7daaa911..421cbd39e3462c9290b70970afede034598a8c2e 100644 --- a/docs/zh/28-releases/02-tools.md +++ b/docs/zh/28-releases/02-tools.md @@ -10,6 +10,14 @@ taosTools 各版本安装包下载链接如下: import Release from "/components/ReleaseV3"; +## 2.4.2 + + + +## 2.4.1 + + + ## 2.4.0 diff --git a/include/client/taos.h b/include/client/taos.h index 838d0e826662abe5d2fbd6253601a12f06978c75..cf410a42daf1e9c401af767497a603aa12c7a536 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -208,6 +208,7 @@ DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); DLL_EXPORT const char *taos_get_server_info(TAOS *taos); DLL_EXPORT const char *taos_get_client_info(); +DLL_EXPORT int taos_get_current_db(TAOS *taos, char *database, int len, int *required); DLL_EXPORT const char *taos_errstr(TAOS_RES *res); DLL_EXPORT int taos_errno(TAOS_RES *res); diff --git a/include/common/systable.h b/include/common/systable.h index 6f65c1e8b870d4a42427173bf3ea17ae7ade0ce1..9b5f66f64c6fb41c7479c726ab02dc96d08e8ef5 100644 --- a/include/common/systable.h +++ b/include/common/systable.h @@ -36,6 +36,7 @@ extern "C" { #define TSDB_INS_TABLE_STABLES "ins_stables" #define TSDB_INS_TABLE_TABLES "ins_tables" #define TSDB_INS_TABLE_TAGS "ins_tags" +#define TSDB_INS_TABLE_COLS "ins_columns" #define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed" #define TSDB_INS_TABLE_USERS "ins_users" #define TSDB_INS_TABLE_LICENCES "ins_grants" diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 2643273555f0ed3addc5f733dc81b5e3a002bad1..c6e21af644d1054ce91d31b137fac030abcc749a 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -162,6 +162,7 @@ typedef enum EStreamType { STREAM_PULL_DATA, STREAM_PULL_OVER, STREAM_FILL_OVER, + STREAM_CREATE_CHILD_TABLE, } EStreamType; #pragma pack(push, 1) @@ -205,8 +206,6 @@ typedef struct SDataBlockInfo { TSKEY watermark; // used for stream char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition - int32_t tagLen; - void* pTag; // used for stream partition } SDataBlockInfo; typedef struct SSDataBlock { @@ -379,6 +378,11 @@ typedef struct SSortExecInfo { #define CALCULATE_END_TS_COLUMN_INDEX 5 #define TABLE_NAME_COLUMN_INDEX 6 +// stream create table block column +#define 
UD_TABLE_NAME_COLUMN_INDEX 0 +#define UD_GROUPID_COLUMN_INDEX 1 +#define UD_TAG_COLUMN_INDEX 2 + #ifdef __cplusplus } #endif diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index d9b8ae266b358507a8b396180b67c1bb536bdf18..20ffb48ab0173c8f811b2ab620d977e96256425d 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -41,6 +41,12 @@ typedef struct SBlockOrderInfo { BMCharPos(bm_, r_) |= (1u << (7u - BitPos(r_))); \ } while (0) +#define colDataSetNull_f_s(c_, r_) \ + do { \ + colDataSetNull_f((c_)->nullbitmap, r_); \ + memset(((char*)(c_)->pData) + (c_)->info.bytes * (r_), 0, (c_)->info.bytes); \ + } while (0) + #define colDataClearNull_f(bm_, r_) \ do { \ BMCharPos(bm_, r_) &= ((char)(~(1u << (7u - BitPos(r_))))); \ @@ -136,7 +142,7 @@ static FORCE_INLINE void colDataAppendNULL(SColumnInfoData* pColumnInfoData, uin if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { colDataSetNull_var(pColumnInfoData, currentRow); // it is a null value of VAR type. } else { - colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow); + colDataSetNull_f_s(pColumnInfoData, currentRow); } pColumnInfoData->hasNull = true; @@ -151,6 +157,7 @@ static FORCE_INLINE void colDataAppendNNULL(SColumnInfoData* pColumnInfoData, ui for (int32_t i = start; i < start + nRows; ++i) { colDataSetNull_f(pColumnInfoData->nullbitmap, i); } + memset(pColumnInfoData->pData + start * pColumnInfoData->info.bytes, 0, pColumnInfoData->info.bytes * nRows); } pColumnInfoData->hasNull = true; @@ -231,7 +238,6 @@ int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullF int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload); int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows); -int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows); void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows); void blockDataCleanup(SSDataBlock* pDataBlock); diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 852bc1eb5e7cbaef3d36426b2f2004ff26dc2a38..d7a62f5402defb95da6a4217254e14dbbc6de56c 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -265,7 +265,13 @@ struct STag { // STSchema ================================ STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version); -void tDestroyTSchema(STSchema *pTSchema); +#define tDestroyTSchema(pTSchema) \ + do { \ + if (pTSchema) { \ + taosMemoryFree(pTSchema); \ + pTSchema = NULL; \ + } \ + } while (0) #endif diff --git a/include/common/tglobal.h b/include/common/tglobal.h index d445fc26e882d840a3a3df87464c43d0225cbb5e..209bc29e4f58d6d8d72504a74fce15ec5247f743 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -69,6 +69,9 @@ extern int32_t tsElectInterval; extern int32_t tsHeartbeatInterval; extern int32_t tsHeartbeatTimeout; +// vnode +extern int64_t tsVndCommitMaxIntervalMs; + // monitor extern bool tsEnableMonitor; extern int32_t tsMonitorInterval; @@ -82,6 +85,10 @@ extern bool tsEnableTelem; extern int32_t tsTelemInterval; extern char tsTelemServer[]; extern uint16_t tsTelemPort; +extern bool tsEnableCrashReport; +extern char* tsTelemUri; +extern char* tsClientCrashReportUri; +extern char* tsSvrCrashReportUri; // query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing @@ -134,8 +141,8 @@ extern char tsUdfdLdLibPath[]; // schemaless extern char 
tsSmlChildTableName[]; extern char tsSmlTagName[]; -extern bool tsSmlDataFormat; -extern int32_t tsSmlBatchSize; +//extern bool tsSmlDataFormat; +//extern int32_t tsSmlBatchSize; // wal extern int64_t tsWalFsyncDataSizeLimit; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 94b365e76508c0893d595ef6147e7dba0c30480c..68c1744ad2117ec44cf18ff29e4dd8dd9f7a8af3 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -115,6 +115,7 @@ typedef enum _mgmt_table { TSDB_MGMT_TABLE_STREAMS, TSDB_MGMT_TABLE_TABLE, TSDB_MGMT_TABLE_TAG, + TSDB_MGMT_TABLE_COL, TSDB_MGMT_TABLE_USER, TSDB_MGMT_TABLE_GRANTS, TSDB_MGMT_TABLE_VGROUP, @@ -343,7 +344,8 @@ void tFreeSSubmitRsp(SSubmitRsp* pRsp); #define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0) #define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL))) -#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON) +#define IS_BSMA_ON(s) (((s)->flags & 0x01) == COL_SMA_ON) +#define IS_SET_NULL(s) (((s)->flags & COL_SET_NULL) == COL_SET_NULL) #define SSCHMEA_TYPE(s) ((s)->type) #define SSCHMEA_FLAGS(s) ((s)->flags) @@ -381,6 +383,13 @@ static FORCE_INLINE void tDeleteSSchemaWrapper(SSchemaWrapper* pSchemaWrapper) { } } +static FORCE_INLINE void tDeleteSSchemaWrapperForHash(void* pSchemaWrapper) { + if (pSchemaWrapper != NULL && *(SSchemaWrapper**)pSchemaWrapper != NULL) { + taosMemoryFree((*(SSchemaWrapper**)pSchemaWrapper)->pSchema); + taosMemoryFree(*(SSchemaWrapper**)pSchemaWrapper); + } +} + static FORCE_INLINE int32_t taosEncodeSSchema(void** buf, const SSchema* pSchema) { int32_t tlen = 0; tlen += taosEncodeFixedI8(buf, pSchema->type); @@ -905,6 +914,7 @@ typedef struct { int32_t numOfRetensions; SArray* pRetensions; int8_t schemaless; + int16_t sstTrigger; } SDbCfgRsp; int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp); @@ -1392,6 +1402,7 @@ typedef struct { char db[TSDB_DB_FNAME_LEN]; char tb[TSDB_TABLE_NAME_LEN]; char user[TSDB_USER_LEN]; + char filterTb[TSDB_TABLE_NAME_LEN]; int64_t showId; } SRetrieveTableReq; @@ -1754,6 +1765,12 @@ typedef struct { #define STREAM_CREATE_STABLE_TRUE 1 #define STREAM_CREATE_STABLE_FALSE 0 +typedef struct SColLocation { + int16_t slotId; + col_id_t colId; + int8_t type; +} SColLocation; + typedef struct { char name[TSDB_STREAM_FNAME_LEN]; char sourceDB[TSDB_DB_FNAME_LEN]; @@ -1771,7 +1788,9 @@ typedef struct { // 3.0.20 int64_t checkpointFreq; // ms // 3.0.2.3 - int8_t createStb; + int8_t createStb; + uint64_t targetStbUid; + SArray* fillNullCols; // array of SColLocation } SCMCreateStreamReq; typedef struct { diff --git a/include/common/tmsgcb.h b/include/common/tmsgcb.h index a1ebd855cdc7cd2a80280bc074b0b3da41a95e75..eca8740d2864612ff604601d089b206690854290 100644 --- a/include/common/tmsgcb.h +++ b/include/common/tmsgcb.h @@ -39,7 +39,7 @@ typedef enum { QUEUE_MAX, } EQueueType; -typedef int32_t (*UpdateDnodeInfoFp)(void* pData, int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port); +typedef bool (*UpdateDnodeInfoFp)(void* pData, int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port); typedef int32_t (*PutToQueueFp)(void* pMgmt, EQueueType qtype, SRpcMsg* pMsg); typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype); typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg); @@ -70,7 +70,8 @@ void tmsgSendRsp(SRpcMsg* pMsg); void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg); void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type); void tmsgReportStartup(const char* name, const char* desc); 
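// Note on the change just below (an observation on this patch, not an upstream comment):
// tmsgUpdateDnodeInfo() is switched from int32_t to bool to match the UpdateDnodeInfoFp
// typedef above, so callers can branch directly on whether the dnode's fqdn/port were
// actually rewritten, and a new tmsgUpdateDnodeEpSet(SEpSet*) declaration is added
// alongside it (its implementation is outside this hunk).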
-int32_t tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port); +bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port); +void tmsgUpdateDnodeEpSet(SEpSet* epset); #ifdef __cplusplus } diff --git a/include/common/tname.h b/include/common/tname.h index 666a25303ebae0d9098c50e10d4a8c4df0e7624f..6a89d2a6be398c0fd3afd13fd780c9a25631e473 100644 --- a/include/common/tname.h +++ b/include/common/tname.h @@ -78,7 +78,7 @@ typedef struct { // output char* ctbShortName; // must have size of TSDB_TABLE_NAME_LEN; - uint64_t uid; // child table uid, may be useful +// uint64_t uid; // child table uid, may be useful } RandTableName; void buildChildTableName(RandTableName* rName); diff --git a/include/common/ttypes.h b/include/common/ttypes.h index d0f72fbfe59bccabe1d32474c252658fe8e85fdd..97ae151b7a8ccedb84dd113fa9c9560cfe5ffe14 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -343,8 +343,8 @@ typedef struct tDataTypeDescriptor { extern tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX]; bool isValidDataType(int32_t type); +int32_t operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type); void assignVal(char *val, const char *src, int32_t len, int32_t type); -void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type); void *getDataMin(int32_t type, void* value); void *getDataMax(int32_t type, void* value); diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 3489b359cbae646ae561352b48a85c86bdb1f947..095d2f6d10dbe6faa0460f5532c569b543fb4ac0 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -154,6 +154,8 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo); */ int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode); +bool qTaskIsExecuting(qTaskInfo_t qinfo); + /** * destroy query info structure * @param qHandle diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 0f57da2b804c7c58411ee7ffa2cbb1fc4d68b3e8..1c52d7ea5df4904938201ee379c7b1068b25c0a0 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -370,7 +370,8 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask); void tFreeSStreamTask(SStreamTask* pTask); static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) { - if (pItem->type == STREAM_INPUT__DATA_SUBMIT) { + int8_t type = pItem->type; + if (type == STREAM_INPUT__DATA_SUBMIT) { SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem); if (pSubmitClone == NULL) { qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask); @@ -382,19 +383,19 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver); taosWriteQitem(pTask->inputQueue->queue, pSubmitClone); // qStreamInput(pTask->exec.executor, pSubmitClone); - } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE || - pItem->type == STREAM_INPUT__REF_DATA_BLOCK) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || + type == STREAM_INPUT__REF_DATA_BLOCK) { taosWriteQitem(pTask->inputQueue->queue, pItem); // qStreamInput(pTask->exec.executor, pItem); - } else if (pItem->type == STREAM_INPUT__CHECKPOINT) { + } else if (type == STREAM_INPUT__CHECKPOINT) { taosWriteQitem(pTask->inputQueue->queue, pItem); // 
qStreamInput(pTask->exec.executor, pItem); - } else if (pItem->type == STREAM_INPUT__GET_RES) { + } else if (type == STREAM_INPUT__GET_RES) { taosWriteQitem(pTask->inputQueue->queue, pItem); // qStreamInput(pTask->exec.executor, pItem); } - if (pItem->type != STREAM_INPUT__GET_RES && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { + if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE); } diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index d37f8f76c28288c48ef297007142df19a761f289..defafce30eb14e8c2cf6aaa4408199340a9d2adf 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -49,10 +49,12 @@ extern "C" { #define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500 #define SYNC_SNAP_RESEND_MS 1000 * 60 +#define SYNC_VND_COMMIT_MIN_MS 1000 + #define SYNC_MAX_BATCH_SIZE 1 #define SYNC_INDEX_BEGIN 0 #define SYNC_INDEX_INVALID -1 -#define SYNC_TERM_INVALID -1 // 0xFFFFFFFFFFFFFFFF +#define SYNC_TERM_INVALID -1 typedef enum { SYNC_STRATEGY_NO_SNAPSHOT = 0, @@ -191,7 +193,7 @@ typedef struct SSyncLogStore { SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore); SyncTerm (*syncLogLastTerm)(struct SSyncLogStore* pLogStore); - int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); + int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forcSync); int32_t (*syncLogGetEntry)(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry); int32_t (*syncLogTruncate)(struct SSyncLogStore* pLogStore, SyncIndex fromIndex); @@ -232,6 +234,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo); int32_t syncStart(int64_t rid); void syncStop(int64_t rid); void syncPreStop(int64_t rid); +void syncPostStop(int64_t rid); int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq); int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg); int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg); diff --git a/include/libs/transport/thttp.h b/include/libs/transport/thttp.h index 7d8c588bfc13787377a0512a73c5afdc5ad68700..9a6aee418794b61abd88e132b42964c56c69451c 100644 --- a/include/libs/transport/thttp.h +++ b/include/libs/transport/thttp.h @@ -24,7 +24,7 @@ extern "C" { typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag; -int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag); +int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag); #ifdef __cplusplus } diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index a1ae1e429dd5dbb18f6521b263576e7096482327..a0f421212a56603402c61c9bb2763a3d1e7cee1c 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -201,6 +201,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead); int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead); int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead); +SWalRef *walRefFirstVer(SWal *, SWalRef *); SWalRef *walRefCommittedVer(SWal *); SWalRef *walOpenRef(SWal *); diff --git a/include/os/osSystem.h b/include/os/osSystem.h index 58f34d26f07dafc55e7ab0c9526581545bdb8171..5154c56e4b5b48fb1a85e2dd620b3b3338902478 100644 --- a/include/os/osSystem.h +++ b/include/os/osSystem.h @@ -46,27 +46,73 @@ void taosSetTerminalMode(); int32_t taosGetOldTerminalMode(); void 
taosResetTerminalMode(); +#define STACKSIZE 100 + #if !defined(WINDOWS) -#define taosPrintTrace(flags, level, dflag) \ - { \ - void* array[100]; \ - int32_t size = backtrace(array, 100); \ - char** strings = backtrace_symbols(array, size); \ - if (strings != NULL) { \ - taosPrintLog(flags, level, dflag, "obtained %d stack frames", size); \ - for (int32_t i = 0; i < size; i++) { \ - taosPrintLog(flags, level, dflag, "frame:%d, %s", i, strings[i]); \ - } \ - } \ - \ - taosMemoryFree(strings); \ +#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \ + void* array[STACKSIZE]; \ + int32_t size = backtrace(array, STACKSIZE); \ + char** strings = backtrace_symbols(array, size); \ + int32_t offset = 0; \ + if (strings != NULL) { \ + offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \ + for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \ + offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \ + } \ + } \ + \ + taosMemoryFree(strings); \ +} + +#define taosPrintTrace(flags, level, dflag, ignoreNum) \ + { \ + void* array[STACKSIZE]; \ + int32_t size = backtrace(array, STACKSIZE); \ + char** strings = backtrace_symbols(array, size); \ + if (strings != NULL) { \ + taosPrintLog(flags, level, dflag, "obtained %d stack frames", (ignoreNum > 0) ? size - ignoreNum : size); \ + for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \ + taosPrintLog(flags, level, dflag, "frame:%d, %s", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \ + } \ + } \ + \ + taosMemoryFree(strings); \ } #else + #include #include -#define STACKSIZE 64 -#define taosPrintTrace(flags, level, dflag) \ +#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \ + unsigned int i; \ + void* stack[STACKSIZE]; \ + unsigned short frames; \ + SYMBOL_INFO* symbol; \ + HANDLE process; \ + int32_t offset = 0; \ + \ + process = GetCurrentProcess(); \ + \ + SymInitialize(process, NULL, TRUE); \ + \ + frames = CaptureStackBackTrace(0, STACKSIZE, stack, NULL); \ + symbol = (SYMBOL_INFO*)calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1); \ + if (symbol != NULL) { \ + symbol->MaxNameLen = 255; \ + symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \ + \ + if (frames > 0) { \ + offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \ + for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \ + SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \ + offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \ + } \ + } \ + free(symbol); \ + } \ + } + +#define taosPrintTrace(flags, level, dflag, ignoreNum) \ { \ unsigned int i; \ void* stack[STACKSIZE]; \ @@ -85,10 +131,10 @@ void taosResetTerminalMode(); symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \ \ if (frames > 0) { \ - taosPrintLog(flags, level, dflag, "obtained %d stack frames", frames); \ - for (i = 0; i < frames; i++) { \ + taosPrintLog(flags, level, dflag, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \ + for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \ SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \ - taosPrintLog(flags, level, dflag, "frame:%i: %s - 0x%0X", frames - i - 1, symbol->Name, symbol->Address); \ + taosPrintLog(flags, level, dflag, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? 
i - ignoreNum : i, symbol->Name, symbol->Address); \ } \ } \ free(symbol); \ diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 9d594d150795627053c58726ddbb3bf8066dafba..52d8a75ee06b10de97ebd488d3a9a2ee439bacea 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -159,6 +159,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E) #define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F) #define TSDB_CODE_TSC_STMT_CACHE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0230) +#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0231) // mnode-common // #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x @@ -414,6 +415,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_NO_AVAIL_BUFPOOL TAOS_DEF_ERROR_CODE(0, 0x0528) #define TSDB_CODE_VND_STOPPED TAOS_DEF_ERROR_CODE(0, 0x0529) #define TSDB_CODE_VND_DUP_REQUEST TAOS_DEF_ERROR_CODE(0, 0x0530) +#define TSDB_CODE_VND_QUERY_BUSY TAOS_DEF_ERROR_CODE(0, 0x0531) // tsdb #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) @@ -700,6 +702,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002) #define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003) #define TSDB_CODE_SML_NOT_SAME_TYPE TAOS_DEF_ERROR_CODE(0, 0x3004) +#define TSDB_CODE_SML_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x3005) //tsma #define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100) diff --git a/include/util/tdef.h b/include/util/tdef.h index f2e2ab42844d006cbab0ee18cf11957ef3247eb4..9036befc028fde0572018d641495ee521aa688ea 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -500,7 +500,7 @@ enum { #define DEFAULT_PAGESIZE 4096 #define VNODE_TIMEOUT_SEC 60 -#define MNODE_TIMEOUT_SEC 10 +#define MNODE_TIMEOUT_SEC 60 #ifdef __cplusplus } diff --git a/include/util/tlog.h b/include/util/tlog.h index 6e9b304e1d78a2dac22f7f1a0057312e0d3a4413..808377fa772893c7e25ce9c03c749b81fef49092 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -99,6 +99,11 @@ bool taosAssertRelease(bool condition); #endif #endif +void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo); +void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd); +void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile); +int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime); + // clang-format off #define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }} #define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", DEBUG_ERROR, tsLogEmbedded ? 
255 : uDebugFlag, __VA_ARGS__); }} diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index e22aa85c978c65d24047f85f9cbdaab84e124670..3d3dfc8e7322ac11ca7bc30b95127c2d4590271d 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -43,6 +43,9 @@ # Switch for allowing TDengine to collect and report service usage information # telemetryReporting 1 +# Switch for allowing TDengine to collect and report crash information +# crashReporting 1 + # The maximum number of vnodes supported by this dnode # supportVnodes 0 diff --git a/packaging/release.sh b/packaging/release.sh deleted file mode 100755 index 7a8a08352f7facdd42621b56db9278dd5829b844..0000000000000000000000000000000000000000 --- a/packaging/release.sh +++ /dev/null @@ -1,319 +0,0 @@ -#!/bin/bash -# -# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os - -set -e -# set -x - -# release.sh -v [cluster | edge] -# -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64...] -# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] -# -V [stable | beta] -# -l [full | lite] -# -s [static | dynamic] -# -d [taos | ...] -# -n [2.0.0.3] -# -m [2.0.0.0] -# -H [ false | true] - -# set parameters by default value -verMode=edge # [cluster, edge, cloud] -verType=stable # [stable, beta] -cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 loongarch64...] -osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] -pagMode=full # [full | lite] -soMode=dynamic # [static | dynamic] -dbName=taos # [taos | ...] -allocator=glibc # [glibc | jemalloc] -verNumber="" -verNumberComp="3.0.0.0" -httpdBuild=false - -while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do - case $arg in - v) - #echo "verMode=$OPTARG" - verMode=$(echo $OPTARG) - ;; - V) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - c) - #echo "cpuType=$OPTARG" - cpuType=$(echo $OPTARG) - ;; - l) - #echo "pagMode=$OPTARG" - pagMode=$(echo $OPTARG) - ;; - s) - #echo "soMode=$OPTARG" - soMode=$(echo $OPTARG) - ;; - d) - #echo "dbName=$OPTARG" - dbName=$(echo $OPTARG) - ;; - a) - #echo "allocator=$OPTARG" - allocator=$(echo $OPTARG) - ;; - n) - #echo "verNumber=$OPTARG" - verNumber=$(echo $OPTARG) - ;; - m) - #echo "verNumberComp=$OPTARG" - verNumberComp=$(echo $OPTARG) - ;; - o) - #echo "osType=$OPTARG" - osType=$(echo $OPTARG) - ;; - H) - #echo "httpdBuild=$OPTARG" - httpdBuild=$(echo $OPTARG) - ;; - h) - echo "Usage: $(basename $0) -v [cluster | edge] " - echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64 ...] " - echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] " - echo " -V [stable | beta] " - echo " -l [full | lite] " - echo " -a [glibc | jemalloc] " - echo " -s [static | dynamic] " - echo " -d [taos | ...] " - echo " -n [version number] " - echo " -m [compatible version number] " - echo " -H [false | true] " - exit 0 - ;; - ?) #unknow option - echo "unkonw argument" - exit 1 - ;; - esac -done - -osType=$(uname) - -echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}" - -curr_dir=$(pwd) - -if [ "$osType" == "Darwin" ]; then - script_dir=$(dirname $0) - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/.. 
-else - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/..)" -fi - -csudo="" -#if command -v sudo > /dev/null; then -# csudo="sudo " -#fi - -function is_valid_version() { - [ -z $1 ] && return 1 || : - - rx='^([0-9]+\.){3}(\*|[0-9]+)$' - if [[ $1 =~ $rx ]]; then - return 0 - fi - return 1 -} - -function vercomp() { - if [[ $1 == $2 ]]; then - echo 0 - exit 0 - fi - - local IFS=. - local i ver1=($1) ver2=($2) - - # fill empty fields in ver1 with zeros - for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do - ver1[i]=0 - done - - for ((i = 0; i < ${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]]; then - # fill empty fields in ver2 with zeros - ver2[i]=0 - fi - if ((10#${ver1[i]} > 10#${ver2[i]})); then - echo 1 - exit 0 - fi - if ((10#${ver1[i]} < 10#${ver2[i]})); then - echo 2 - exit 0 - fi - done - echo 0 -} - -# 1. check version information -if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then - echo "please enter correct version" - exit 0 -fi - -echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================" - -build_time=$(date +"%F %R") - -# get commint id from git -gitinfo=$(git rev-parse --verify HEAD) - -if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then - enterprise_dir="${top_dir}/../enterprise" - cd ${enterprise_dir} - gitinfoOfInternal=$(git rev-parse --verify HEAD) -else - gitinfoOfInternal=NULL -fi - -cd "${curr_dir}" - -# 2. cmake executable file -compile_dir="${top_dir}/debug" -if [ -d ${compile_dir} ]; then - rm -rf ${compile_dir} -fi - -mkdir -p ${compile_dir} -cd ${compile_dir} - -if [[ "$allocator" == "jemalloc" ]]; then - allocator_macro="-DJEMALLOC_ENABLED=true" -else - allocator_macro="" -fi - -if [[ "$dbName" != "taos" ]]; then - source ${enterprise_dir}/packaging/oem/sed_$dbName.sh - replace_community_$dbName -fi - -if [[ "$httpdBuild" == "true" ]]; then - BUILD_HTTP=true -else - BUILD_HTTP=false -fi - -if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then - BUILD_HTTP=internal -fi - -if [[ "$pagMode" == "full" ]]; then - BUILD_TOOLS=true -else - BUILD_TOOLS=false -fi - -# check support cpu type -if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]] || [[ "$cpuType" == "loongarch64" ]] ; then - if [ "$verMode" == "edge" ]; then - # community-version compile - cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} - elif [ "$verMode" == "cloud" ]; then - cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} - elif [ "$verMode" == "cluster" ]; then - if [[ "$dbName" != "taos" ]]; then - replace_enterprise_$dbName - fi - cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true 
-DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} - fi -else - echo "input cpuType=${cpuType} error!!!" - exit 1 -fi - -ostype=`uname` -if [ "${ostype}" == "Darwin" ]; then - CORES=$(sysctl -n hw.ncpu) -else - CORES=$(grep -c ^processor /proc/cpuinfo) -fi - -if [[ "$allocator" == "jemalloc" ]]; then - # jemalloc need compile first, so disable parallel build - make -j ${CORES} && ${csudo}make install -else - make -j ${CORES} && ${csudo}make install -fi - -cd ${curr_dir} - -# 3. Call the corresponding script for packaging -if [ "$osType" != "Darwin" ]; then - if [[ "$verMode" != "cluster" ]] && [[ "$verMode" != "cloud" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then - ret='0' - command -v dpkg >/dev/null 2>&1 || { ret='1'; } - if [ "$ret" -eq 0 ]; then - echo "====do deb package for the ubuntu system====" - output_dir="${top_dir}/debs" - if [ -d ${output_dir} ]; then - rm -rf ${output_dir} - fi - mkdir -p ${output_dir} - cd ${script_dir}/deb - ${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} - - if [[ "$pagMode" == "full" ]]; then - if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then - cd ${top_dir}/tools/taos-tools/packaging/deb - taos_tools_ver=$(git tag |grep -v taos | sort | tail -1) - [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" - - ${csudo}./make-taos-tools-deb.sh ${top_dir} \ - ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} - fi - fi - else - echo "==========dpkg command not exist, so not release deb package!!!" - fi - ret='0' - command -v rpmbuild >/dev/null 2>&1 || { ret='1'; } - if [ "$ret" -eq 0 ]; then - echo "====do rpm package for the centos system====" - output_dir="${top_dir}/rpms" - if [ -d ${output_dir} ]; then - rm -rf ${output_dir} - fi - mkdir -p ${output_dir} - cd ${script_dir}/rpm - ${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} - - if [[ "$pagMode" == "full" ]]; then - if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then - cd ${top_dir}/tools/taos-tools/packaging/rpm - taos_tools_ver=$(git tag |grep -v taos | sort | tail -1) - [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" - - ${csudo}./make-taos-tools-rpm.sh ${top_dir} \ - ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} - fi - fi - else - echo "==========rpmbuild command not exist, so not release rpm package!!!" 
- fi - fi - - echo "====do tar.gz package for all systems====" - cd ${script_dir}/tools - - ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName} - ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} - -else - cd ${script_dir}/tools - ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName} - ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} -fi diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 63009e5421ec9db8e787980846c00b14beaab75a..7f95ca3d72000acd97259d934097153be91943cf 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -481,11 +481,11 @@ function install_adapter_config() { ${csudo}mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir} [ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml + else + [ -f ${script_dir}/cfg/${adapterName}.toml ] && + ${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new fi - [ -f ${script_dir}/cfg/${adapterName}.toml ] && - ${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new - [ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml ${install_main_dir}/cfg/${adapterName}.toml @@ -499,9 +499,10 @@ function install_config() { ${csudo}mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} ${csudo}chmod 644 ${cfg_install_dir}/* + else + ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new fi - ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client @@ -742,6 +743,34 @@ function is_version_compatible() { esac } +deb_erase() { + confirm="" + while [ "" == "${confirm}" ]; do + echo -e -n "${RED}Existing TDengine deb is detected, do you want to remove it? [yes|no] ${NC}:" + read confirm + if [ "yes" == "$confirm" ]; then + ${csudo}dpkg --remove tdengine ||: + break + elif [ "no" == "$confirm" ]; then + break + fi + done +} + +rpm_erase() { + confirm="" + while [ "" == "${confirm}" ]; do + echo -e -n "${RED}Existing TDengine rpm is detected, do you want to remove it? [yes|no] ${NC}:" + read confirm + if [ "yes" == "$confirm" ]; then + ${csudo}rpm -e tdengine ||: + break + elif [ "no" == "$confirm" ]; then + break + fi + done +} + function updateProduct() { # Check if version compatible if ! 
is_version_compatible; then @@ -754,6 +783,13 @@ function updateProduct() { echo "File ${tarName} does not exist" exit 1 fi + + if echo $osinfo | grep -qwi "centos"; then + rpm -q tdengine 2>&1 > /dev/null && rpm_erase tdengine ||: + elif echo $osinfo | grep -qwi "ubuntu"; then + dpkg -l tdengine 2>&1 > /dev/null && deb_erase tdengine ||: + fi + tar -zxf ${tarName} install_jemalloc diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 482345dcd819dce683d3aa68438e6fa9fd5a4a9f..85f1f00935e3597ee3cddc9cdabef2a0d2a4e0f3 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -48,6 +48,7 @@ fi data_link_dir="${install_main_dir}/data" log_link_dir="${install_main_dir}/log" +install_log_path="${log_dir}/taos_install.log" # static directory cfg_dir="${install_main_dir}/cfg" @@ -101,6 +102,11 @@ if [ "$osType" = "Darwin" ]; then ${csudo}cp -rf tdengine ${install_main_dir} fi +function log_print(){ + now=$(date +"%D %T") + echo "$now $1" >> ${install_log_path} +} + function kill_taosadapter() { # ${csudo}pkill -f taosadapter || : pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') @@ -109,6 +115,13 @@ function kill_taosadapter() { fi } +function kill_taoskeeper() { + pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo}kill -9 $pid || : + fi +} + function kill_taosd() { # ${csudo}pkill -f taosd || : pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') @@ -118,6 +131,7 @@ function kill_taosd() { } function install_include() { + log_print "start install include from ${inc_dir} to ${inc_link_dir}" ${csudo}mkdir -p ${inc_link_dir} ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || : [ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||: @@ -128,39 +142,45 @@ function install_include() { ${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h [ -f ${inc_dir}/taosws.h ] && ${csudo}ln -sf ${inc_dir}/taosws.h ${inc_link_dir}/taosws.h ||: + log_print "install include success" } function install_lib() { + log_print "start install lib from ${lib_dir} to ${lib_link_dir}" ${csudo}rm -f ${lib_link_dir}/libtaos* || : ${csudo}rm -f ${lib64_link_dir}/libtaos* || : [ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || : [ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || : - ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} - ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} + ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 + ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib_link_dir}/libtaosws.${lib_file_ext} ||: if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then - ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} || : + ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 + ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 - [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} || : + [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path} fi if [ "$osType" != "Darwin" ]; then ${csudo}ldconfig fi + + log_print "install lib success" } function install_bin() { # Remove links + log_print "start install bin from ${bin_dir} to ${bin_link_dir}" ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : + ${csudo}rm -f ${bin_link_dir}/taoskeeper || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : ${csudo}rm -f ${bin_link_dir}/rmtaos || : @@ -169,16 +189,38 @@ function install_bin() { ${csudo}chmod 0555 ${bin_dir}/* #Make link - [ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : - [ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : - [ -x ${bin_dir}/udfd ] && ${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd || : - [ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || : - [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || : - [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || : - [ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : - [ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : - [ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${bin_dir}/remove.sh ] && ${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || : + if [ -x ${bin_dir}/taos ]; then + ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/taosd ]; then + ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/udfd ]; then + ${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/taosadapter ]; then + ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/taosBenchmark ]; then + ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo 2>>${install_log_path} + ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark 2>>${install_log_path} + fi + if [ -x ${bin_dir}/TDinsight.sh ]; then + ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/taosdump ]; then + ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump 2>>${install_log_path} || return 1 + fi + if [ -x 
${bin_dir}/set_core.sh ]; then + ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/remove.sh ]; then + ${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos 2>>${install_log_path} || return 1 + fi + if [ -x ${bin_dir}/taoskeeper ]; then + ${csudo}ln -sf ${bin_dir}/taoskeeper ${bin_link_dir}/taoskeeper 2>>${install_log_path} || return 1 + fi + log_print "install bin success" } function add_newHostname_to_hosts() { @@ -351,7 +393,24 @@ function install_taosadapter_config() { ${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir} } +function install_taoskeeper_config() { + if [ ! -f "${cfg_install_dir}/keeper.toml" ]; then + [ ! -d %{cfg_install_dir} ] && + ${csudo}${csudo}mkdir -p ${cfg_install_dir} + [ -f ${cfg_dir}/keeper.toml ] && ${csudo}cp ${cfg_dir}/keeper.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/keeper.toml ] && + ${csudo}chmod 644 ${cfg_install_dir}/keeper.toml + fi + + [ -f ${cfg_dir}/keeper.toml ] && + ${csudo}mv ${cfg_dir}/keeper.toml ${cfg_dir}/keeper.toml.new + + [ -f ${cfg_install_dir}/keeper.toml ] && + ${csudo}ln -s ${cfg_install_dir}/keeper.toml ${cfg_dir} +} + function install_config() { + log_print "start install config from ${cfg_dir} to ${cfg_install_dir}" if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then ${csudo}${csudo}mkdir -p ${cfg_install_dir} [ -f ${cfg_dir}/taos.cfg ] && ${csudo}cp ${cfg_dir}/taos.cfg ${cfg_install_dir} @@ -419,6 +478,8 @@ function install_config() { break fi done + + log_print "install config success" } function clean_service_on_sysvinit() { @@ -533,6 +594,7 @@ function install_taosadapter_service() { } function install_service() { + log_print "start install service" if [ "$osType" != "Darwin" ]; then if ((${service_mod}==0)); then install_service_on_systemd @@ -546,6 +608,7 @@ function install_service() { else install_service_on_launchctl fi + log_print "install service success" } function install_app() { @@ -566,6 +629,7 @@ function install_app() { function install_TDengine() { echo -e "${GREEN}Start to install TDengine...${NC}" + log_print "start to install TDengine" #install log and data dir , then ln to /usr/local/taos ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} @@ -578,11 +642,18 @@ function install_TDengine() { ${csudo}ln -s ${data_dir} ${data_link_dir} || : # Install include, lib, binary and service - install_include - install_lib + install_include && + install_lib && install_bin + + if [[ "$?" != 0 ]];then + log_print "install TDengine failed!" + return 1 + fi + install_config install_taosadapter_config + install_taoskeeper_config install_taosadapter_service install_service install_app @@ -622,11 +693,14 @@ function install_TDengine() { echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" echo fi + log_print "install TDengine successfully!" echo echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" } ## ==============================Main program starts from here============================ +${csudo}rm -rf ${install_log_path} +log_print "installer start..." 
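# Reviewer-style note, not part of the upstream script: the install_* steps above now log
# their progress through log_print into ${install_log_path} (i.e. ${log_dir}/taos_install.log,
# removed and recreated at the start of each run), so a failed install or upgrade can be
# diagnosed from that file rather than from console output alone.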
serverFqdn=$(hostname) install_TDengine diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index c3b79d78298af72ce3e9b6894c7f66ef048a2382..3fc5cc9cecb6d164ce6ec009f67dcd7de2a99564 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -312,6 +312,8 @@ extern SAppInfo appInfo; extern int32_t clientReqRefPool; extern int32_t clientConnRefPool; extern int32_t timestampDeltaLimit; +extern int64_t lastClusterId; + __async_send_cb_fn_t getMsgRspHandle(int32_t msgType); @@ -339,6 +341,7 @@ void resetConnectDB(STscObj* pTscObj); int taos_options_imp(TSDB_OPTION option, const char* str); void* openTransporter(const char* user, const char* auth, int32_t numOfThreads); +void tscStopCrashReport(); typedef struct AsyncArg { SRpcMsg msg; diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 074daa7441d430c5a0c55c8dfb3235001217018e..311260e3fb1eec8b6a1118bb88cf3180ba5e68de 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -164,6 +164,7 @@ typedef struct { bool dataFormat; // true means that the name and order of keys in each line are the same(only for influx protocol) bool isRawLine; int32_t ttl; + int32_t uid; // used for automatic create child table NodeList *childTables; NodeList *superTables; @@ -183,6 +184,7 @@ typedef struct { SSmlLineInfo *lines; // element is SSmlLineInfo bool parseJsonByLib; SArray *tagJsonArray; + SArray *valueJsonArray; // SArray *preLineTagKV; @@ -232,6 +234,7 @@ int32_t smlClearForRerun(SSmlHandle *info); int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg); uint8_t smlGetTimestampLen(int64_t num); void clearColValArray(SArray* pCols); +void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag); int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements); int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index d429e521117669e0b61cb950d56151f4443ec20f..fbab1ee08b847544c3711d73697636f502c2ce25 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -28,13 +28,16 @@ #include "trpc.h" #include "tsched.h" #include "ttime.h" +#include "thttp.h" #define TSC_VAR_NOT_RELEASE 1 #define TSC_VAR_RELEASED 0 SAppInfo appInfo; +int64_t lastClusterId = 0; int32_t clientReqRefPool = -1; int32_t clientConnRefPool = -1; +int32_t clientStop = 0; int32_t timestampDeltaLimit = 900; // s @@ -62,7 +65,10 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) { static void deregisterRequest(SRequestObj *pRequest) { const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable - assert(pRequest != NULL); + if(pRequest == NULL){ + tscError("pRequest == NULL"); + return; + } STscObj *pTscObj = pRequest->pTscObj; SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; @@ -359,6 +365,7 @@ void doDestroyRequest(void *p) { taosMemoryFreeClear(pRequest->pDb); doFreeReqResultInfo(&pRequest->body.resInfo); + tsem_destroy(&pRequest->body.rspSem); taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->dbList); @@ -373,6 +380,9 @@ void doDestroyRequest(void *p) { } if (pRequest->syncQuery) { + if (pRequest->body.param){ + tsem_destroy(&((SSyncQueryParam*)pRequest->body.param)->sem); + } taosMemoryFree(pRequest->body.param); } @@ -390,6 +400,122 @@ void destroyRequest(SRequestObj *pRequest) { removeRequest(pRequest->self); } +void 
crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); } + +static void *tscCrashReportThreadFp(void *param) { + setThreadName("client-crashReport"); + char filepath[PATH_MAX] = {0}; + snprintf(filepath, sizeof(filepath), "%s%s.taosCrashLog", tsLogDir, TD_DIRSEP); + char *pMsg = NULL; + int64_t msgLen = 0; + TdFilePtr pFile = NULL; + bool truncateFile = false; + int32_t sleepTime = 200; + int32_t reportPeriodNum = 3600 * 1000 / sleepTime; + int32_t loopTimes = reportPeriodNum; + +#ifdef WINDOWS + if (taosCheckCurrentInDll()) { + atexit(crashReportThreadFuncUnexpectedStopped); + } +#endif + + while (1) { + if (clientStop) break; + if (loopTimes++ < reportPeriodNum) { + taosMsleep(sleepTime); + continue; + } + + taosReadCrashInfo(filepath, &pMsg, &msgLen, &pFile); + if (pMsg && msgLen > 0) { + if (taosSendHttpReport(tsTelemServer, tsClientCrashReportUri, tsTelemPort, pMsg, msgLen, HTTP_FLAT) != 0) { + tscError("failed to send crash report"); + if (pFile) { + taosReleaseCrashLogFile(pFile, false); + continue; + } + } else { + tscInfo("succeed to send crash report"); + truncateFile = true; + } + } else { + tscDebug("no crash info"); + } + + taosMemoryFree(pMsg); + + if (pMsg && msgLen > 0) { + pMsg = NULL; + continue; + } + + if (pFile) { + taosReleaseCrashLogFile(pFile, truncateFile); + truncateFile = false; + } + + taosMsleep(sleepTime); + loopTimes = 0; + } + + clientStop = -1; + return NULL; +} + +int32_t tscCrashReportInit() { + if (!tsEnableCrashReport) { + return 0; + } + + TdThreadAttr thAttr; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + TdThread crashReportThread; + if (taosThreadCreate(&crashReportThread, &thAttr, tscCrashReportThreadFp, NULL) != 0) { + tscError("failed to create crashReport thread since %s", strerror(errno)); + return -1; + } + + taosThreadAttrDestroy(&thAttr); + return 0; +} + +void tscStopCrashReport() { + if (!tsEnableCrashReport) { + return; + } + + if (atomic_val_compare_exchange_32(&clientStop, 0, 1)) { + tscDebug("hb thread already stopped"); + return; + } + + while (atomic_load_32(&clientStop) > 0) { + taosMsleep(100); + } +} + + +void tscWriteCrashInfo(int signum, void *sigInfo, void *context) { + char *pMsg = NULL; + const char *flags = "UTL FATAL "; + ELogLevel level = DEBUG_FATAL; + int32_t dflag = 255; + int64_t msgLen= -1; + + if (tsEnableCrashReport) { + if (taosGenCrashJsonMsg(signum, &pMsg, lastClusterId, appInfo.startTime)) { + taosPrintLog(flags, level, dflag, "failed to generate crash json msg"); + } else { + msgLen = strlen(pMsg); + } + } + + taosLogCrashInfo("taos", pMsg, msgLen, signum, sigInfo); +} + + void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. 
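/*
 * Observation on the crash-report thread above (derived from this patch, not an upstream
 * comment): with sleepTime = 200 ms and reportPeriodNum = 3600 * 1000 / sleepTime, and
 * loopTimes starting at reportPeriodNum, the thread checks the crash log once right after
 * startup and then roughly once per hour. A pending report is posted with
 * taosSendHttpReport() to tsTelemServer at tsClientCrashReportUri, and the crash log file
 * is truncated only after a successful upload; the whole mechanism is gated by the new
 * tsEnableCrashReport global (the crashReporting switch added elsewhere in this patch).
 */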
@@ -397,6 +523,10 @@ void taos_init_imp(void) { errno = TSDB_CODE_SUCCESS; taosSeedRand(taosGetTimestampSec()); + appInfo.pid = taosGetPId(); + appInfo.startTime = taosGetTimestampMs(); + appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + deltaToUtcInitOnce(); if (taosCreateLog("taoslog", 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) { @@ -412,7 +542,8 @@ void taos_init_imp(void) { initQueryModuleMsgHandle(); if (taosConvInit() != 0) { - ASSERTS(0, "failed to init conv"); + tscError("failed to init conv"); + return; } rpcInit(); @@ -438,9 +569,8 @@ void taos_init_imp(void) { taosGetAppName(appInfo.appName, NULL); taosThreadMutexInit(&appInfo.mutex, NULL); - appInfo.pid = taosGetPId(); - appInfo.startTime = taosGetTimestampMs(); - appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + tscCrashReportInit(); + tscDebug("client is initialized successfully"); } diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 47ed2cf035a13314b81f2208718d2c466efb5c68..3cb8a2e1bd9e8c6c2c5a68f2e266611d209326a9 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -376,7 +376,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { desc.subPlanNum = 0; } desc.subPlanNum = taosArrayGetSize(desc.subDesc); - ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc)); } else { desc.subDesc = NULL; } @@ -813,7 +812,10 @@ static void hbStopThread() { } SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) { - hbMgrInit(); + if(hbMgrInit() != 0){ + terrno = TSDB_CODE_TSC_INTERNAL_ERROR; + return NULL; + } SAppHbMgr *pAppHbMgr = taosMemoryMalloc(sizeof(SAppHbMgr)); if (pAppHbMgr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -899,16 +901,28 @@ int hbMgrInit() { TdThreadMutexAttr attr = {0}; int ret = taosThreadMutexAttrInit(&attr); - assert(ret == 0); + if(ret != 0){ + uError("hbMgrInit:taosThreadMutexAttrInit error"); + return ret; + } ret = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE); - assert(ret == 0); + if(ret != 0){ + uError("hbMgrInit:taosThreadMutexAttrSetType error"); + return ret; + } ret = taosThreadMutexInit(&clientHbMgr.lock, &attr); - assert(ret == 0); + if(ret != 0){ + uError("hbMgrInit:taosThreadMutexInit error"); + return ret; + } ret = taosThreadMutexAttrDestroy(&attr); - assert(ret == 0); + if(ret != 0){ + uError("hbMgrInit:taosThreadMutexAttrDestroy error"); + return ret; + } // init handle funcs hbMgrInitHandle(); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 75288cfa143266cb09800cb2f6ecfe34adec1d77..2c53fe40800badfd17d308d4aca094729edf1d3d 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -159,6 +159,12 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas return taosConnectImpl(user, &secretEncrypt[0], localDb, NULL, NULL, *pInst, connType); } +void freeQueryParam(SSyncQueryParam* param) { + if (param == NULL) return; + tsem_destroy(&param->sem); + taosMemoryFree(param); +} + int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, bool validateSql, SRequestObj** pRequest, int64_t reqid) { *pRequest = createRequest(connId, TSDB_SQL_SELECT, reqid); @@ -180,17 +186,18 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, (*pRequest)->sqlLen = sqlLen; (*pRequest)->validateOnly = validateSql; + SSyncQueryParam* newpParam = NULL; if (param == NULL) { - 
SSyncQueryParam* pParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); - if (pParam == NULL) { + newpParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam)); + if (newpParam == NULL) { destroyRequest(*pRequest); *pRequest = NULL; return TSDB_CODE_OUT_OF_MEMORY; } - tsem_init(&pParam->sem, 0, 0); - pParam->pRequest = (*pRequest); - param = pParam; + tsem_init(&newpParam->sem, 0, 0); + newpParam->pRequest = (*pRequest); + param = newpParam; } (*pRequest)->body.param = param; @@ -201,8 +208,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, if (err) { tscError("%" PRId64 " failed to add to request container, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s", (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql); - - taosMemoryFree(param); + freeQueryParam(newpParam); destroyRequest(*pRequest); *pRequest = NULL; return TSDB_CODE_OUT_OF_MEMORY; @@ -214,6 +220,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param, nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) { tscError("%" PRId64 " failed to create node allocator, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s", (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql); + freeQueryParam(newpParam); destroyRequest(*pRequest); *pRequest = NULL; return TSDB_CODE_OUT_OF_MEMORY; @@ -452,7 +459,10 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra } void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) { - ASSERT(pSchema != NULL && numOfCols > 0); + if(pResInfo == NULL || pSchema == NULL || numOfCols <= 0){ + tscError("invalid paras, pResInfo == NULL || pSchema == NULL || numOfCols <= 0"); + return; + } pResInfo->numOfCols = numOfCols; if (pResInfo->fields != NULL) { @@ -463,7 +473,10 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t } pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD)); pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD)); - ASSERT(numOfCols == pResInfo->numOfCols); + if(numOfCols != pResInfo->numOfCols){ + tscError("numOfCols:%d != pResInfo->numOfCols:%d", numOfCols, pResInfo->numOfCols); + return; + } for (int32_t i = 0; i < pResInfo->numOfCols; ++i) { pResInfo->fields[i].bytes = pSchema[i].bytes; @@ -1339,7 +1352,10 @@ int32_t doProcessMsgFromServer(void* param) { SEpSet* pEpSet = arg->pEpset; SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; - assert(pMsg->info.ahandle != NULL); + if(pMsg->info.ahandle == NULL){ + tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL"); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } STscObj* pTscObj = NULL; STraceId* trace = &pMsg->info.traceId; @@ -1352,8 +1368,10 @@ int32_t doProcessMsgFromServer(void* param) { if (pSendInfo->requestObjRefId != 0) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); if (pRequest) { - assert(pRequest->self == pSendInfo->requestObjRefId); - + if(pRequest->self != pSendInfo->requestObjRefId){ + tscError("doProcessMsgFromServer pRequest->self:%"PRId64" != pSendInfo->requestObjRefId:%"PRId64, pRequest->self, pSendInfo->requestObjRefId); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } pRequest->metric.rsp = taosGetTimestampUs(); pTscObj = pRequest->pTscObj; /* @@ -1495,7 +1513,9 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) { } void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { - assert(pRequest != NULL); + if(pRequest == NULL){ 
+ return NULL; + } SReqResultInfo* pResultInfo = &pRequest->body.resInfo; if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) { @@ -1549,7 +1569,9 @@ static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) { } void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { - assert(pRequest != NULL); + if(pRequest == NULL){ + return NULL; + } SReqResultInfo* pResultInfo = &pRequest->body.resInfo; if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) { @@ -1613,8 +1635,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int char* pStart = pCol->offset[j] + pCol->pData; int32_t len = taosUcs4ToMbs((TdUcs4*)varDataVal(pStart), varDataLen(pStart), varDataVal(p)); - ASSERT(len <= bytes); - ASSERT((p + len) < (pResultInfo->convertBuf[i] + colLength[i])); + if(len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])){ + tscError("doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + colLength[i]):%p", len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i])); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } varDataSetLen(p, len); pCol->offset[j] = (p - pResultInfo->convertBuf[i]); @@ -1631,9 +1655,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int } int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) { - int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); - ASSERT(numOfCols == cols); - return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t)); } @@ -1643,6 +1664,12 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column // length | + int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); + if(ASSERT(numOfCols == cols)){ + tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols); + return -1; + } + int32_t len = getVersion1BlockMetaSize(p, numOfCols); int32_t* colLength = (int32_t*)(p + len); len += sizeof(int32_t) * numOfCols; @@ -1676,7 +1703,8 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i } else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) { len += (VARSTR_HEADER_SIZE + 5); } else { - ASSERT(0); + tscError("estimateJsonLen error: invalid type:%d", jsonInnerType); + return -1; } } } else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { @@ -1710,12 +1738,21 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int char* p = (char*)pResultInfo->pData; int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows); + if(dataLen <= 0){ + return TSDB_CODE_TSC_INTERNAL_ERROR; + } pResultInfo->convertJson = taosMemoryCalloc(1, dataLen); if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY; char* p1 = pResultInfo->convertJson; int32_t totalLen = 0; + int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); + if(ASSERT(numOfCols == cols)){ + tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + int32_t len = getVersion1BlockMetaSize(p, numOfCols); memcpy(p1, p, len); @@ -1736,8 +1773,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int for (int32_t i = 0; i < numOfCols; ++i) { int32_t colLen = htonl(colLength[i]); int32_t colLen1 = htonl(colLength1[i]); - 
ASSERT(colLen < dataLen); - + if(ASSERT(colLen < dataLen)){ + tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) { int32_t* offset = (int32_t*)pStart; int32_t* offset1 = (int32_t*)pStart1; @@ -1782,7 +1821,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int sprintf(varDataVal(dst), "%s", (*((char*)jsonInnerData) == 1) ? "true" : "false"); varDataSetLen(dst, strlen(varDataVal(dst))); } else { - ASSERT(0); + tscError("doConvertJson error: invalid type:%d", jsonInnerType); + return TSDB_CODE_TSC_INTERNAL_ERROR; } offset1[j] = len; @@ -1820,7 +1860,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, bool convertUcs4) { - assert(numOfCols > 0 && pFields != NULL && pResultInfo != NULL); + if(ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)){ + tscError("setResultDataPtr paras error"); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (numOfRows == 0) { return TSDB_CODE_SUCCESS; } @@ -1849,7 +1892,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 int32_t cols = *(int32_t*)p; p += sizeof(int32_t); - ASSERT(rows == numOfRows && cols == numOfCols); + if(ASSERT(rows == numOfRows && cols == numOfCols)){ + tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } int32_t hasColumnSeg = *(int32_t*)p; p += sizeof(int32_t); @@ -1876,7 +1922,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 colLength[i] = htonl(colLength[i]); if (colLength[i] >= dataLen) { tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen); - ASSERT(0); + return TSDB_CODE_TSC_INTERNAL_ERROR; } if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { @@ -1914,7 +1960,11 @@ char* getDbOfConnection(STscObj* pObj) { } void setConnectionDB(STscObj* pTscObj, const char* db) { - assert(db != NULL && pTscObj != NULL); + if(db == NULL || pTscObj == NULL){ + tscError("setConnectionDB para is NULL"); + return; + } + taosThreadMutexLock(&pTscObj->mutex); tstrncpy(pTscObj->db, db, tListLen(pTscObj->db)); taosThreadMutexUnlock(&pTscObj->mutex); @@ -1932,7 +1982,10 @@ void resetConnectDB(STscObj* pTscObj) { int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4, bool freeAfterUse) { - assert(pResultInfo != NULL && pRsp != NULL); + if(pResultInfo == NULL || pRsp == NULL){ + tscError("setQueryResultFromRsp paras is null"); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg); diff --git a/source/client/src/clientJniConnector.c b/source/client/src/clientJniConnector.c index 859d4ec80ff6e954259415dabaf1761eb3e7129e..750ba684f42c2f35cd6f5180e6d1d722ad3e0e16 100644 --- a/source/client/src/clientJniConnector.c +++ b/source/client/src/clientJniConnector.c @@ -574,7 +574,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI TAOS_RES *tres = (TAOS_RES *)res; int32_t numOfFields = taos_num_fields(tres); - assert(numOfFields > 0); + if(numOfFields <= 0){ + jniError("jobj:%p, conn:%p, query interrupted. 
taos_num_fields error code:%d, msg:%s", jobj, tscon, numOfFields, + taos_errstr(tres)); + return JNI_RESULT_SET_NULL; + } void *data; int32_t numOfRows; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 1f864f07b4da973810968a7640d3f899b8374f46..7b04603f95c3880cb75b5a48cfe607aa05eba9f0 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -55,6 +55,8 @@ void taos_cleanup(void) { return; } + tscStopCrashReport(); + int32_t id = clientReqRefPool; clientReqRefPool = -1; taosCloseRef(id); @@ -106,7 +108,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha if (pass == NULL) { pass = TSDB_DEFAULT_PASS; } - + STscObj *pObj = taos_connect_internal(ip, user, pass, NULL, db, port, CONN_TYPE__QUERY); if (pObj) { int64_t *rid = taosMemoryCalloc(1, sizeof(int64_t)); @@ -291,7 +293,6 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { tscError("invalid result passed to taos_fetch_row"); return NULL; } - return NULL; } int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { @@ -355,9 +356,13 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) case TSDB_DATA_TYPE_NCHAR: { int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE); if (fields[i].type == TSDB_DATA_TYPE_BINARY) { - assert(charLen <= fields[i].bytes && charLen >= 0); + if(ASSERT(charLen <= fields[i].bytes && charLen >= 0)){ + tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes); + } } else { - assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0); + if(ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)){ + tscError("taos_print_row error. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes); + } } memcpy(str + len, row[i], charLen); @@ -430,6 +435,22 @@ const char *taos_data_type(int type) { return "TSDB_DATA_TYPE_NCHAR"; case TSDB_DATA_TYPE_JSON: return "TSDB_DATA_TYPE_JSON"; + case TSDB_DATA_TYPE_UTINYINT: + return "TSDB_DATA_TYPE_UTINYINT"; + case TSDB_DATA_TYPE_USMALLINT: + return "TSDB_DATA_TYPE_USMALLINT"; + case TSDB_DATA_TYPE_UINT: + return "TSDB_DATA_TYPE_UINT"; + case TSDB_DATA_TYPE_UBIGINT: + return "TSDB_DATA_TYPE_UBIGINT"; + case TSDB_DATA_TYPE_VARBINARY: + return "TSDB_DATA_TYPE_VARBINARY"; + case TSDB_DATA_TYPE_DECIMAL: + return "TSDB_DATA_TYPE_DECIMAL"; + case TSDB_DATA_TYPE_BLOB: + return "TSDB_DATA_TYPE_BLOB"; + case TSDB_DATA_TYPE_MEDIUMBLOB: + return "TSDB_DATA_TYPE_MEDIUMBLOB"; default: return "UNKNOWN"; } @@ -507,9 +528,8 @@ void taos_stop_query(TAOS_RES *res) { SRequestObj *pRequest = (SRequestObj *)res; pRequest->killed = true; - int32_t numOfFields = taos_num_fields(pRequest); // It is not a query, no need to stop. 
- if (numOfFields == 0) { + if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) { tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId); return; } @@ -577,7 +597,7 @@ int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) { (*numOfRows) = pResultInfo->numOfRows; return 0; } else { - ASSERT(0); + tscError("taos_fetch_block_s invalid res type"); return -1; } } @@ -670,6 +690,32 @@ const char *taos_get_server_info(TAOS *taos) { return pTscObj->sDetailVer; } +int taos_get_current_db(TAOS *taos, char *database, int len, int *required) { + STscObj *pTscObj = acquireTscObj(*(int64_t *)taos); + if (pTscObj == NULL) { + terrno = TSDB_CODE_TSC_DISCONNECTED; + return -1; + } + + int code = TSDB_CODE_SUCCESS; + taosThreadMutexLock(&pTscObj->mutex); + if(database == NULL || len <= 0){ + if(required != NULL) *required = strlen(pTscObj->db) + 1; + terrno = TSDB_CODE_INVALID_PARA; + code = -1; + }else if(len < strlen(pTscObj->db) + 1){ + tstrncpy(database, pTscObj->db, len); + if(required) *required = strlen(pTscObj->db) + 1; + terrno = TSDB_CODE_INVALID_PARA; + code = -1; + }else{ + strcpy(database, pTscObj->db); + code = 0; + } + taosThreadMutexUnlock(&pTscObj->mutex); + return code; +} + static void destoryTablesReq(void *p) { STablesReq *pRes = (STablesReq *)p; taosArrayDestroy(pRes->pTables); @@ -1000,8 +1046,14 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { } void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { - ASSERT(res != NULL && fp != NULL); - ASSERT(TD_RES_QUERY(res)); + if(ASSERT(res != NULL && fp != NULL)){ + tscError("taos_fetch_rows_a invalid paras"); + return; + } + if(ASSERT(TD_RES_QUERY(res))){ + tscError("taos_fetch_rows_a res is NULL"); + return; + } SRequestObj *pRequest = res; pRequest->body.fetchFp = fp; @@ -1044,9 +1096,14 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { - ASSERT(res != NULL && fp != NULL); - ASSERT(TD_RES_QUERY(res)); - + if(ASSERT(res != NULL && fp != NULL)){ + tscError("taos_fetch_rows_a invalid paras"); + return; + } + if(ASSERT(TD_RES_QUERY(res))){ + tscError("taos_fetch_rows_a res is NULL"); + return; + } SRequestObj *pRequest = res; SReqResultInfo *pResultInfo = &pRequest->body.resInfo; @@ -1058,8 +1115,14 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } const void *taos_get_raw_block(TAOS_RES *res) { - ASSERT(res != NULL); - ASSERT(TD_RES_QUERY(res)); + if(ASSERT(res != NULL)){ + tscError("taos_fetch_rows_a invalid paras"); + return NULL; + } + if(ASSERT(TD_RES_QUERY(res))){ + tscError("taos_fetch_rows_a res is NULL"); + return NULL; + } SRequestObj *pRequest = res; return pRequest->body.resInfo.pData; diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 85027ff371c921cc6bf9137d4f7eaece092c2e7d..2191c54315d6324e819337fa41db3875e1506594 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -119,6 +119,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { // update the appInstInfo pTscObj->pAppInfo->clusterId = connectRsp.clusterId; + lastClusterId = connectRsp.clusterId; pTscObj->connType = connectRsp.connType; @@ -149,7 +150,6 @@ SMsgSendInfo* buildMsgInfoImpl(SRequestObj* pRequest) { pMsgSendInfo->msgType = pRequest->type; pMsgSendInfo->target.type = TARGET_TYPE_MNODE; - 
assert(pRequest != NULL); pMsgSendInfo->msgInfo = pRequest->body.requestMsg; pMsgSendInfo->fp = getMsgRspHandle(pRequest->type); return pMsgSendInfo; @@ -273,7 +273,9 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) { } int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) { - assert(pMsg != NULL && param != NULL); + if(pMsg == NULL || param == NULL){ + return TSDB_CODE_TSC_INVALID_INPUT; + } SRequestObj* pRequest = param; if (code != TSDB_CODE_SUCCESS) { @@ -454,7 +456,10 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS); - ASSERT(len == rspSize - sizeof(SRetrieveTableRsp)); + if(len != rspSize - sizeof(SRetrieveTableRsp)){ + uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp))); + return TSDB_CODE_TSC_INVALID_INPUT; + } blockDataDestroy(pBlock); return TSDB_CODE_SUCCESS; diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 0918b99cdab1b01b809296eb19ee4af3ece2ac37..72a00ef471fa0010efc7666c00b2a5e020509ab2 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -179,7 +179,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) { } string = cJSON_PrintUnformatted(json); - end: +end: cJSON_Delete(json); tFreeSMAltertbReq(&req); return string; @@ -200,7 +200,7 @@ static char* processCreateStb(SMqMetaRsp* metaRsp) { } string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE); - _err: +_err: tDecoderClear(&coder); return string; } @@ -220,7 +220,7 @@ static char* processAlterStb(SMqMetaRsp* metaRsp) { } string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen); - _err: +_err: tDecoderClear(&coder); return string; } @@ -302,7 +302,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { cJSON_AddItemToArray(tags, tag); } - end: +end: cJSON_AddItemToObject(json, "tags", tags); taosArrayDestroy(pTagVals); } @@ -360,7 +360,7 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) { } } - _exit: +_exit: for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { pCreateReq = req.pReqs + iReq; taosMemoryFreeClear(pCreateReq->comment); @@ -373,7 +373,10 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) { } static char* processAutoCreateTable(STaosxRsp* rsp) { - ASSERT(rsp->createTableNum != 0); + if (rsp->createTableNum <= 0) { + uError("WriteRaw:processAutoCreateTable rsp->createTableNum <= 0"); + goto _exit; + } SDecoder* decoder = taosMemoryCalloc(rsp->createTableNum, sizeof(SDecoder)); SVCreateTbReq* pCreateReq = taosMemoryCalloc(rsp->createTableNum, sizeof(SVCreateTbReq)); @@ -389,11 +392,14 @@ static char* processAutoCreateTable(STaosxRsp* rsp) { goto _exit; } - ASSERT(pCreateReq[iReq].type == TSDB_CHILD_TABLE); + if (pCreateReq[iReq].type != TSDB_CHILD_TABLE) { + uError("WriteRaw:processAutoCreateTable pCreateReq[iReq].type != TSDB_CHILD_TABLE"); + goto _exit; + } } string = buildCreateCTableJson(pCreateReq, rsp->createTableNum); - _exit: +_exit: for (int i = 0; i < rsp->createTableNum; i++) { tDecoderClear(&decoder[i]); taosMemoryFreeClear(pCreateReq[i].comment); @@ -494,7 +500,10 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) { char* buf = NULL; if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) { - 
ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true); + if (!tTagIsJson(vAlterTbReq.pTagVal)) { + uError("processAlterTable isJson false"); + goto _exit; + } buf = parseTagDatatoJson(vAlterTbReq.pTagVal); } else { buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1); @@ -515,7 +524,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) { } string = cJSON_PrintUnformatted(json); - _exit: +_exit: cJSON_Delete(json); tDecoderClear(&decoder); return string; @@ -548,12 +557,12 @@ static char* processDropSTable(SMqMetaRsp* metaRsp) { string = cJSON_PrintUnformatted(json); - _exit: +_exit: cJSON_Delete(json); tDecoderClear(&decoder); return string; } -static char* processDeleteTable(SMqMetaRsp* metaRsp){ +static char* processDeleteTable(SMqMetaRsp* metaRsp) { SDeleteRes req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -587,7 +596,7 @@ static char* processDeleteTable(SMqMetaRsp* metaRsp){ string = cJSON_PrintUnformatted(json); - _exit: +_exit: cJSON_Delete(json); tDecoderClear(&coder); return string; @@ -629,7 +638,7 @@ static char* processDropTable(SMqMetaRsp* metaRsp) { string = cJSON_PrintUnformatted(json); - _exit: +_exit: cJSON_Delete(json); tDecoderClear(&decoder); return string; @@ -717,7 +726,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; taosMemoryFree(pCmdMsg.pMsg); - end: +end: destroyRequest(pRequest); tFreeSMCreateStbReq(&pReq); tDecoderClear(&coder); @@ -787,7 +796,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; taosMemoryFree(pCmdMsg.pMsg); - end: +end: destroyRequest(pRequest); tDecoderClear(&coder); return code; @@ -848,9 +857,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); // loop to create table @@ -930,7 +939,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { pCreateReq = req.pReqs + iReq; taosMemoryFreeClear(pCreateReq->comment); @@ -1000,9 +1009,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); // loop to create table for (int32_t iReq = 0; iReq < req.nReqs; iReq++) { @@ -1054,7 +1063,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { } code = pRequest->code; - end: +end: taosHashCleanup(pVgroupHashmap); destroyRequest(pRequest); tDecoderClear(&coder); @@ -1122,7 +1131,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { } taos_free_result(res); - end: +end: tDecoderClear(&coder); return code; } @@ -1169,9 +1178,9 @@ static int32_t taosAlterTable(TAOS* taos, void* 
meta, int32_t metaLen) { } SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; SVgroupInfo pInfo = {0}; SName pName = {0}; @@ -1230,7 +1239,7 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { code = handleAlterTbExecRes(pRes->res, pCatalog); } } - end: +end: taosArrayDestroy(pArray); if (pVgData) taosMemoryFreeClear(pVgData->pData); taosMemoryFreeClear(pVgData); @@ -1393,7 +1402,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - end: +end: taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); destroyRequest(pRequest); @@ -1512,7 +1521,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - end: +end: tDeleteSMqDataRsp(&rspObj.rsp); tDecoderClear(&decoder); qDestroyQuery(pQuery); @@ -1610,7 +1619,11 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) goto end; } - ASSERT(pCreateReq.type == TSDB_CHILD_TABLE); + if (pCreateReq.type != TSDB_CHILD_TABLE) { + uError("WriteRaw:pCreateReq.type != TSDB_CHILD_TABLE. table name: %s", tbName); + code = TSDB_CODE_TSC_INVALID_VALUE; + goto end; + } if (strcmp(tbName, pCreateReq.name) == 0) { strcpy(pName.tname, pCreateReq.ctb.stbName); tDecoderClear(&decoderTmp); diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index b751f574323b23a1c02a70b05dfef06504b8433f..e21fcd64b8bf33bfa474e44d618813b12bc5f25d 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -186,13 +186,10 @@ int32_t smlSetCTableName(SSmlTableInfo *oneTable) { if (strlen(oneTable->childTableName) == 0) { SArray *dst = taosArrayDup(oneTable->tags, NULL); - RandTableName rName = {dst, oneTable->sTableName, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName, 0}; + RandTableName rName = {dst, oneTable->sTableName, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName}; buildChildTableName(&rName); taosArrayDestroy(dst); - oneTable->uid = rName.uid; - } else { - oneTable->uid = *(uint64_t *)(oneTable->childTableName); } return TSDB_CODE_SUCCESS; } @@ -523,7 +520,10 @@ STableMeta *smlGetMeta(SSmlHandle *info, const void *measure, int32_t measureLen memset(pName.tname, 0, TSDB_TABLE_NAME_LEN); memcpy(pName.tname, measure, measureLen); - catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); + int32_t code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); + if (code != TSDB_CODE_SUCCESS) { + return NULL; + } return pTableMeta; } @@ -996,7 +996,10 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols } } else { size_t tmp = taosArrayGetSize(metaArray); - ASSERT(tmp <= INT16_MAX); + if (tmp > INT16_MAX) { + uError("too many cols or tags"); + return -1; + } int16_t size = tmp; int ret = taosHashPut(metaHash, kv->key, kv->keyLen, &size, SHORT_BYTES); if (ret == 0) { @@ -1008,15 +1011,15 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols return TSDB_CODE_SUCCESS; } -static void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) { +void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) { for (size_t i = 0; i < 
taosArrayGetSize(tag->cols); i++) { SHashObj *kvHash = (SHashObj *)taosArrayGetP(tag->cols, i); taosHashCleanup(kvHash); } - if(info->parseJsonByLib){ + if (info->parseJsonByLib) { SSmlLineInfo *key = (SSmlLineInfo *)(tag->key); - if(key != NULL) taosMemoryFree(key->tags); + if (key != NULL) taosMemoryFree(key->tags); } taosMemoryFree(tag->key); taosArrayDestroy(tag->cols); @@ -1024,10 +1027,10 @@ static void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) { taosMemoryFree(tag); } -void clearColValArray(SArray* pCols) { +void clearColValArray(SArray *pCols) { int32_t num = taosArrayGetSize(pCols); for (int32_t i = 0; i < num; ++i) { - SColVal* pCol = taosArrayGet(pCols, i); + SColVal *pCol = taosArrayGet(pCols, i); if (TSDB_DATA_TYPE_NCHAR == pCol->type) { taosMemoryFreeClear(pCol->value.pData); } @@ -1063,12 +1066,18 @@ void smlDestroyInfo(SSmlHandle *info) { // destroy info->pVgHash taosHashCleanup(info->pVgHash); - for(int i = 0; i< taosArrayGetSize(info->tagJsonArray); i++){ + for (int i = 0; i < taosArrayGetSize(info->tagJsonArray); i++) { cJSON *tags = (cJSON *)taosArrayGetP(info->tagJsonArray, i); cJSON_Delete(tags); } taosArrayDestroy(info->tagJsonArray); + for (int i = 0; i < taosArrayGetSize(info->valueJsonArray); i++) { + cJSON *value = (cJSON *)taosArrayGetP(info->valueJsonArray, i); + cJSON_Delete(value); + } + taosArrayDestroy(info->valueJsonArray); + taosArrayDestroy(info->preLineTagKV); taosArrayDestroy(info->maxTagKVs); taosArrayDestroy(info->preLineColKV); @@ -1076,7 +1085,7 @@ void smlDestroyInfo(SSmlHandle *info) { if (!info->dataFormat) { for (int i = 0; i < info->lineNum; i++) { taosArrayDestroy(info->lines[i].colArray); - if(info->parseJsonByLib){ + if (info->parseJsonByLib) { taosMemoryFree(info->lines[i].tags); } } @@ -1108,6 +1117,7 @@ SSmlHandle *smlBuildSmlInfo(TAOS *taos) { info->dataFormat = true; info->tagJsonArray = taosArrayInit(8, POINTER_BYTES); + info->valueJsonArray = taosArrayInit(8, POINTER_BYTES); info->preLineTagKV = taosArrayInit(8, sizeof(SSmlKv)); info->maxTagKVs = taosArrayInit(8, sizeof(SSmlKv)); info->preLineColKV = taosArrayInit(8, sizeof(SSmlKv)); @@ -1229,7 +1239,10 @@ static int32_t smlInsertData(SSmlHandle *info) { SSmlSTableMeta *pMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, tableData->sTableName, tableData->sTableNameLen, NULL); - ASSERT(NULL != pMeta); + if (unlikely(NULL == pMeta || NULL == pMeta->tableMeta)) { + uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName); + return TSDB_CODE_SML_INTERNAL_ERROR; + } // use tablemeta of stable to save vgid and uid of child table pMeta->tableMeta->vgId = vg.vgId; @@ -1294,7 +1307,7 @@ int32_t smlClearForRerun(SSmlHandle *info) { pList = pList->next; } - if (!info->dataFormat){ + if (!info->dataFormat) { if (unlikely(info->lines != NULL)) { uError("SML:0x%" PRIx64 " info->lines != NULL", info->id); return TSDB_CODE_SML_INVALID_DATA; @@ -1365,9 +1378,8 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char } else { code = smlParseTelnetString(info, (char *)tmp, (char *)tmp + len, info->lines + i); } - } else { - ASSERT(0); + code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE; } if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlParseLine failed. 
line %d : %s", info->id, i, tmp); diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index 6d80ac29e739a2e6baeff9400d8b6b3040613c19..db1ca5a4217aaed2f4469df14aecbf283b1fcfbc 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -143,7 +143,10 @@ while(*(start)){\ // if(unlikely(kv.length > preKV->length)){ // preKV->length = kv.length; // SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL); -// ASSERT(tableMeta != NULL); +// if(unlikely(NULL == tableMeta)){ +// uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); +// return TSDB_CODE_SML_INTERNAL_ERROR; +// } // // SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt); // oldKV->length = kv.length; @@ -218,14 +221,17 @@ while(*(start)){\ //} static char* smlJsonGetObj(char *payload){ - int leftBracketCnt = 0; + int leftBracketCnt = 0; + bool isInQuote = false; while(*payload) { - if (unlikely(*payload == '{')) { + if(*payload == '"' && *(payload - 1) != '\\'){ + isInQuote = !isInQuote; + }else if (!isInQuote && unlikely(*payload == '{')) { leftBracketCnt++; payload++; continue; } - if (unlikely(*payload == '}')) { + else if (!isInQuote && unlikely(*payload == '}')) { leftBracketCnt--; payload++; if (leftBracketCnt == 0) { @@ -285,6 +291,14 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){ JUMP_JSON_SPACE((*start)) offset[index++] = *start - sTmp; element->timestamp = (*start); + if(*(*start) == '{'){ + char* tmp = smlJsonGetObj((*start)); + if(tmp){ + element->timestampLen = tmp - (*start); + *start = tmp; + } + break; + } hasColon = true; continue; } @@ -306,6 +320,14 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){ JUMP_JSON_SPACE((*start)) offset[index++] = *start - sTmp; element->cols = (*start); + if(*(*start) == '{'){ + char* tmp = smlJsonGetObj((*start)); + if(tmp){ + element->colsLen = tmp - (*start); + *start = tmp; + } + break; + } hasColon = true; continue; } @@ -378,6 +400,15 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset){ }else if((*start)[1] == 't' && (*start)[2] == 'i'){ (*start) += offset[index++]; element->timestamp = *start; + if(*(*start) == '{'){ + char* tmp = smlJsonGetObj((*start)); + if(tmp){ + element->timestampLen = tmp - (*start); + *start = tmp; + } + continue; + } + while(*(*start)){ if(unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)){ element->timestampLen = (*start) - element->timestamp; @@ -388,6 +419,14 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset){ }else if((*start)[1] == 'v'){ (*start) += offset[index++]; element->cols = *start; + if(*(*start) == '{'){ + char* tmp = smlJsonGetObj((*start)); + if(tmp){ + element->colsLen = tmp - (*start); + *start = tmp; + } + continue; + } while(*(*start)){ if(unlikely( *(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)){ element->colsLen = (*start) - element->cols; @@ -403,7 +442,7 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset){ element->tagsLen = tmp - (*start); *start = tmp; } - break; + continue; } if(*(*start) == '}'){ (*start)++; @@ -624,7 +663,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) { * user configured parameter tsDefaultJSONStrType */ - char *tsDefaultJSONStrType = "nchar"; // todo + char *tsDefaultJSONStrType = "binary"; // todo smlConvertJSONString(kv, tsDefaultJSONStrType, root); break; } @@ -723,7 +762,10 @@ 
static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo if(unlikely(kv.length > maxKV->length)){ maxKV->length = kv.length; SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL); - ASSERT(tableMeta != NULL); + if(unlikely(NULL == tableMeta)){ + uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); + return TSDB_CODE_SML_INTERNAL_ERROR; + } SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt); oldKV->length = kv.length; @@ -775,11 +817,13 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo tinfo->tags = taosArrayDup(preLineKV, NULL); smlSetCTableName(tinfo); + tinfo->uid = info->uid++; if (info->dataFormat) { info->currSTableMeta->uid = tinfo->uid; tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta); if (tinfo->tableDataCtx == NULL) { smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL); + smlDestroyTableInfo(info, tinfo); return TSDB_CODE_SML_INVALID_DATA; } } @@ -1103,7 +1147,27 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * } SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen}; - if (elements->colsLen == 0 || smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) { + + if (unlikely(elements->colsLen == 0)) { + uError("SML:colsLen == 0"); + return TSDB_CODE_TSC_INVALID_VALUE; + }else if(unlikely(elements->cols[0] == '{')){ + char tmp = elements->cols[elements->colsLen]; + elements->cols[elements->colsLen] = '\0'; + cJSON* valueJson = cJSON_Parse(elements->cols); + if (unlikely(valueJson == NULL)) { + uError("SML:0x%" PRIx64 " parse json cols failed:%s", info->id, elements->cols); + return TSDB_CODE_TSC_INVALID_JSON; + } + taosArrayPush(info->tagJsonArray, &valueJson); + ret = smlParseValueFromJSONObj(valueJson, &kv); + if (ret != TSDB_CODE_SUCCESS) { + uError("SML:Failed to parse value from JSON Obj:%s", elements->cols); + elements->cols[elements->colsLen] = tmp; + return TSDB_CODE_TSC_INVALID_VALUE; + } + elements->cols[elements->colsLen] = tmp; + }else if(smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS){ uError("SML:cols invalidate:%s", elements->cols); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -1115,7 +1179,7 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * cJSON* tagsJson = cJSON_Parse(elements->tags); *(elements->tags + elements->tagsLen) = tmp; if (unlikely(tagsJson == NULL)) { - uError("SML:0x%" PRIx64 " parse json failed:%s", info->id, elements->tags); + uError("SML:0x%" PRIx64 " parse json tag failed:%s", info->id, elements->tags); return TSDB_CODE_TSC_INVALID_JSON; } @@ -1133,10 +1197,29 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * // Parse timestamp // notice!!! 
put ts back to tag to ensure get meta->precision - int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); - if (unlikely(ts < 0)) { - uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id); + int64_t ts = 0; + if(unlikely(elements->timestampLen == 0)){ + uError("OTD:0x%" PRIx64 " elements->timestampLen == 0", info->id); return TSDB_CODE_INVALID_TIMESTAMP; + }else if(elements->timestamp[0] == '{'){ + char tmp = elements->timestamp[elements->timestampLen]; + elements->timestamp[elements->timestampLen] = '\0'; + cJSON* tsJson = cJSON_Parse(elements->timestamp); + ts = smlParseTSFromJSON(info, tsJson); + if (unlikely(ts < 0)) { + uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload:%s", info->id, elements->timestamp); + elements->timestamp[elements->timestampLen] = tmp; + cJSON_Delete(tsJson); + return TSDB_CODE_INVALID_TIMESTAMP; + } + elements->timestamp[elements->timestampLen] = tmp; + cJSON_Delete(tsJson); + }else{ + ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); + if (unlikely(ts < 0)) { + uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id); + return TSDB_CODE_INVALID_TIMESTAMP; + } } SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; @@ -1203,8 +1286,8 @@ int32_t smlParseJSON(SSmlHandle *info, char *payload) { continue; } - if(*dataPointStart == '\0') break; cnt++; + if(*dataPointStart == '\0') break; } info->lineNum = cnt; diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 6853e6d9f31fbb01694e8d085962a9c379c99c4c..890245f53d5f7cbf52613ca7804da3c43e9543f8 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -259,7 +259,10 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, if(unlikely(kv.length > maxKV->length)){ maxKV->length = kv.length; SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL); - ASSERT(tableMeta != NULL); + if(unlikely(NULL == tableMeta)){ + uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); + return TSDB_CODE_SML_INTERNAL_ERROR; + } SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt); oldKV->length = kv.length; @@ -319,6 +322,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, tinfo->tags = taosArrayDup(preLineKV, NULL); smlSetCTableName(tinfo); + tinfo->uid = info->uid++; if(info->dataFormat) { info->currSTableMeta->uid = tinfo->uid; tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta); @@ -484,7 +488,10 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, if(unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)){ maxKV->length = kv.length; SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL); - ASSERT(tableMeta != NULL); + if(unlikely(NULL == tableMeta)){ + uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); + return TSDB_CODE_SML_INTERNAL_ERROR; + } SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->cols, cnt); oldKV->length = kv.length; diff --git a/source/client/src/clientSmlTelnet.c b/source/client/src/clientSmlTelnet.c index 11a58040b349b0b4e530a1da6507dc076f4ff467..d43ab6c9f97ca7d1c4523f03ff2bb9c73556fc4b 100644 --- a/source/client/src/clientSmlTelnet.c +++ 
b/source/client/src/clientSmlTelnet.c @@ -184,7 +184,10 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS if(unlikely(kv.length > maxKV->length)){ maxKV->length = kv.length; SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL); - ASSERT(tableMeta != NULL); + if(unlikely(NULL == tableMeta)){ + uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); + return TSDB_CODE_SML_INTERNAL_ERROR; + } SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt); oldKV->length = kv.length; @@ -235,11 +238,13 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS tinfo->tags = taosArrayDup(preLineKV, NULL); smlSetCTableName(tinfo); + tinfo->uid = info->uid++; if (info->dataFormat) { info->currSTableMeta->uid = tinfo->uid; tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta); if (tinfo->tableDataCtx == NULL) { smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL); + smlDestroyTableInfo(info, tinfo); return TSDB_CODE_SML_INVALID_DATA; } } diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 7bc99c65e8662685a32a24779389619f0c2ae422..f5b65371a7c90393b72ee059afb8060f1b373891 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -296,7 +296,6 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { STableMeta* pMeta = qGetTableMetaInDataBlock(pBlocks); if (keepTable && pBlocks == pStmt->exec.pCurrBlock) { - ASSERT(NULL == pBlocks->pData); TSWAP(pBlocks->pData, pStmt->exec.pCurrTbData); STMT_ERR_RET(qResetStmtDataBlock(pBlocks, false)); @@ -394,7 +393,10 @@ int32_t stmtGetFromCache(STscStmt* pStmt) { if (NULL == pStmt->sql.pTableCache || taosHashGetSize(pStmt->sql.pTableCache) <= 0) { if (pStmt->bInfo.inExecCache) { - ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1); + if(ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1)){ + tscError("stmtGetFromCache error"); + return TSDB_CODE_TSC_STMT_CACHE_ERROR; + } pStmt->bInfo.needParse = false; tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName); return TSDB_CODE_SUCCESS; diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index a3754d06952fc24199d6dd3ab576bee889374318..01c99c6e9ed16048a715839dc4ccb18ca04c3852 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -418,7 +418,10 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) { static void tmqCommitRspCountDown(SMqCommitCbParamSet* pParamSet) { int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1); - ASSERT(waitingRspNum >= 0); + if(ASSERT(waitingRspNum >= 0)){ + tscError("tmqCommitRspCountDown error:%d", waitingRspNum); + return; + } if (waitingRspNum == 0) { tmqCommitDone(pParamSet); } diff --git a/source/common/src/systable.c b/source/common/src/systable.c index a51a711e960b312868e862b26c022408869e3325..02f1b7466f4471ba2f283f879aa36ca7cb31ad6e 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -67,6 +67,8 @@ static const SSysDbTableSchema clusterSchema[] = { {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, + {.name = "version", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, 
.sysInfo = true}, + {.name = "expire_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true}, }; static const SSysDbTableSchema userDBSchema[] = { @@ -176,6 +178,18 @@ static const SSysDbTableSchema userTagsSchema[] = { {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; +static const SSysDbTableSchema userColsSchema[] = { + {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "table_type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "col_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "col_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "col_length", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "col_precision", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "col_scale", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "col_nullable", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false} +}; + static const SSysDbTableSchema userTblDistSchema[] = { {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, @@ -293,6 +307,7 @@ static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema), false}, {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema), false}, {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema), false}, + {TSDB_INS_TABLE_COLS, userColsSchema, tListLen(userColsSchema), false}, // {TSDB_INS_TABLE_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)}, {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema), false}, {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema), true}, diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 3dfa32ccc54f324ccb6d88499046592ef97411ac..9b5b32cf695ff6d38ff09fe9eb778dbb2679a43c 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -22,7 +22,6 @@ #define MALLOC_ALIGN_BYTES 32 int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) { - ASSERT(pColumnInfoData != NULL); if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { return pColumnInfoData->varmeta.length; } else { @@ -66,14 +65,12 @@ int32_t getJsonValueLen(const char* data) { } int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull) { - ASSERT(pColumnInfoData != NULL); - if (isNull) { // There is a placehold for each NULL value of binary or nchar type. if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { pColumnInfoData->varmeta.offset[currentRow] = -1; // it is a null value of VAR type. 
} else { - colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow); + colDataSetNull_f_s(pColumnInfoData, currentRow); } pColumnInfoData->hasNull = true; @@ -112,7 +109,7 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con uint32_t len = pColumnInfoData->varmeta.length; pColumnInfoData->varmeta.offset[currentRow] = len; - memcpy(pColumnInfoData->pData + len, pData, dataLen); + memmove(pColumnInfoData->pData + len, pData, dataLen); pColumnInfoData->varmeta.length += dataLen; } else { memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * currentRow, pData, pColumnInfoData->info.bytes); @@ -178,8 +175,6 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows) { - ASSERT(pData != NULL && pColumnInfoData != NULL); - int32_t len = pColumnInfoData->info.bytes; if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { len = varDataTLen(pData); @@ -237,7 +232,10 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity, const SColumnInfoData* pSource, int32_t numOfRow2) { - ASSERT(pColumnInfoData != NULL && pSource != NULL && pColumnInfoData->info.type == pSource->info.type); + if (pColumnInfoData->info.type != pSource->info.type) { + return TSDB_CODE_FAILED; + } + if (numOfRow2 == 0) { return numOfRow1; } @@ -317,13 +315,12 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows, const SDataBlockInfo* pBlockInfo) { - ASSERT(pColumnInfoData != NULL && pSource != NULL && pColumnInfoData->info.type == pSource->info.type); - if (numOfRows <= 0) { - return numOfRows; + if (pColumnInfoData->info.type != pSource->info.type || (pBlockInfo != NULL && pBlockInfo->capacity < numOfRows)) { + return TSDB_CODE_FAILED; } - if (pBlockInfo != NULL) { - ASSERT(pBlockInfo->capacity >= numOfRows); + if (numOfRows <= 0) { + return numOfRows; } if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { @@ -359,14 +356,14 @@ size_t blockDataGetNumOfCols(const SSDataBlock* pBlock) { return taosArrayGetSiz size_t blockDataGetNumOfRows(const SSDataBlock* pBlock) { return pBlock->info.rows; } int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex) { - if (pDataBlock->info.rows > 0) { - // ASSERT(pDataBlock->info.dataLoad == 1); - } - if (pDataBlock == NULL || pDataBlock->info.rows <= 0 || pDataBlock->info.dataLoad == 0) { return 0; } + if (pDataBlock->info.rows > 0) { + // ASSERT(pDataBlock->info.dataLoad == 1); + } + size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); if (numOfCols <= 0) { return -1; @@ -389,7 +386,6 @@ int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex) } int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) { - assert(pSrc != NULL && pDest != NULL); int32_t capacity = pDest->info.capacity; size_t numOfCols = taosArrayGetSize(pDest->pDataBlock); @@ -407,8 +403,6 @@ int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) { } size_t blockDataGetSize(const SSDataBlock* pBlock) { - assert(pBlock != NULL); - size_t total = 0; size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { @@ -423,8 +417,6 @@ size_t blockDataGetSize(const SSDataBlock* pBlock) { // 
Actual data rows pluses the corresponding meta data must fit in one memory buffer of the given page size. int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex, int32_t pageSize) { - ASSERT(pBlock != NULL && stopIndex != NULL); - size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); int32_t numOfRows = pBlock->info.rows; @@ -438,7 +430,9 @@ int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startInd if (!hasVarCol) { size_t rowSize = blockDataGetRowSize(pBlock); int32_t capacity = payloadSize / (rowSize + numOfCols * bitmapChar / 8.0); - ASSERT(capacity > 0); + if (capacity <= 0) { + return TSDB_CODE_FAILED; + } *stopIndex = startIndex + capacity - 1; if (*stopIndex >= numOfRows) { @@ -470,7 +464,9 @@ int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startInd if (size > pageSize) { // pageSize must be able to hold one row *stopIndex = j - 1; - ASSERT(*stopIndex >= startIndex); + if (*stopIndex < startIndex) { + return TSDB_CODE_FAILED; + } return TSDB_CODE_SUCCESS; } @@ -541,8 +537,6 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3 * @return */ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) { - ASSERT(pBlock != NULL); - // write the number of rows *(uint32_t*)buf = pBlock->info.rows; @@ -613,7 +607,9 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { } pCol->varmeta.length = colLength; - ASSERT(pCol->varmeta.length <= pCol->varmeta.allocLen); + if (pCol->varmeta.length > pCol->varmeta.allocLen) { + return TSDB_CODE_FAILED; + } } memcpy(pCol->pData, pStart, colLength); @@ -660,7 +656,9 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) } pCol->varmeta.length = colLength; - ASSERT(pCol->varmeta.length <= pCol->varmeta.allocLen); + if (pCol->varmeta.length > pCol->varmeta.allocLen) { + return TSDB_CODE_FAILED; + } } if (!colDataIsNNull_s(pCol, 0, pBlock->info.rows)) { @@ -674,7 +672,6 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) } size_t blockDataGetRowSize(SSDataBlock* pBlock) { - ASSERT(pBlock != NULL); if (pBlock->info.rowSize == 0) { size_t rowSize = 0; @@ -703,7 +700,6 @@ size_t blockDataGetSerialMetaSize(uint32_t numOfCols) { } double blockDataGetSerialRowSize(const SSDataBlock* pBlock) { - ASSERT(pBlock != NULL); double rowSize = 0; size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); @@ -829,7 +825,7 @@ static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataB } else { for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { if (colDataIsNull_f(pSrc->nullbitmap, index[j])) { - colDataSetNull_f(pDst->nullbitmap, j); + colDataSetNull_f_s(pDst, j); continue; } memcpy(pDst->pData + j * pDst->info.bytes, pSrc->pData + index[j] * pDst->info.bytes, pDst->info.bytes); @@ -906,7 +902,6 @@ static int32_t* createTupleIndex(size_t rows) { static void destroyTupleIndex(int32_t* index) { taosMemoryFreeClear(index); } int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) { - ASSERT(pDataBlock != NULL && pOrderInfo != NULL); if (pDataBlock->info.rows <= 1) { return TSDB_CODE_SUCCESS; } @@ -1150,8 +1145,7 @@ void blockDataCleanup(SSDataBlock* pDataBlock) { void blockDataEmpty(SSDataBlock* pDataBlock) { SDataBlockInfo* pInfo = &pDataBlock->info; - ASSERT(pInfo->rows <= pDataBlock->info.capacity); - if (pInfo->capacity == 0) { + if (pInfo->capacity == 0 || pInfo->rows > pDataBlock->info.capacity) { return; } @@ -1167,19 +1161,17 @@ void 
blockDataEmpty(SSDataBlock* pDataBlock) { pInfo->window.skey = 0; } -// todo temporarily disable it - +/* + * NOTE: the type of the input column may be TSDB_DATA_TYPE_NULL, which is used to denote + * the all NULL value in this column. It is an internal representation of all NULL value column, and no visible to + * any users. The length of TSDB_DATA_TYPE_NULL is 0, and it is an special case. + */ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* pBlockInfo, uint32_t numOfRows, bool clearPayload) { - ASSERT(numOfRows > 0); - - if (numOfRows <= pBlockInfo->capacity) { + if (numOfRows <= 0 || numOfRows <= pBlockInfo->capacity) { return TSDB_CODE_SUCCESS; } - // todo temp disable it - // ASSERT(pColumn->info.bytes != 0); - int32_t existedRows = pBlockInfo->rows; if (IS_VAR_DATA_TYPE(pColumn->info.type)) { @@ -1200,9 +1192,12 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* int32_t oldLen = BitmapLen(existedRows); pColumn->nullbitmap = tmp; memset(&pColumn->nullbitmap[oldLen], 0, BitmapLen(numOfRows) - oldLen); - ASSERT(pColumn->info.bytes); + if (pColumn->info.bytes == 0) { + return TSDB_CODE_FAILED; + } - // make sure the allocated memory is MALLOC_ALIGN_BYTES aligned + // here we employ the aligned malloc function, to make sure that the address of allocated memory is aligned + // to MALLOC_ALIGN_BYTES tmp = taosMemoryMallocAlign(MALLOC_ALIGN_BYTES, numOfRows * pColumn->info.bytes); if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1216,9 +1211,11 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* pColumn->pData = tmp; - // todo remove it soon + // check if the allocated memory is aligned to the requried bytes. #if defined LINUX - ASSERT((((uint64_t)pColumn->pData) & (MALLOC_ALIGN_BYTES - 1)) == 0x0); + if ((((uint64_t)pColumn->pData) & (MALLOC_ALIGN_BYTES - 1)) != 0x0) { + return TSDB_CODE_FAILED; + } #endif if (clearPayload) { @@ -1255,25 +1252,6 @@ int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) { return TSDB_CODE_SUCCESS; } - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); - code = doEnsureCapacity(p, &pDataBlock->info, numOfRows, true); - if (code) { - return code; - } - } - - pDataBlock->info.capacity = numOfRows; - return TSDB_CODE_SUCCESS; -} - -int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows) { - int32_t code = 0; - if (numOfRows == 0 || numOfRows <= pDataBlock->info.capacity) { - return TSDB_CODE_SUCCESS; - } - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* p = taosArrayGet(pDataBlock->pDataBlock, i); @@ -1297,7 +1275,6 @@ void blockDataFreeRes(SSDataBlock* pBlock) { taosArrayDestroy(pBlock->pDataBlock); pBlock->pDataBlock = NULL; taosMemoryFreeClear(pBlock->pBlockAgg); - taosMemoryFree(pBlock->info.pTag); memset(&pBlock->info, 0, sizeof(SDataBlockInfo)); } @@ -1312,8 +1289,6 @@ void* blockDataDestroy(SSDataBlock* pBlock) { } int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) { - ASSERT(src != NULL); - dst->info = src->info; dst->info.rows = 0; dst->info.capacity = 0; @@ -1348,8 +1323,6 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) { } int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) { - ASSERT(src != NULL && dst != NULL); - blockDataCleanup(dst); int32_t code = 
blockDataEnsureCapacity(dst, src->info.rows); if (code != TSDB_CODE_SUCCESS) { @@ -1505,7 +1478,6 @@ SSDataBlock* createDataBlock() { } int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData) { - ASSERT(pBlock != NULL && pColInfoData != NULL); if (pBlock->pDataBlock == NULL) { pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData)); if (pBlock->pDataBlock == NULL) { @@ -1540,7 +1512,6 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId) } SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index) { - ASSERT(pBlock != NULL); if (index >= taosArrayGetSize(pBlock->pDataBlock)) { return NULL; } @@ -1971,10 +1942,10 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) int32_t len = 0; len += snprintf(dumpBuf + len, size - len, "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 - "|rows:%d|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "\n", + "|rows:%d|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId, pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, - pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey); + pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName); if (len >= size - 1) return dumpBuf; for (int32_t j = 0; j < rows; j++) { @@ -2164,7 +2135,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB case TSDB_DATA_TYPE_JSON: case TSDB_DATA_TYPE_MEDIUMBLOB: uError("the column type %" PRIi16 " is defined but not implemented yet", pColInfoData->info.type); - ASSERT(0); break; default: if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) { @@ -2198,7 +2168,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB } } else { uError("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); - ASSERT(0); } break; } @@ -2405,7 +2374,10 @@ _end: } char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { - ASSERT(stbFullName[0] != 0); + if (stbFullName[0] == 0) { + return NULL; + } + SArray* tags = taosArrayInit(0, sizeof(SSmlKv)); if (tags == NULL) { return NULL; @@ -2435,7 +2407,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { taosArrayDestroy(tags); - ASSERT(rname.ctbShortName && rname.ctbShortName[0]); + if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) { + return NULL; + } return rname.ctbShortName; } diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index be894df89aa4b72ec9ba93cb7c142a8c7fd25524..7c0de3d6f0ca67754f3c62fb5102a4d3837feedc 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -907,9 +907,7 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData * int32_t iTColumn = 1; STColumn *pTColumn = &pTSchema->columns[iTColumn]; - uint8_t *pb = NULL; - uint8_t *pf; - uint8_t *pv; + uint8_t *pb = NULL, *pf = NULL, *pv = NULL; switch (pRow->flag) { case HAS_VALUE: @@ -1544,10 +1542,6 @@ STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version) { return pTSchema; } -void tDestroyTSchema(STSchema *pTSchema) { - if (pTSchema) taosMemoryFree(pTSchema); -} - // SColData ======================================== void tColDataDestroy(void *ph) { SColData *pColData = (SColData *)ph; diff --git 
a/source/common/src/tglobal.c b/source/common/src/tglobal.c index deefa65595ef50c9b136e70265acf02e96fd8b1e..a9ff9a1e8b054adafde820653ede70868e9a7c46 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -60,6 +60,9 @@ int32_t tsElectInterval = 25 * 1000; int32_t tsHeartbeatInterval = 1000; int32_t tsHeartbeatTimeout = 20 * 1000; +// vnode +int64_t tsVndCommitMaxIntervalMs = 60 * 1000; + // monitor bool tsEnableMonitor = true; int32_t tsMonitorInterval = 30; @@ -73,14 +76,19 @@ bool tsEnableTelem = true; int32_t tsTelemInterval = 43200; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com"; uint16_t tsTelemPort = 80; +char* tsTelemUri = "/report"; + +bool tsEnableCrashReport = true; +char* tsClientCrashReportUri = "/ccrashreport"; +char* tsSvrCrashReportUri = "/dcrashreport"; // schemaless char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null"; char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value. // If set to empty system will generate table name using MD5 hash. // true means that the name and order of cols in each line are the same(only for influx protocol) -bool tsSmlDataFormat = false; -int32_t tsSmlBatchSize = 10000; +//bool tsSmlDataFormat = false; +//int32_t tsSmlBatchSize = 10000; // query int32_t tsQueryPolicy = 1; @@ -202,7 +210,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -309,11 +319,12 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, true) != 0) return -1; if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1; if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1; - if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1; - if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, true) != 0) return -1; +// if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1; +// if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, true) != 0) return -1; if (cfgAddInt32(pCfg, "maxMemUsedByInsert", tsMaxMemUsedByInsert, 1, INT32_MAX, true) != 0) return -1; if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, 0) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1; + if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1; tsNumOfTaskQueueThreads = tsNumOfCores / 2; tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); @@ -377,7 +388,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, 0) != 0) return -1; tsNumOfRpcThreads = tsNumOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1; tsNumOfCommitThreads = tsNumOfCores / 2; @@ -427,6 +438,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, 0) != 0) return -1; if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, 0) != 0) return -1; + if (cfgAddInt64(pCfg, "vndCommitMaxInterval", 
tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, 0) != 0) return -1; + if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1; if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1; if (cfgAddString(pCfg, "monitorFqdn", tsMonitorFqdn, 0) != 0) return -1; @@ -434,6 +447,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "monitorMaxLogs", tsMonitorMaxLogs, 1, 1000000, 0) != 0) return -1; if (cfgAddBool(pCfg, "monitorComp", tsMonitorComp, 0) != 0) return -1; + if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, 0) != 0) return -1; if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, 0) != 0) return -1; if (cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, 0) != 0) return -1; if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; @@ -648,9 +662,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN); tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN); - tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; +// tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; - tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; +// tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; tsMaxMemUsedByInsert = cfgGetItem(pCfg, "maxMemUsedByInsert")->i32; tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32; @@ -665,6 +679,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval; tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval; tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval; + tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32; return 0; @@ -715,7 +730,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfSnodeWriteThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32; tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64; - tsSIMDBuiltins = (bool) cfgGetItem(pCfg, "SIMD-builtins")->bval; + tsSIMDBuiltins = (bool)cfgGetItem(pCfg, "SIMD-builtins")->bval; tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval; tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32; @@ -726,6 +741,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsQueryRspPolicy = cfgGetItem(pCfg, "queryRspPolicy")->i32; tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval; + tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32; tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; @@ -743,6 +759,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsHeartbeatInterval = cfgGetItem(pCfg, "syncHeartbeatInterval")->i32; tsHeartbeatTimeout = cfgGetItem(pCfg, "syncHeartbeatTimeout")->i32; + tsVndCommitMaxIntervalMs = cfgGetItem(pCfg, "vndCommitMaxInterval")->i64; + tsStartUdfd = cfgGetItem(pCfg, "udf")->bval; tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs)); tstrncpy(tsUdfdLdLibPath, cfgGetItem(pCfg, "udfdLdLibPath")->str, sizeof(tsUdfdLdLibPath)); @@ -795,6 +813,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32; } else if (strcasecmp("cDebugFlag", name) == 0) { cDebugFlag = cfgGetItem(pCfg, 
"cDebugFlag")->i32; + } else if (strcasecmp("crashReporting", name) == 0) { + tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval; } break; } @@ -1028,10 +1048,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN); } else if (strcasecmp("smlTagName", name) == 0) { tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN); - } else if (strcasecmp("smlDataFormat", name) == 0) { - tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; - } else if (strcasecmp("smlBatchSize", name) == 0) { - tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; +// } else if (strcasecmp("smlDataFormat", name) == 0) { +// tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; +// } else if (strcasecmp("smlBatchSize", name) == 0) { +// tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; } else if (strcasecmp("shellActivityTimer", name) == 0) { tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32; } else if (strcasecmp("supportVnodes", name) == 0) { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 5e99c8ec11d4469e36802c0bfc90356bbb442d8d..d64e5c66e0c9376f8769e9a0d94bc55ceba86036 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -2821,8 +2821,8 @@ int32_t tSerializeSDbCfgRsp(void *buf, int32_t bufLen, const SDbCfgRsp *pRsp) { if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1; } if (tEncodeI8(&encoder, pRsp->schemaless) < 0) return -1; + if (tEncodeI16(&encoder, pRsp->sstTrigger) < 0) return -1; tEndEncode(&encoder); - int32_t tlen = encoder.pos; tEncoderClear(&encoder); return tlen; @@ -2873,6 +2873,7 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) { } } if (tDecodeI8(&decoder, &pRsp->schemaless) < 0) return -1; + if (tDecodeI16(&decoder, &pRsp->sstTrigger) < 0) return -1; tEndDecode(&decoder); tDecoderClear(&decoder); @@ -3001,7 +3002,7 @@ int32_t tSerializeSTableIndexRsp(void *buf, int32_t bufLen, const STableIndexRsp void tFreeSerializeSTableIndexRsp(STableIndexRsp *pRsp) { if (pRsp->pIndex != NULL) { - taosArrayDestroy(pRsp->pIndex); + tFreeSTableIndexRsp(pRsp); pRsp->pIndex = NULL; } } @@ -3191,6 +3192,7 @@ int32_t tSerializeSRetrieveTableReq(void *buf, int32_t bufLen, SRetrieveTableReq if (tEncodeI64(&encoder, pReq->showId) < 0) return -1; if (tEncodeCStr(&encoder, pReq->db) < 0) return -1; if (tEncodeCStr(&encoder, pReq->tb) < 0) return -1; + if (tEncodeCStr(&encoder, pReq->filterTb) < 0) return -1; if (tEncodeCStr(&encoder, pReq->user) < 0) return -1; tEndEncode(&encoder); @@ -3207,6 +3209,7 @@ int32_t tDeserializeSRetrieveTableReq(void *buf, int32_t bufLen, SRetrieveTableR if (tDecodeI64(&decoder, &pReq->showId) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->tb) < 0) return -1; + if (tDecodeCStrTo(&decoder, pReq->filterTb) < 0) return -1; if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1; tEndDecode(&decoder); @@ -5421,6 +5424,14 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS if (tEncodeCStr(&encoder, pField->name) < 0) return -1; } if (tEncodeI8(&encoder, pReq->createStb) < 0) return -1; + if (tEncodeU64(&encoder, pReq->targetStbUid) < 0) return -1; + if (tEncodeI32(&encoder, taosArrayGetSize(pReq->fillNullCols)) < 0) return -1; + for (int32_t i = 0; i < taosArrayGetSize(pReq->fillNullCols); ++i) { + SColLocation *pCol = taosArrayGet(pReq->fillNullCols, i); + if 
(tEncodeI16(&encoder, pCol->slotId) < 0) return -1; + if (tEncodeI16(&encoder, pCol->colId) < 0) return -1; + if (tEncodeI8(&encoder, pCol->type) < 0) return -1; + } tEndEncode(&encoder); @@ -5482,6 +5493,27 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea } } if (tDecodeI8(&decoder, &pReq->createStb) < 0) return -1; + if (tDecodeU64(&decoder, &pReq->targetStbUid) < 0) return -1; + int32_t numOfFillNullCols = 0; + if (tDecodeI32(&decoder, &numOfFillNullCols) < 0) return -1; + if (numOfFillNullCols > 0) { + pReq->fillNullCols = taosArrayInit(numOfFillNullCols, sizeof(SColLocation)); + if (pReq->fillNullCols == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + + for (int32_t i = 0; i < numOfFillNullCols; ++i) { + SColLocation col = {0}; + if (tDecodeI16(&decoder, &col.slotId) < 0) return -1; + if (tDecodeI16(&decoder, &col.colId) < 0) return -1; + if (tDecodeI8(&decoder, &col.type) < 0) return -1; + if (taosArrayPush(pReq->fillNullCols, &col) == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + } + } tEndDecode(&decoder); @@ -5551,6 +5583,7 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) { taosArrayDestroy(pReq->pTags); taosMemoryFreeClear(pReq->sql); taosMemoryFreeClear(pReq->ast); + taosArrayDestroy(pReq->fillNullCols); } int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) { diff --git a/source/common/src/tname.c b/source/common/src/tname.c index 2d12204a31362423a4ce5e57f348ee209f099cbf..e5ed7a3728cebe78ae9cc24304e6e8ce2e94c7ef 100644 --- a/source/common/src/tname.c +++ b/source/common/src/tname.c @@ -96,8 +96,6 @@ SName* toName(int32_t acctId, const char* pDbName, const char* pTableName, SName } int32_t tNameExtractFullName(const SName* name, char* dst) { - assert(name != NULL && dst != NULL); - // invalid full name format, abort if (!tNameIsValid(name)) { return -1; @@ -107,7 +105,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) { size_t tnameLen = strlen(name->tname); if (tnameLen > 0) { - /*assert(name->type == TSDB_TABLE_NAME_T);*/ + /*ASSERT(name->type == TSDB_TABLE_NAME_T);*/ dst[len] = TS_PATH_DELIMITER[0]; memcpy(dst + len + 1, name->tname, tnameLen); @@ -118,25 +116,21 @@ int32_t tNameExtractFullName(const SName* name, char* dst) { } int32_t tNameLen(const SName* name) { - assert(name != NULL); - char tmp[12] = {0}; int32_t len = sprintf(tmp, "%d", name->acctId); int32_t len1 = (int32_t)strlen(name->dbname); int32_t len2 = (int32_t)strlen(name->tname); if (name->type == TSDB_DB_NAME_T) { - assert(len2 == 0); + ASSERT(len2 == 0); return len + len1 + TSDB_NAME_DELIMITER_LEN; } else { - assert(len2 > 0); + ASSERT(len2 > 0); return len + len1 + len2 + TSDB_NAME_DELIMITER_LEN * 2; } } bool tNameIsValid(const SName* name) { - assert(name != NULL); - if (!VALID_NAME_TYPE(name->type)) { return false; } @@ -149,15 +143,12 @@ bool tNameIsValid(const SName* name) { } SName* tNameDup(const SName* name) { - assert(name != NULL); - SName* p = taosMemoryMalloc(sizeof(SName)); memcpy(p, name, sizeof(SName)); return p; } int32_t tNameGetDbName(const SName* name, char* dst) { - assert(name != NULL && dst != NULL); strncpy(dst, name->dbname, tListLen(name->dbname)); return 0; } @@ -165,28 +156,22 @@ int32_t tNameGetDbName(const SName* name, char* dst) { const char* tNameGetDbNameP(const SName* name) { return &name->dbname[0]; } int32_t tNameGetFullDbName(const SName* name, char* dst) { - assert(name != NULL && dst != NULL); snprintf(dst, TSDB_DB_FNAME_LEN, "%d.%s", name->acctId, 
name->dbname); return 0; } -bool tNameIsEmpty(const SName* name) { - assert(name != NULL); - return name->type == 0 || name->acctId == 0; -} +bool tNameIsEmpty(const SName* name) { return name->type == 0 || name->acctId == 0; } const char* tNameGetTableName(const SName* name) { - assert(name != NULL && name->type == TSDB_TABLE_NAME_T); + ASSERT(name != NULL && name->type == TSDB_TABLE_NAME_T); return &name->tname[0]; } void tNameAssign(SName* dst, const SName* src) { memcpy(dst, src, sizeof(SName)); } int32_t tNameSetDbName(SName* dst, int32_t acct, const char* dbName, size_t nameLen) { - assert(dst != NULL && dbName != NULL && nameLen > 0); - // too long account id or too long db name - if (nameLen >= tListLen(dst->dbname)) { + if (nameLen <= 0 || nameLen >= tListLen(dst->dbname)) { return -1; } @@ -197,8 +182,6 @@ int32_t tNameSetDbName(SName* dst, int32_t acct, const char* dbName, size_t name } int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen) { - assert(dst != NULL && tbName != NULL && nameLen > 0); - // too long account id or too long db name if (nameLen >= tListLen(dst->tname) || nameLen <= 0) { return -1; @@ -210,7 +193,6 @@ int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen) { } int32_t tNameSetAcctId(SName* dst, int32_t acctId) { - assert(dst != NULL); dst->acctId = acctId; return 0; } @@ -245,7 +227,9 @@ bool tNameTbNameEqual(SName* left, SName* right) { } int32_t tNameFromString(SName* dst, const char* str, uint32_t type) { - assert(dst != NULL && str != NULL && strlen(str) > 0); + if (strlen(str) == 0) { + return -1; + } char* p = NULL; if ((type & T_NAME_ACCT) == T_NAME_ACCT) { @@ -342,5 +326,4 @@ void buildChildTableName(RandTableName* rName) { strcat(rName->ctbShortName, temp); } taosStringBuilderDestroy(&sb); - rName->uid = *(uint64_t*)(context.digest); } diff --git a/source/common/src/trow.c b/source/common/src/trow.c index ca2c0567439f22bcc77e23ae69065f065a275764..9d381ce15fa762b74cecf23eeec1fa6667a10b99 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -76,7 +76,6 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) { return; } if (!pVal->val) { - ASSERT(0); printf("BadVal "); return; } @@ -342,7 +341,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) { } if (iColumn == 0) { - ASSERT(pColVal->cid == pTColumn->colId); + ASSERT(pColVal && pColVal->cid == pTColumn->colId); ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP); ASSERT(pTColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID); } else { @@ -490,7 +489,6 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) { if (!pBitmap || colIdx < 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -512,7 +510,6 @@ int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pVa *pValType = ((*pDestByte) & 0x03); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -521,7 +518,6 @@ int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pVa int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) { if (!pBitmap || colIdx < 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -555,7 +551,6 @@ int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pVal *pValType = ((*pDestByte) & 0x01); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -564,7 +559,6 @@ 
int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pVal int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType) { if (!pBitmap || colIdx < 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -607,7 +601,6 @@ int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType) { // *pDestByte |= (valType); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -630,7 +623,6 @@ int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_ output->val = POINTER_SHIFT(pRow, offset); } #else - ASSERT(0); if (offset < 0) { terrno = TSDB_CODE_INVALID_PARA; output->valType = TD_VTYPE_NONE; @@ -680,7 +672,6 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp return terrno; } #else - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; #endif @@ -707,8 +698,8 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp if (!pBuilder->hasNone) pBuilder->hasNone = true; return TSDB_CODE_SUCCESS; default: - ASSERT(0); - break; + terrno = TSDB_CODE_INVALID_PARA; + return terrno; } if (TD_IS_TP_ROW(pRow)) { @@ -722,7 +713,6 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const void *val, bool isCopyVarData, int8_t colType, int16_t colIdx, int32_t offset, col_id_t colId) { if ((offset < (int32_t)sizeof(SKvRowIdx)) || (colIdx < 1)) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -810,7 +800,6 @@ int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBou pBuilder->nCols = nCols; pBuilder->nBoundCols = nBoundCols; if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -832,7 +821,6 @@ int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBou int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) { pBuilder->pBuf = (STSRow *)pBuf; if (!pBuilder->pBuf) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -869,7 +857,6 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) { TD_ROW_SET_NCOLS(pBuilder->pBuf, pBuilder->nBoundCols); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -880,7 +867,6 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) { int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) { pBuilder->pBuf = (STSRow *)pBuf; if (!pBuilder->pBuf) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -900,7 +886,6 @@ int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) { #endif break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -920,7 +905,6 @@ int32_t tdSRowSetTpInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t flen) { pBuilder->flen = flen; pBuilder->nCols = nCols; if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -939,7 +923,6 @@ int32_t tdSRowSetInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBoundCols, pBuilder->nCols = nCols; pBuilder->nBoundCols = nBoundCols; if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -968,7 +951,6 @@ int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValT tdGetBitmapValTypeI(pBitmap, colIdx, pValType); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return TSDB_CODE_FAILED; } 
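/*
 * The bitmap helpers in this hunk and the surrounding ones now report invalid input by
 * setting terrno to TSDB_CODE_INVALID_PARA and returning a non-zero code instead of
 * aborting via ASSERT(0). The sketch below is an illustration of the resulting caller-side
 * contract only, not part of the change set; it assumes tdGetBitmapValTypeII() keeps the
 * signature and TSDB_CODE_SUCCESS return convention visible in the nearby hunks.
 */
static int32_t readBitmapValTypeChecked(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) {
  if (tdGetBitmapValTypeII(pBitmap, colIdx, pValType) != TSDB_CODE_SUCCESS) {
    return terrno;  // TSDB_CODE_INVALID_PARA was set by the helper; propagate rather than abort
  }
  return TSDB_CODE_SUCCESS;
}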
@@ -987,7 +969,6 @@ bool tdIsBitmapValTypeNorm(const void *pBitmap, int16_t idx, int8_t bitmapMode) int32_t tdSetBitmapValTypeII(void *pBitmap, int16_t colIdx, TDRowValT valType) { if (!pBitmap || colIdx < 0) { - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -1014,7 +995,6 @@ int32_t tdSetBitmapValTypeII(void *pBitmap, int16_t colIdx, TDRowValT valType) { // *pDestByte |= (valType); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return terrno; } @@ -1031,7 +1011,6 @@ int32_t tdSetBitmapValType(void *pBitmap, int16_t colIdx, TDRowValT valType, int tdSetBitmapValTypeI(pBitmap, colIdx, valType); break; default: - ASSERT(0); terrno = TSDB_CODE_INVALID_PARA; return TSDB_CODE_FAILED; } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index ec05ef4c449c3ad3cb917a11feb727030d8ee06d..559ffd2aaf24e3628110a6f5418cd7d8094ed397 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -168,12 +168,13 @@ int64_t parseFraction(char* str, char** end, int32_t timePrec) { i = MICRO_SEC_FRACTION_LEN; } times = MICRO_SEC_FRACTION_LEN - i; - } else { - assert(timePrec == TSDB_TIME_PRECISION_NANO); + } else if (timePrec == TSDB_TIME_PRECISION_NANO) { if (i >= NANO_SEC_FRACTION_LEN) { i = NANO_SEC_FRACTION_LEN; } times = NANO_SEC_FRACTION_LEN - i; + } else { + return -1; } fraction = strnatoi(str, i) * factor[times]; @@ -510,8 +511,11 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre // !!!!notice: double lose precison if time is too large, for example: 1626006833631000000*1.0 = double = // 1626006833631000064 int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit) { - assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO || - fromPrecision == TSDB_TIME_PRECISION_NANO); + if (fromPrecision != TSDB_TIME_PRECISION_MILLI && fromPrecision != TSDB_TIME_PRECISION_MICRO && + fromPrecision != TSDB_TIME_PRECISION_NANO) { + return -1; + } + int64_t factors[3] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1}; double tmp = time; switch (toUnit) { @@ -761,8 +765,7 @@ int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char } int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) { - if (pInterval->sliding == 0) { - assert(pInterval->interval == 0); + if (pInterval->sliding == 0 && pInterval->interval == 0) { return t; } @@ -931,7 +934,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) default: fractionLen = 0; - assert(false); + ASSERT(false); } taosLocalTime(", &ptm); diff --git a/source/common/src/ttszip.c b/source/common/src/ttszip.c index 3a6bc097c1ae97c7cfadac1df81b6cfb0897dab3..0faa6eb4c85a318a25e6324063cf8ede237c88f6 100644 --- a/source/common/src/ttszip.c +++ b/source/common/src/ttszip.c @@ -17,6 +17,7 @@ #include "ttszip.h" #include "taoserror.h" #include "tcompression.h" +#include "tlog.h" static int32_t getDataStartOffset(); static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo); @@ -202,14 +203,14 @@ void* tsBufDestroy(STSBuf* pTSBuf) { static STSGroupBlockInfoEx* tsBufGetLastGroupInfo(STSBuf* pTSBuf) { int32_t last = pTSBuf->numOfGroups - 1; - assert(last >= 0); + ASSERT(last >= 0); return &pTSBuf->pData[last]; } static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { if (pTSBuf->numOfAlloc <= pTSBuf->numOfGroups) { uint32_t newSize = (uint32_t)(pTSBuf->numOfAlloc * 1.5); - assert((int32_t)newSize > 
pTSBuf->numOfAlloc); + ASSERT((int32_t)newSize > pTSBuf->numOfAlloc); STSGroupBlockInfoEx* tmp = (STSGroupBlockInfoEx*)taosMemoryRealloc(pTSBuf->pData, sizeof(STSGroupBlockInfoEx) * newSize); @@ -233,7 +234,7 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfGroups].info; pBlockInfo->id = id; pBlockInfo->offset = pTSBuf->fileSize; - assert(pBlockInfo->offset >= getDataStartOffset()); + ASSERT(pBlockInfo->offset >= getDataStartOffset()); // update vnode info in file TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups, pBlockInfo); @@ -282,7 +283,7 @@ static void writeDataToDisk(STSBuf* pTSBuf) { pTsData->allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize); int64_t r = taosLSeekFile(pTSBuf->pFile, pTSBuf->fileSize, SEEK_SET); - assert(r == 0); + ASSERT(r == 0); /* * format for output data: @@ -316,7 +317,7 @@ static void writeDataToDisk(STSBuf* pTSBuf) { taosWriteFile(pTSBuf->pFile, &pBlock->compLen, sizeof(pBlock->compLen)); metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &trueLen, sizeof(pBlock->tag.nLen)); - assert(metaLen == getTagAreaLength(&pBlock->tag)); + ASSERT(metaLen == getTagAreaLength(&pBlock->tag)); int32_t blockSize = metaLen + sizeof(pBlock->numOfElem) + sizeof(pBlock->compLen) * 2 + pBlock->compLen; pTSBuf->fileSize += blockSize; @@ -379,7 +380,7 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { size_t sz = 0; if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) { char* tp = taosMemoryRealloc(pBlock->tag.pz, pBlock->tag.nLen + 1); - assert(tp != NULL); + ASSERT(tp != NULL); memset(tp, 0, pBlock->tag.nLen + 1); pBlock->tag.pz = tp; @@ -410,14 +411,14 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { // read the comp length at the length of comp block sz = taosReadFile(pTSBuf->pFile, &pBlock->padding, sizeof(pBlock->padding)); - assert(pBlock->padding == pBlock->compLen); + ASSERT(pBlock->padding == pBlock->compLen); int32_t n = 0; sz = taosReadFile(pTSBuf->pFile, &n, sizeof(pBlock->tag.nLen)); if (pBlock->tag.nType == TSDB_DATA_TYPE_NULL) { - assert(n == 0); + ASSERT(n == 0); } else { - assert(n == pBlock->tag.nLen); + ASSERT(n == pBlock->tag.nLen); } UNUSED(sz); @@ -477,7 +478,7 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t id, SVariant* tag, const char* pData, i pBlockInfo = tsBufGetLastGroupInfo(pTSBuf); } - assert(pBlockInfo->info.id == id); + ASSERT(pBlockInfo->info.id == id); if ((taosVariantCompare(&pTSBuf->block.tag, tag) != 0) && ptsData->len > 0) { // new arrived data with different tags value, save current value into disk first @@ -596,7 +597,7 @@ static int32_t tsBufFindBlockByTag(STSBuf* pTSBuf, STSGroupBlockInfo* pBlockInfo static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex) { STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[groupIndex].info; if (pBlockInfo->numOfBlocks <= blockIndex) { - assert(false); + ASSERT(false); } STSCursor* pCur = &pTSBuf->cur; @@ -613,7 +614,7 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex } } else { if (tsBufFindBlock(pTSBuf, pBlockInfo, blockIndex) == -1) { - assert(false); + ASSERT(false); } } @@ -633,7 +634,7 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf, pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize); - 
assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len)); + ASSERT((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len)); pCur->vgroupIndex = groupIndex; pCur->blockIndex = blockIndex; @@ -668,7 +669,9 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { return -1; } - assert(pHeader->tsOrder == TSDB_ORDER_ASC || pHeader->tsOrder == TSDB_ORDER_DESC); + if (pHeader->tsOrder != TSDB_ORDER_ASC && pHeader->tsOrder != TSDB_ORDER_DESC) { + return -1; + } int32_t r = taosLSeekFile(pTSBuf->pFile, 0, SEEK_SET); if (r != 0) { @@ -705,7 +708,7 @@ bool tsBufNextPos(STSBuf* pTSBuf) { } } else { // get the last timestamp record in the last block of the last vnode - assert(pTSBuf->numOfGroups > 0); + ASSERT(pTSBuf->numOfGroups > 0); int32_t groupIndex = pTSBuf->numOfGroups - 1; pCur->vgroupIndex = groupIndex; @@ -729,7 +732,7 @@ bool tsBufNextPos(STSBuf* pTSBuf) { int32_t step = pCur->order == TSDB_ORDER_ASC ? 1 : -1; while (1) { - assert(pTSBuf->tsData.len == pTSBuf->block.numOfElem * TSDB_KEYSIZE); + ASSERT(pTSBuf->tsData.len == pTSBuf->block.numOfElem * TSDB_KEYSIZE); if ((pCur->order == TSDB_ORDER_ASC && pCur->tsIndex >= pTSBuf->block.numOfElem - 1) || (pCur->order == TSDB_ORDER_DESC && pCur->tsIndex <= 0)) { @@ -810,7 +813,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { } // src can only have one vnode index - assert(pSrcBuf->numOfGroups == 1); + ASSERT(pSrcBuf->numOfGroups == 1); // there are data in buffer, flush to disk first tsBufFlush(pDestBuf); @@ -853,7 +856,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { } int32_t r = taosLSeekFile(pDestBuf->pFile, 0, SEEK_END); - assert(r == 0); + ASSERT(r == 0); int64_t offset = getDataStartOffset(); int32_t size = (int32_t)pSrcBuf->fileSize - (int32_t)offset; @@ -881,7 +884,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) { } pDestBuf->fileSize = (uint32_t)file_size; - assert(pDestBuf->fileSize == oldSize + size); + ASSERT(pDestBuf->fileSize == oldSize + size); return 0; } @@ -913,7 +916,10 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ pTSBuf->fileSize += len; pTSBuf->tsOrder = order; - assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC); + if (order != TSDB_ORDER_ASC && order != TSDB_ORDER_DESC) { + tsBufDestroy(pTSBuf); + return NULL; + } STSBufFileHeader header = { .magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder}; @@ -1095,7 +1101,7 @@ void tsBufGetGroupIdList(STSBuf* pTSBuf, int32_t* num, int32_t** id) { } int32_t dumpFileBlockByGroupId(STSBuf* pTSBuf, int32_t groupIndex, void* buf, int32_t* len, int32_t* numOfBlocks) { - assert(groupIndex >= 0 && groupIndex < pTSBuf->numOfGroups); + ASSERT(groupIndex >= 0 && groupIndex < pTSBuf->numOfGroups); STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[groupIndex].info; *len = 0; diff --git a/source/common/src/ttypes.c b/source/common/src/ttypes.c index d412fd89da36730f2fda350340d6499110f5720d..c83bdc0e3293e97c01a2eb7aed24652e9fd9775d 100644 --- a/source/common/src/ttypes.c +++ b/source/common/src/ttypes.c @@ -140,7 +140,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { } } -void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) { +int32_t operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) { if (optr == OP_TYPE_ADD) { switch (type) { case TSDB_DATA_TYPE_TINYINT: @@ -177,11 
+177,12 @@ void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) { SET_DOUBLE_VAL(dst, GET_DOUBLE_VAL(s1) + GET_DOUBLE_VAL(s2)); break; default: { - assert(0); - break; + return -1; } } } else { - assert(0); + return -1; } + + return 0; } diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c index 65e9a767f5843d7ce025ccfbe2f789475d7105f6..de225581a679db43a4c5b7aaccc12854d75d0495 100644 --- a/source/common/src/tvariant.c +++ b/source/common/src/tvariant.c @@ -168,7 +168,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) { pSrc->nType == TSDB_DATA_TYPE_JSON) { int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE; char *p = taosMemoryRealloc(pDst->pz, len); - assert(p); + ASSERT(p); memset(p, 0, len); pDst->pz = p; @@ -192,7 +192,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) { size_t num = taosArrayGetSize(pSrc->arr); pDst->arr = taosArrayInit(num, sizeof(int64_t)); pDst->nLen = pSrc->nLen; - assert(pSrc->nLen == num); + ASSERT(pSrc->nLen == num); for (size_t i = 0; i < num; i++) { int64_t *p = taosArrayGet(pSrc->arr, i); taosArrayPush(pDst->arr, p); diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index d308d3e618f132eecc82e482f7c4fe120c013bf4..4910b0ac3faab414b4a3197cd2412a7e4badd21e 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -44,6 +44,7 @@ static struct { char apolloUrl[PATH_MAX]; const char **envCmd; SArray *pArgs; // SConfigPair + int64_t startTime; } global = {0}; static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143, true); } @@ -67,23 +68,71 @@ static void dmStopDnode(int signum, void *sigInfo, void *context) { dmStop(); } +void dmLogCrash(int signum, void *sigInfo, void *context) { + taosIgnSignal(SIGTERM); + taosIgnSignal(SIGHUP); + taosIgnSignal(SIGINT); + taosIgnSignal(SIGBREAK); + +#ifndef WINDOWS + taosIgnSignal(SIGBUS); +#endif + taosIgnSignal(SIGABRT); + taosIgnSignal(SIGFPE); + taosIgnSignal(SIGSEGV); + + char *pMsg = NULL; + const char *flags = "UTL FATAL "; + ELogLevel level = DEBUG_FATAL; + int32_t dflag = 255; + int64_t msgLen= -1; + + if (tsEnableCrashReport) { + if (taosGenCrashJsonMsg(signum, &pMsg, dmGetClusterId(), global.startTime)) { + taosPrintLog(flags, level, dflag, "failed to generate crash json msg"); + goto _return; + } else { + msgLen = strlen(pMsg); + } + } + +_return: + + taosLogCrashInfo("taosd", pMsg, msgLen, signum, sigInfo); + +#ifdef _TD_DARWIN_64 + exit(signum); +#elif defined(WINDOWS) + exit(signum); +#endif +} + static void dmSetSignalHandle() { taosSetSignal(SIGUSR1, dmSetDebugFlag); taosSetSignal(SIGUSR2, dmSetAssert); taosSetSignal(SIGTERM, dmStopDnode); taosSetSignal(SIGHUP, dmStopDnode); taosSetSignal(SIGINT, dmStopDnode); - taosSetSignal(SIGABRT, dmStopDnode); taosSetSignal(SIGBREAK, dmStopDnode); #ifndef WINDOWS taosSetSignal(SIGTSTP, dmStopDnode); taosSetSignal(SIGQUIT, dmStopDnode); #endif + +#ifndef WINDOWS + taosSetSignal(SIGBUS, dmLogCrash); +#endif + taosSetSignal(SIGABRT, dmLogCrash); + taosSetSignal(SIGFPE, dmLogCrash); + taosSetSignal(SIGSEGV, dmLogCrash); } static int32_t dmParseArgs(int32_t argc, char const *argv[]) { + global.startTime = taosGetTimestampMs(); + int32_t cmdEnvIndex = 0; if (argc < 2) return 0; + global.envCmd = taosMemoryMalloc((argc - 1) * sizeof(char *)); memset(global.envCmd, 0, (argc - 1) * sizeof(char *)); for (int32_t i = 1; i < argc; ++i) { diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h 
b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h index c776beb3f099d40173bc69490f7dc65bef67ed71..ff32cbcb08b2c743644df3b077408dfd4878e999 100644 --- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h +++ b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h @@ -29,6 +29,7 @@ typedef struct SDnodeMgmt { const char *name; TdThread statusThread; TdThread monitorThread; + TdThread crashReportThread; SSingleWorker mgmtWorker; ProcessCreateNodeFp processCreateNodeFp; ProcessDropNodeFp processDropNodeFp; @@ -55,6 +56,8 @@ int32_t dmStartStatusThread(SDnodeMgmt *pMgmt); void dmStopStatusThread(SDnodeMgmt *pMgmt); int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt); void dmStopMonitorThread(SDnodeMgmt *pMgmt); +int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt); +void dmStopCrashReportThread(SDnodeMgmt *pMgmt); int32_t dmStartWorker(SDnodeMgmt *pMgmt); void dmStopWorker(SDnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index d2db1a4a62fd157b2df235133c85bb6e38ac680d..51df293ba70bc37be14c763ecb4a5ea296077bd4 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -23,6 +23,9 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) { if (dmStartMonitorThread(pMgmt) != 0) { return -1; } + if (dmStartCrashReportThread(pMgmt) != 0) { + return -1; + } return 0; } @@ -30,6 +33,7 @@ static void dmStopMgmt(SDnodeMgmt *pMgmt) { pMgmt->pData->stopped = true; dmStopMonitorThread(pMgmt); dmStopStatusThread(pMgmt); + dmStopCrashReportThread(pMgmt); } static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c index 80c040a5e8f84a8d5d7ea887b8a633f945576e17..76c8e09b70a31c8e9168809dea69bc1a1a3e5478 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmWorker.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "dmInt.h" +#include "thttp.h" static void *dmStatusThreadFp(void *param) { SDnodeMgmt *pMgmt = param; @@ -63,6 +64,63 @@ static void *dmMonitorThreadFp(void *param) { return NULL; } +static void *dmCrashReportThreadFp(void *param) { + SDnodeMgmt *pMgmt = param; + int64_t lastTime = taosGetTimestampMs(); + setThreadName("dnode-crashReport"); + char filepath[PATH_MAX] = {0}; + snprintf(filepath, sizeof(filepath), "%s%s.taosdCrashLog", tsLogDir, TD_DIRSEP); + char *pMsg = NULL; + int64_t msgLen = 0; + TdFilePtr pFile = NULL; + bool truncateFile = false; + int32_t sleepTime = 200; + int32_t reportPeriodNum = 3600 * 1000 / sleepTime;; + int32_t loopTimes = reportPeriodNum; + + while (1) { + if (pMgmt->pData->dropped || pMgmt->pData->stopped) break; + if (loopTimes++ < reportPeriodNum) { + taosMsleep(sleepTime); + continue; + } + + taosReadCrashInfo(filepath, &pMsg, &msgLen, &pFile); + if (pMsg && msgLen > 0) { + if (taosSendHttpReport(tsTelemServer, tsSvrCrashReportUri, tsTelemPort, pMsg, msgLen, HTTP_FLAT) != 0) { + dError("failed to send crash report"); + if (pFile) { + taosReleaseCrashLogFile(pFile, false); + continue; + } + } else { + dInfo("succeed to send crash report"); + truncateFile = true; + } + } else { + dDebug("no crash info"); + } + + taosMemoryFree(pMsg); + + if (pMsg && msgLen > 0) { + pMsg = NULL; + continue; + } + + if (pFile) { + taosReleaseCrashLogFile(pFile, truncateFile); + truncateFile = false; + } + + taosMsleep(sleepTime); + loopTimes = 0; + } + + return NULL; +} + + int32_t dmStartStatusThread(SDnodeMgmt *pMgmt) { TdThreadAttr thAttr; 
taosThreadAttrInit(&thAttr); @@ -105,6 +163,36 @@ void dmStopMonitorThread(SDnodeMgmt *pMgmt) { } } +int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt) { + if (!tsEnableCrashReport) { + return 0; + } + + TdThreadAttr thAttr; + taosThreadAttrInit(&thAttr); + taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE); + if (taosThreadCreate(&pMgmt->crashReportThread, &thAttr, dmCrashReportThreadFp, pMgmt) != 0) { + dError("failed to create crashReport thread since %s", strerror(errno)); + return -1; + } + + taosThreadAttrDestroy(&thAttr); + tmsgReportStartup("dnode-crashReport", "initialized"); + return 0; +} + +void dmStopCrashReportThread(SDnodeMgmt *pMgmt) { + if (!tsEnableCrashReport) { + return; + } + + if (taosCheckPthreadValid(pMgmt->crashReportThread)) { + taosThreadJoin(pMgmt->crashReportThread, NULL); + taosThreadClear(&pMgmt->crashReportThread); + } +} + + static void dmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { SDnodeMgmt *pMgmt = pInfo->ahandle; int32_t code = -1; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index f736ffd0c8ec4f4a4517f50080ecdac7f06c922d..f06669a610142a38b6b2937c95bd150c324df568 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -15,171 +15,166 @@ #define _DEFAULT_SOURCE #include "mmInt.h" +#include "tjson.h" + +static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) { + int32_t code = 0; + + tjsonGetInt32ValueFromDouble(pJson, "deployed", pOption->deploy, code); + if (code < 0) return -1; + tjsonGetInt32ValueFromDouble(pJson, "selfIndex", pOption->selfIndex, code); + if (code < 0) return 0; + + SJson *replicas = tjsonGetObjectItem(pJson, "replicas"); + if (replicas == NULL) return 0; + pOption->numOfReplicas = tjsonGetArraySize(replicas); + + for (int32_t i = 0; i < pOption->numOfReplicas; ++i) { + SJson *replica = tjsonGetArrayItem(replicas, i); + if (replica == NULL) return -1; + + SReplica *pReplica = pOption->replicas + i; + tjsonGetInt32ValueFromDouble(replica, "id", pReplica->id, code); + if (code < 0) return -1; + code = tjsonGetStringValue(replica, "fqdn", pReplica->fqdn); + if (code < 0) return -1; + tjsonGetUInt16ValueFromDouble(replica, "port", pReplica->port, code); + if (code < 0) return -1; + } + + return 0; +} int32_t mmReadFile(const char *path, SMnodeOpt *pOption) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int32_t len = 0; - int32_t maxLen = 4096; - char *content = taosMemoryCalloc(1, maxLen + 1); - cJSON *root = NULL; - char file[PATH_MAX] = {0}; + int32_t code = -1; TdFilePtr pFile = NULL; - + char *pData = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP); + + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("mnode file:%s not exist", file); + return 0; + } + pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { - code = 0; + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open mnode file:%s since %s", file, terrstr()); goto _OVER; } - len = (int32_t)taosReadFile(pFile, content, maxLen); - if (len <= 0) { - dError("failed to read %s since content is null", file); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat mnode file:%s since %s", file, terrstr()); goto _OVER; } - content[len] = 0; - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); + pData = taosMemoryMalloc(size + 
1); + if (pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _OVER; } - cJSON *deployed = cJSON_GetObjectItem(root, "deployed"); - if (!deployed || deployed->type != cJSON_Number) { - dError("failed to read %s since deployed not found", file); + if (taosReadFile(pFile, pData, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read mnode file:%s since %s", file, terrstr()); goto _OVER; } - pOption->deploy = deployed->valueint; - cJSON *selfIndex = cJSON_GetObjectItem(root, "selfIndex"); - if (selfIndex) { - if (selfIndex->type != cJSON_Number) { - dError("failed to read %s since selfIndex not found", file); - goto _OVER; - } - pOption->selfIndex = selfIndex->valueint; - } + pData[size] = '\0'; - cJSON *replicas = cJSON_GetObjectItem(root, "replicas"); - if (replicas) { - if (replicas->type != cJSON_Array) { - dError("failed to read %s since replicas not found", file); - goto _OVER; - } + pJson = tjsonParse(pData); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } - int32_t numOfReplicas = cJSON_GetArraySize(replicas); - if (numOfReplicas <= 0) { - dError("failed to read %s since numOfReplicas:%d invalid", file, numOfReplicas); - goto _OVER; - } - pOption->numOfReplicas = numOfReplicas; - - for (int32_t i = 0; i < numOfReplicas; ++i) { - SReplica *pReplica = pOption->replicas + i; - - cJSON *replica = cJSON_GetArrayItem(replicas, i); - if (replica == NULL) break; - - cJSON *id = cJSON_GetObjectItem(replica, "id"); - if (id) { - if (id->type != cJSON_Number) { - dError("failed to read %s since id not found", file); - goto _OVER; - } - if (pReplica) { - pReplica->id = id->valueint; - } - } - - cJSON *fqdn = cJSON_GetObjectItem(replica, "fqdn"); - if (fqdn) { - if (fqdn->type != cJSON_String || fqdn->valuestring == NULL) { - dError("failed to read %s since fqdn not found", file); - goto _OVER; - } - if (pReplica) { - tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN); - } - } - - cJSON *port = cJSON_GetObjectItem(replica, "port"); - if (port) { - if (port->type != cJSON_Number) { - dError("failed to read %s since port not found", file); - goto _OVER; - } - if (pReplica) { - pReplica->port = (uint16_t)port->valueint; - } - } - } + if (mmDecodeOption(pJson, pOption) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; } code = 0; + dInfo("succceed to read mnode file %s", file); _OVER: - if (content != NULL) taosMemoryFree(content); - if (root != NULL) cJSON_Delete(root); + if (pData != NULL) taosMemoryFree(pData); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); - if (code == 0) { - dDebug("succcessed to read file %s, deployed:%d", file, pOption->deploy); - } - terrno = code; + if (code != 0) { + dError("failed to read mnode file:%s since %s", file, terrstr()); + } return code; } -int32_t mmWriteFile(const char *path, const SMnodeOpt *pOption) { - char file[PATH_MAX] = {0}; - char realfile[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%smnode.json.bak", path, TD_DIRSEP); - snprintf(realfile, sizeof(realfile), "%s%smnode.json", path, TD_DIRSEP); - - TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); - if (pFile == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to write %s since %s", file, terrstr()); - return -1; - } - - int32_t len = 0; - int32_t maxLen = 4096; - char *content = taosMemoryCalloc(1, maxLen + 1); - - len += snprintf(content + len, maxLen - len, "{\n"); +static int32_t mmEncodeOption(SJson *pJson, const SMnodeOpt 
*pOption) { if (pOption->deploy && pOption->numOfReplicas > 0) { - len += snprintf(content + len, maxLen - len, " \"selfIndex\": %d,\n", pOption->selfIndex); - len += snprintf(content + len, maxLen - len, " \"replicas\": [{\n"); + if (tjsonAddDoubleToObject(pJson, "selfIndex", pOption->selfIndex) < 0) return -1; + + SJson *replicas = tjsonCreateArray(); + if (replicas == NULL) return -1; + if (tjsonAddItemToObject(pJson, "replicas", replicas) < 0) return -1; for (int32_t i = 0; i < pOption->numOfReplicas; ++i) { + SJson *replica = tjsonCreateObject(); + if (replica == NULL) return -1; + const SReplica *pReplica = pOption->replicas + i; - if (pReplica != NULL && pReplica->id > 0) { - len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id); - len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn); - len += snprintf(content + len, maxLen - len, " \"port\": %u\n", pReplica->port); - } - if (i < pOption->numOfReplicas - 1) { - len += snprintf(content + len, maxLen - len, " },{\n"); - } else { - len += snprintf(content + len, maxLen - len, " }],\n"); - } + if (tjsonAddDoubleToObject(replica, "id", pReplica->id) < 0) return -1; + if (tjsonAddStringToObject(replica, "fqdn", pReplica->fqdn) < 0) return -1; + if (tjsonAddDoubleToObject(replica, "port", pReplica->port) < 0) return -1; + if (tjsonAddItemToArray(replicas, replica) < 0) return -1; } } - len += snprintf(content + len, maxLen - len, " \"deployed\": %d\n", pOption->deploy); - len += snprintf(content + len, maxLen - len, "}\n"); - taosWriteFile(pFile, content, len); - taosFsyncFile(pFile); + if (tjsonAddDoubleToObject(pJson, "deployed", pOption->deploy) < 0) return -1; + + return 0; +} + +int32_t mmWriteFile(const char *path, const SMnodeOpt *pOption) { + int32_t code = -1; + char *buffer = NULL; + SJson *pJson = NULL; + TdFilePtr pFile = NULL; + char file[PATH_MAX] = {0}; + char realfile[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s%smnode.json.bak", path, TD_DIRSEP); + snprintf(realfile, sizeof(realfile), "%s%smnode.json", path, TD_DIRSEP); + + terrno = TSDB_CODE_OUT_OF_MEMORY; + pJson = tjsonCreateObject(); + if (pJson == NULL) goto _OVER; + if (mmEncodeOption(pJson, pOption) != 0) goto _OVER; + buffer = tjsonToString(pJson); + if (buffer == NULL) goto _OVER; + terrno = 0; + + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) goto _OVER; + + int32_t len = strlen(buffer); + if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER; + if (taosFsyncFile(pFile) < 0) goto _OVER; + taosCloseFile(&pFile); - taosMemoryFree(content); + if (taosRenameFile(file, realfile) != 0) goto _OVER; - if (taosRenameFile(file, realfile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to rename %s since %s", file, terrstr()); - return -1; - } + code = 0; + dInfo("succeed to write mnode file:%s, deloyed:%d", realfile, pOption->deploy); - dDebug("succeed to write %s, deployed:%d", realfile, pOption->deploy); - return 0; +_OVER: + if (pJson != NULL) tjsonDelete(pJson); + if (buffer != NULL) taosMemoryFree(buffer); + if (pFile != NULL) taosCloseFile(&pFile); + + if (code != 0) { + if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to write mnode file:%s since %s, deloyed:%d", realfile, terrstr(), pOption->deploy); + } + return code; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index dc32054fd7ea2510b5f1e51843511a2a656195dc..bf176ebb40574b5806fe67f5a0b6fb2519886d05 100644 --- 
a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -14,6 +14,7 @@ */ #define _DEFAULT_SOURCE +#include "tjson.h" #include "vmInt.h" #define MAX_CONTENT_LEN 2 * 1024 * 1024 @@ -45,164 +46,171 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes) { return pVnodes; } +static int32_t vmDecodeVnodeList(SJson *pJson, SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { + int32_t code = -1; + SWrapperCfg *pCfgs = NULL; + *ppCfgs = NULL; + + SJson *vnodes = tjsonGetObjectItem(pJson, "vnodes"); + if (vnodes == NULL) return -1; + + int32_t vnodesNum = cJSON_GetArraySize(vnodes); + if (vnodesNum > 0) { + pCfgs = taosMemoryCalloc(vnodesNum, sizeof(SWrapperCfg)); + if (pCfgs == NULL) return -1; + } + + for (int32_t i = 0; i < vnodesNum; ++i) { + SJson *vnode = tjsonGetArrayItem(vnodes, i); + if (vnode == NULL) goto _OVER; + + SWrapperCfg *pCfg = &pCfgs[i]; + tjsonGetInt32ValueFromDouble(vnode, "vgId", pCfg->vgId, code); + if (code < 0) goto _OVER; + tjsonGetInt32ValueFromDouble(vnode, "dropped", pCfg->dropped, code); + if (code < 0) goto _OVER; + tjsonGetInt32ValueFromDouble(vnode, "vgVersion", pCfg->vgVersion, code); + if (code < 0) goto _OVER; + + snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId); + } + + code = 0; + *ppCfgs = pCfgs; + *numOfVnodes = vnodesNum; + +_OVER: + if (*ppCfgs == NULL) taosMemoryFree(pCfgs); + return code; +} + int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int32_t len = 0; - int32_t maxLen = MAX_CONTENT_LEN; - char *content = taosMemoryCalloc(1, maxLen + 1); - cJSON *root = NULL; - FILE *fp = NULL; + int32_t code = -1; + TdFilePtr pFile = NULL; + char *pData = NULL; + SJson *pJson = NULL; char file[PATH_MAX] = {0}; SWrapperCfg *pCfgs = NULL; - TdFilePtr pFile = NULL; - snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("vnode file:%s not exist", file); + return 0; + } + pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { - dInfo("file %s not exist", file); - code = 0; + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open vnode file:%s since %s", file, terrstr()); goto _OVER; } - if (content == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - len = (int32_t)taosReadFile(pFile, content, maxLen); - if (len <= 0) { - dError("failed to read %s since content is null", file); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat mnode file:%s since %s", file, terrstr()); goto _OVER; } - content[len] = 0; - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); + pData = taosMemoryMalloc(size + 1); + if (pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _OVER; } - cJSON *vnodes = cJSON_GetObjectItem(root, "vnodes"); - if (!vnodes || vnodes->type != cJSON_Array) { - dError("failed to read %s since vnodes not found", file); + if (taosReadFile(pFile, pData, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read vnode file:%s since %s", file, terrstr()); goto _OVER; } - int32_t vnodesNum = cJSON_GetArraySize(vnodes); - if (vnodesNum > 0) { - pCfgs = taosMemoryCalloc(vnodesNum, sizeof(SWrapperCfg)); - if (pCfgs == NULL) { - dError("failed to read %s since out of memory", file); - code = 
TSDB_CODE_OUT_OF_MEMORY; - goto _OVER; - } - - for (int32_t i = 0; i < vnodesNum; ++i) { - cJSON *vnode = cJSON_GetArrayItem(vnodes, i); - SWrapperCfg *pCfg = &pCfgs[i]; - - cJSON *vgId = cJSON_GetObjectItem(vnode, "vgId"); - if (!vgId || vgId->type != cJSON_Number) { - dError("failed to read %s since vgId not found", file); - taosMemoryFree(pCfgs); - goto _OVER; - } - pCfg->vgId = vgId->valueint; - snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCfg->vgId); - - cJSON *dropped = cJSON_GetObjectItem(vnode, "dropped"); - if (!dropped || dropped->type != cJSON_Number) { - dError("failed to read %s since dropped not found", file); - taosMemoryFree(pCfgs); - goto _OVER; - } - pCfg->dropped = dropped->valueint; + pData[size] = '\0'; - cJSON *vgVersion = cJSON_GetObjectItem(vnode, "vgVersion"); - if (!vgVersion || vgVersion->type != cJSON_Number) { - dError("failed to read %s since vgVersion not found", file); - taosMemoryFree(pCfgs); - goto _OVER; - } - pCfg->vgVersion = vgVersion->valueint; - } + pJson = tjsonParse(pData); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } - *ppCfgs = pCfgs; + if (vmDecodeVnodeList(pJson, pMgmt, ppCfgs, numOfVnodes) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; } - *numOfVnodes = vnodesNum; code = 0; - dInfo("succcessed to read file %s, numOfVnodes:%d", file, vnodesNum); + dInfo("succceed to read vnode file %s", file); _OVER: - if (content != NULL) taosMemoryFree(content); - if (root != NULL) cJSON_Delete(root); + if (pData != NULL) taosMemoryFree(pData); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); - terrno = code; + if (code != 0) { + dError("failed to read vnode file:%s since %s", file, terrstr()); + } return code; } -int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { - int32_t code = 0; - char file[PATH_MAX] = {0}; - char realfile[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP); - snprintf(realfile, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - - TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); - if (pFile == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to write %s since %s", file, terrstr()); - return -1; - } - - int32_t numOfVnodes = 0; - SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); - if (ppVnodes == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = -1; - dError("failed to write %s while get vnodelist", file); - goto _OVER; - } - - int32_t len = 0; - int32_t maxLen = MAX_CONTENT_LEN; - char *content = taosMemoryCalloc(1, maxLen + 1); - if (content == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = -1; - dError("failed to write %s while malloc content", file); - goto _OVER; - } +static int32_t vmEncodeVnodeList(SJson *pJson, SVnodeObj **ppVnodes, int32_t numOfVnodes) { + SJson *vnodes = tjsonCreateArray(); + if (vnodes == NULL) return -1; + if (tjsonAddItemToObject(pJson, "vnodes", vnodes) < 0) return -1; - len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"vnodes\": [\n"); for (int32_t i = 0; i < numOfVnodes; ++i) { SVnodeObj *pVnode = ppVnodes[i]; if (pVnode == NULL) continue; - len += snprintf(content + len, maxLen - len, " {\n"); - len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", pVnode->vgId); - len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pVnode->dropped); - len += snprintf(content + len, maxLen - 
len, " \"vgVersion\": %d\n", pVnode->vgVersion); - if (i < numOfVnodes - 1) { - len += snprintf(content + len, maxLen - len, " },\n"); - } else { - len += snprintf(content + len, maxLen - len, " }\n"); - } + SJson *vnode = tjsonCreateObject(); + if (vnode == NULL) return -1; + if (tjsonAddDoubleToObject(vnode, "vgId", pVnode->vgId) < 0) return -1; + if (tjsonAddDoubleToObject(vnode, "dropped", pVnode->dropped) < 0) return -1; + if (tjsonAddDoubleToObject(vnode, "vgVersion", pVnode->vgVersion) < 0) return -1; + if (tjsonAddItemToArray(vnodes, vnode) < 0) return -1; } - len += snprintf(content + len, maxLen - len, " ]\n"); - len += snprintf(content + len, maxLen - len, "}\n"); + + return 0; +} + +int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { + int32_t code = -1; + char *buffer = NULL; + SJson *pJson = NULL; + TdFilePtr pFile = NULL; + SVnodeObj **ppVnodes = NULL; + char file[PATH_MAX] = {0}; + char realfile[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP); + snprintf(realfile, sizeof(realfile), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); + + int32_t numOfVnodes = 0; + ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); + if (ppVnodes == NULL) goto _OVER; + + terrno = TSDB_CODE_OUT_OF_MEMORY; + pJson = tjsonCreateObject(); + if (pJson == NULL) goto _OVER; + if (vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes) != 0) goto _OVER; + buffer = tjsonToString(pJson); + if (buffer == NULL) goto _OVER; terrno = 0; -_OVER: - taosWriteFile(pFile, content, len); - taosFsyncFile(pFile); + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) goto _OVER; + + int32_t len = strlen(buffer); + if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER; + if (taosFsyncFile(pFile) < 0) goto _OVER; + taosCloseFile(&pFile); - taosMemoryFree(content); + if (taosRenameFile(file, realfile) != 0) goto _OVER; + + code = 0; + dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes); +_OVER: + if (pJson != NULL) tjsonDelete(pJson); + if (buffer != NULL) taosMemoryFree(buffer); + if (pFile != NULL) taosCloseFile(&pFile); if (ppVnodes != NULL) { for (int32_t i = 0; i < numOfVnodes; ++i) { SVnodeObj *pVnode = ppVnodes[i]; @@ -213,14 +221,9 @@ _OVER: taosMemoryFree(ppVnodes); } - if (code != 0) return -1; - - dInfo("succeed to write %s, numOfVnodes:%d", realfile, numOfVnodes); - code = taosRenameFile(file, realfile); - if (code != 0) { - dError("failed to rename %s to %s", file, realfile); + if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, terrstr(), numOfVnodes); } - return code; } \ No newline at end of file diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 3ce37a5f8e817ec9121503088b1292d3eb0299e8..47772acbdce512a73f24977edb08ad62e0ccef79 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -137,7 +137,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pNode->nodeId = pCreate->replicas[i].id; pNode->nodePort = pCreate->replicas[i].port; tstrncpy(pNode->nodeFqdn, pCreate->replicas[i].fqdn, TSDB_FQDN_LEN); - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); } } @@ -157,6 +157,7 @@ static int32_t vmTsmaAdjustDays(SVnodeCfg *pCfg, SCreateVnodeReq *pReq) { return 
0; } +#if 0 static int32_t vmTsmaProcessCreate(SVnode *pVnode, SCreateVnodeReq *pReq) { if (pReq->isTsma) { SMsgHead *smaMsg = pReq->pTsma; @@ -165,6 +166,7 @@ static int32_t vmTsmaProcessCreate(SVnode *pVnode, SCreateVnodeReq *pReq) { } return 0; } +#endif int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { SCreateVnodeReq req = {0}; @@ -245,12 +247,14 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { goto _OVER; } +#if 0 code = vmTsmaProcessCreate(pImpl, &req); if (code != 0) { dError("vgId:%d, failed to create tsma since %s", req.vgId, terrstr()); code = terrno; goto _OVER; } +#endif code = vnodeStart(pImpl); if (code != 0) { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index f808c67ef66a31f143c0555afade9ff8b31a2163..99ba9b9b3bcc1e0efe9d1a1bb987e01c89786374 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -79,6 +79,8 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { char path[TSDB_FILENAME_LEN] = {0}; + vnodeProposeCommitOnNeed(pVnode->pImpl); + taosThreadRwlockWrlock(&pMgmt->lock); taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); taosThreadRwlockUnlock(&pMgmt->lock); @@ -118,6 +120,9 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { dInfo("vgId:%d, all vnode queues is empty", pVnode->vgId); + dInfo("vgId:%d, post close", pVnode->vgId); + vnodePostClose(pVnode->pImpl); + vmFreeQueue(pMgmt, pVnode); vnodeClose(pVnode->pImpl); pVnode->pImpl = NULL; @@ -338,13 +343,12 @@ static void vmCheckSyncTimeout(SVnodeMgmt *pMgmt) { int32_t numOfVnodes = 0; SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); - for (int32_t i = 0; i < numOfVnodes; ++i) { - SVnodeObj *pVnode = ppVnodes[i]; - vnodeSyncCheckTimeout(pVnode->pImpl); - vmReleaseVnode(pMgmt, pVnode); - } - if (ppVnodes != NULL) { + for (int32_t i = 0; i < numOfVnodes; ++i) { + SVnodeObj *pVnode = ppVnodes[i]; + vnodeSyncCheckTimeout(pVnode->pImpl); + vmReleaseVnode(pMgmt, pVnode); + } taosMemoryFree(ppVnodes); } } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 642ca1ebc1880632821bb453fec511f7bfda6755..cd29b115508ccfcd51d9c130c4847b2ef265dba5 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -134,6 +134,13 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf } } +static void vmSendResponse(SRpcMsg *pMsg) { + if (pMsg->info.handle) { + SRpcMsg rsp = {.info = pMsg->info, .code = terrno}; + rpcSendResponse(&rsp); + } +} + static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) { const STraceId *trace = &pMsg->info.traceId; if (pMsg->contLen < sizeof(SMsgHead)) { @@ -152,7 +159,9 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp if (pVnode == NULL) { dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg, terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen); - return terrno != 0 ? terrno : -1; + terrno = (terrno != 0) ? 
terrno : -1; + vmSendResponse(pMsg); + return terrno; } switch (qtype) { diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h index 7e85e6b72239260d13aba44306309cd2528c3364..02cd6784331b0f7c9f49238f537d70bf36bf0a12 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h @@ -85,6 +85,7 @@ typedef struct SDnode { // dmEnv.c SDnode *dmInstance(); void dmReportStartup(const char *pName, const char *pDesc); +int64_t dmGetClusterId(); // dmMgmt.c int32_t dmInitDnode(SDnode *pDnode); diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index e3bda5a3f01355f33bdf758e0a14824162c50a2b..acf96ad397c405c35fe7089aa4d618dcd8eb13e5 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -16,9 +16,9 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" -static SDnode global = {0}; +static SDnode globalDnode = {0}; -SDnode *dmInstance() { return &global; } +SDnode *dmInstance() { return &globalDnode; } static int32_t dmCheckRepeatInit(SDnode *pDnode) { if (atomic_val_compare_exchange_8(&pDnode->once, DND_ENV_INIT, DND_ENV_READY) != DND_ENV_INIT) { @@ -268,3 +268,8 @@ void dmReportStartup(const char *pName, const char *pDesc) { tstrncpy(pStartup->desc, pDesc, TSDB_STEP_DESC_LEN); dDebug("step:%s, %s", pStartup->name, pStartup->desc); } + +int64_t dmGetClusterId() { + return globalDnode.data.clusterId; +} + diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index b6cce249ea935a55e0515a84955702e5e463ca4d..3dd8a19d9258e6a4073ded3314ac172a3523b4e7 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -79,6 +79,13 @@ static void dmClearVars(SDnode *pDnode) { SDnodeData *pData = &pDnode->data; taosThreadRwlockWrlock(&pData->lock); + if (pData->oldDnodeEps != NULL) { + if (dmWriteEps(pData) == 0) { + dmRemoveDnodePairs(pData); + } + taosArrayDestroy(pData->oldDnodeEps); + pData->oldDnodeEps = NULL; + } if (pData->dnodeEps != NULL) { taosArrayDestroy(pData->dnodeEps); pData->dnodeEps = NULL; diff --git a/source/dnode/mgmt/node_mgmt/src/dmNodes.c b/source/dnode/mgmt/node_mgmt/src/dmNodes.c index 981797834ae268e2eeab923a61fa98837a1d4de0..16931ab6dfa1a67f37792b19946c337461108062 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmNodes.c +++ b/source/dnode/mgmt/node_mgmt/src/dmNodes.c @@ -109,8 +109,8 @@ static int32_t dmStartNodes(SDnode *pDnode) { } } - dInfo("TDengine initialized successfully"); - dmReportStartup("TDengine", "initialized successfully"); + dInfo("The daemon initialized successfully"); + dmReportStartup("The daemon", "initialized successfully"); return 0; } @@ -142,7 +142,7 @@ int32_t dmRunDnode(SDnode *pDnode) { while (1) { if (pDnode->stop) { - dInfo("TDengine is about to stop"); + dInfo("The daemon is about to stop"); dmSetStatus(pDnode, DND_STAT_STOPPED); dmStopNodes(pDnode); dmCloseNodes(pDnode); diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 92b66230e3ee8714c014777a39288cb2f2b28df3..c2f403dfbb4e827cb137a26056e8d7cec41fcf0a 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -100,6 +100,7 @@ typedef struct { bool stopped; SEpSet mnodeEps; SArray *dnodeEps; + SArray *oldDnodeEps; SHashObj *dnodeHash; TdThreadRwlock lock; SMsgCb msgCb; @@ -167,7 +168,8 @@ void dmUpdateEps(SDnodeData *pData, SArray *pDnodeEps); void 
dmGetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet); void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet); void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet); -int32_t dmUpdateDnodeInfo(void *pData, int32_t *dnodeId, int64_t *clusterId, char *fqdn, uint16_t *port); +bool dmUpdateDnodeInfo(void *pData, int32_t *dnodeId, int64_t *clusterId, char *fqdn, uint16_t *port); +void dmRemoveDnodePairs(SDnodeData *pData); #ifdef __cplusplus } diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index a7a63fbacadf472f3b8c5dd318e47021d27f3791..a7a5b8b999cc6bff4b5154c84c8d3749e2f0d1fa 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -15,11 +15,21 @@ #define _DEFAULT_SOURCE #include "dmUtil.h" +#include "tjson.h" #include "tmisce.h" -static void dmPrintEps(SDnodeData *pData); -static bool dmIsEpChanged(SDnodeData *pData, int32_t dnodeId, const char *ep); -static void dmResetEps(SDnodeData *pData, SArray *dnodeEps); +typedef struct { + int32_t id; + uint16_t oldPort; + uint16_t newPort; + char oldFqdn[TSDB_FQDN_LEN]; + char newFqdn[TSDB_FQDN_LEN]; +} SDnodeEpPair; + +static void dmPrintEps(SDnodeData *pData); +static bool dmIsEpChanged(SDnodeData *pData, int32_t dnodeId, const char *ep); +static void dmResetEps(SDnodeData *pData, SArray *dnodeEps); +static int32_t dmReadDnodePairs(SDnodeData *pData); static void dmGetDnodeEp(SDnodeData *pData, int32_t dnodeId, char *pEp, char *pFqdn, uint16_t *pPort) { taosThreadRwlockRdlock(&pData->lock); @@ -40,14 +50,49 @@ static void dmGetDnodeEp(SDnodeData *pData, int32_t dnodeId, char *pEp, char *pF taosThreadRwlockUnlock(&pData->lock); } +static int32_t dmDecodeEps(SJson *pJson, SDnodeData *pData) { + int32_t code = 0; + + tjsonGetInt32ValueFromDouble(pJson, "dnodeId", pData->dnodeId, code); + if (code < 0) return -1; + tjsonGetNumberValue(pJson, "dnodeVer", pData->dnodeVer, code); + if (code < 0) return -1; + tjsonGetNumberValue(pJson, "clusterId", pData->clusterId, code); + if (code < 0) return -1; + tjsonGetInt32ValueFromDouble(pJson, "dropped", pData->dropped, code); + if (code < 0) return -1; + + SJson *dnodes = tjsonGetObjectItem(pJson, "dnodes"); + if (dnodes == NULL) return 0; + int32_t numOfDnodes = tjsonGetArraySize(dnodes); + + for (int32_t i = 0; i < numOfDnodes; ++i) { + SJson *dnode = tjsonGetArrayItem(dnodes, i); + if (dnode == NULL) return -1; + + SDnodeEp dnodeEp = {0}; + tjsonGetInt32ValueFromDouble(dnode, "id", dnodeEp.id, code); + if (code < 0) return -1; + code = tjsonGetStringValue(dnode, "fqdn", dnodeEp.ep.fqdn); + if (code < 0) return -1; + tjsonGetUInt16ValueFromDouble(dnode, "port", dnodeEp.ep.port, code); + if (code < 0) return -1; + tjsonGetInt8ValueFromDouble(dnode, "isMnode", dnodeEp.isMnode, code); + if (code < 0) return -1; + + if (taosArrayPush(pData->dnodeEps, &dnodeEp) == NULL) return -1; + } + + return 0; +} + int32_t dmReadEps(SDnodeData *pData) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int32_t len = 0; - int32_t maxLen = 256 * 1024; - char *content = taosMemoryCalloc(1, maxLen + 1); - cJSON *root = NULL; - char file[PATH_MAX] = {0}; + int32_t code = -1; TdFilePtr pFile = NULL; + char *content = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); pData->dnodeEps = taosArrayInit(1, sizeof(SDnodeEp)); if (pData->dnodeEps == NULL) { @@ -55,113 +100,64 @@ int32_t dmReadEps(SDnodeData *pData) { goto 
_OVER; } - snprintf(file, sizeof(file), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - pFile = taosOpenFile(file, TD_FILE_READ); - if (pFile == NULL) { + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; } - len = (int32_t)taosReadFile(pFile, content, maxLen); - if (len <= 0) { - dError("failed to read %s since content is null", file); - goto _OVER; - } - - content[len] = 0; - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); + pFile = taosOpenFile(file, TD_FILE_READ); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open dnode file:%s since %s", file, terrstr()); goto _OVER; } - cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId"); - if (!dnodeId || dnodeId->type != cJSON_Number) { - dError("failed to read %s since dnodeId not found", file); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat dnode file:%s since %s", file, terrstr()); goto _OVER; } - pData->dnodeId = dnodeId->valueint; - cJSON *dnodeVer = cJSON_GetObjectItem(root, "dnodeVer"); - if (!dnodeVer || dnodeVer->type != cJSON_String) { - dError("failed to read %s since dnodeVer not found", file); + content = taosMemoryMalloc(size + 1); + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _OVER; } - pData->dnodeVer = atoll(dnodeVer->valuestring); - cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId"); - if (!clusterId || clusterId->type != cJSON_String) { - dError("failed to read %s since clusterId not found", file); + if (taosReadFile(pFile, content, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read dnode file:%s since %s", file, terrstr()); goto _OVER; } - pData->clusterId = atoll(clusterId->valuestring); - cJSON *dropped = cJSON_GetObjectItem(root, "dropped"); - if (!dropped || dropped->type != cJSON_Number) { - dError("failed to read %s since dropped not found", file); - goto _OVER; - } - pData->dropped = dropped->valueint; + content[size] = '\0'; - cJSON *dnodes = cJSON_GetObjectItem(root, "dnodes"); - if (!dnodes || dnodes->type != cJSON_Array) { - dError("failed to read %s since dnodes not found", file); + pJson = tjsonParse(content); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; goto _OVER; } - int32_t numOfDnodes = cJSON_GetArraySize(dnodes); - if (numOfDnodes <= 0) { - dError("failed to read %s since numOfDnodes:%d invalid", file, numOfDnodes); + if (dmDecodeEps(pJson, pData) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; goto _OVER; } - for (int32_t i = 0; i < numOfDnodes; ++i) { - cJSON *node = cJSON_GetArrayItem(dnodes, i); - if (node == NULL) break; - - SDnodeEp dnodeEp = {0}; - - cJSON *did = cJSON_GetObjectItem(node, "id"); - if (!did || did->type != cJSON_Number) { - dError("failed to read %s since dnodeId not found", file); - goto _OVER; - } - - dnodeEp.id = did->valueint; - - cJSON *dnodeFqdn = cJSON_GetObjectItem(node, "fqdn"); - if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) { - dError("failed to read %s since dnodeFqdn not found", file); - goto _OVER; - } - tstrncpy(dnodeEp.ep.fqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN); - - cJSON *dnodePort = cJSON_GetObjectItem(node, "port"); - if (!dnodePort || dnodePort->type != cJSON_Number) { - dError("failed to read %s since dnodePort not found", file); - goto _OVER; - } - - dnodeEp.ep.port = dnodePort->valueint; - - cJSON 
*isMnode = cJSON_GetObjectItem(node, "isMnode"); - if (!isMnode || isMnode->type != cJSON_Number) { - dError("failed to read %s since isMnode not found", file); - goto _OVER; - } - dnodeEp.isMnode = isMnode->valueint; - - taosArrayPush(pData->dnodeEps, &dnodeEp); - } - code = 0; - dDebug("succcessed to read file %s", file); + dInfo("succceed to read dnode file %s", file); _OVER: if (content != NULL) taosMemoryFree(content); - if (root != NULL) cJSON_Delete(root); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); + if (code != 0) { + dError("failed to read dnode file:%s since %s", file, terrstr()); + return code; + } + if (taosArrayGetSize(pData->dnodeEps) == 0) { SDnodeEp dnodeEp = {0}; dnodeEp.isMnode = 1; @@ -169,93 +165,90 @@ _OVER: taosArrayPush(pData->dnodeEps, &dnodeEp); } + if (dmReadDnodePairs(pData) != 0) { + return -1; + } + dDebug("reset dnode list on startup"); dmResetEps(pData, pData->dnodeEps); - if (dmIsEpChanged(pData, pData->dnodeId, tsLocalEp)) { + if (pData->oldDnodeEps == NULL && dmIsEpChanged(pData, pData->dnodeId, tsLocalEp)) { dError("localEp %s different with %s and need reconfigured", tsLocalEp, file); + terrno = TSDB_CODE_INVALID_CFG; return -1; } - terrno = code; return code; } -int32_t dmWriteEps(SDnodeData *pData) { - int32_t code = -1; - char *content = NULL; - TdFilePtr pFile = NULL; - - char file[PATH_MAX] = {0}; - char realfile[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%sdnode%sdnode.json.bak", tsDataDir, TD_DIRSEP, TD_DIRSEP); - snprintf(realfile, sizeof(realfile), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - - pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); - if (pFile == NULL) { - dError("failed to open %s since %s", file, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _OVER; - } +static int32_t dmEncodeEps(SJson *pJson, SDnodeData *pData) { + if (tjsonAddDoubleToObject(pJson, "dnodeId", pData->dnodeId) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "dnodeVer", pData->dnodeVer) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "clusterId", pData->clusterId) < 0) return -1; + if (tjsonAddDoubleToObject(pJson, "dropped", pData->dropped) < 0) return -1; - int32_t len = 0; - int32_t maxLen = 256 * 1024; - content = taosMemoryCalloc(1, maxLen + 1); - - len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", pData->dnodeId); - len += snprintf(content + len, maxLen - len, " \"dnodeVer\": \"%" PRId64 "\",\n", pData->dnodeVer); - len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%" PRId64 "\",\n", pData->clusterId); - len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pData->dropped); - len += snprintf(content + len, maxLen - len, " \"dnodes\": [{\n"); + SJson *dnodes = tjsonCreateArray(); + if (dnodes == NULL) return -1; + if (tjsonAddItemToObject(pJson, "dnodes", dnodes) < 0) return -1; int32_t numOfEps = (int32_t)taosArrayGetSize(pData->dnodeEps); for (int32_t i = 0; i < numOfEps; ++i) { SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, i); - len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pDnodeEp->id); - len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pDnodeEp->ep.fqdn); - len += snprintf(content + len, maxLen - len, " \"port\": %u,\n", pDnodeEp->ep.port); - len += snprintf(content + len, maxLen - len, " \"isMnode\": %d\n", pDnodeEp->isMnode); - if (i < numOfEps - 1) { - len += snprintf(content + len, maxLen - len, " 
},{\n"); - } else { - len += snprintf(content + len, maxLen - len, " }]\n"); - } + SJson *dnode = tjsonCreateObject(); + if (dnode == NULL) return -1; + + if (tjsonAddDoubleToObject(dnode, "id", pDnodeEp->id) < 0) return -1; + if (tjsonAddStringToObject(dnode, "fqdn", pDnodeEp->ep.fqdn) < 0) return -1; + if (tjsonAddDoubleToObject(dnode, "port", pDnodeEp->ep.port) < 0) return -1; + if (tjsonAddDoubleToObject(dnode, "isMnode", pDnodeEp->isMnode) < 0) return -1; + if (tjsonAddItemToArray(dnodes, dnode) < 0) return -1; } - len += snprintf(content + len, maxLen - len, "}\n"); - if (taosWriteFile(pFile, content, len) != len) { - dError("failed to write %s since %s", file, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _OVER; - } + return 0; +} - if (taosFsyncFile(pFile) < 0) { - dError("failed to fsync %s since %s", file, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - goto _OVER; - } +int32_t dmWriteEps(SDnodeData *pData) { + int32_t code = -1; + char *buffer = NULL; + SJson *pJson = NULL; + TdFilePtr pFile = NULL; + char file[PATH_MAX] = {0}; + char realfile[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s%sdnode%sdnode.json.bak", tsDataDir, TD_DIRSEP, TD_DIRSEP); + snprintf(realfile, sizeof(realfile), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - taosCloseFile(&pFile); - taosMemoryFreeClear(content); + terrno = TSDB_CODE_OUT_OF_MEMORY; + pJson = tjsonCreateObject(); + if (pJson == NULL) goto _OVER; + if (dmEncodeEps(pJson, pData) != 0) goto _OVER; + buffer = tjsonToString(pJson); + if (buffer == NULL) goto _OVER; + terrno = 0; - if (taosRenameFile(file, realfile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to rename %s since %s", file, terrstr()); - goto _OVER; - } + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) goto _OVER; + + int32_t len = strlen(buffer); + if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER; + if (taosFsyncFile(pFile) < 0) goto _OVER; + + taosCloseFile(&pFile); + if (taosRenameFile(file, realfile) != 0) goto _OVER; code = 0; pData->updateTime = taosGetTimestampMs(); - dInfo("succeed to write %s, dnodeVer:%" PRId64, realfile, pData->dnodeVer); + dInfo("succeed to write dnode file:%s, num:%d ver:%" PRId64, realfile, (int32_t)taosArrayGetSize(pData->dnodeEps), + pData->dnodeVer); _OVER: - if (content != NULL) taosMemoryFreeClear(content); + if (pJson != NULL) tjsonDelete(pJson); + if (buffer != NULL) taosMemoryFree(buffer); if (pFile != NULL) taosCloseFile(&pFile); + if (code != 0) { - dError("failed to write file %s since %s", realfile, terrstr()); + if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to write dnode file:%s since %s, dnodeVer:%" PRId64, realfile, terrstr(), pData->dnodeVer); } - return code; } @@ -355,40 +348,210 @@ void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet) { } } -int32_t dmUpdateDnodeInfo(void *data, int32_t *dnodeId, int64_t *clusterId, char *fqdn, uint16_t *port) { +bool dmUpdateDnodeInfo(void *data, int32_t *did, int64_t *clusterId, char *fqdn, uint16_t *port) { + bool updated = false; SDnodeData *pData = data; - int32_t ret = -1; + int32_t dnodeId = -1; + if (did != NULL) dnodeId = *did; + taosThreadRwlockRdlock(&pData->lock); - if (*dnodeId <= 0) { - for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pData->dnodeEps); ++i) { + + if (pData->oldDnodeEps != NULL) { + int32_t size = (int32_t)taosArrayGetSize(pData->oldDnodeEps); + for (int32_t i = 0; i < size; ++i) { + SDnodeEpPair *pair = 
taosArrayGet(pData->oldDnodeEps, i); + if (strcmp(pair->oldFqdn, fqdn) == 0 && pair->oldPort == *port) { + dInfo("dnode:%d, update ep:%s:%u to %s:%u", dnodeId, fqdn, *port, pair->newFqdn, pair->newPort); + tstrncpy(fqdn, pair->newFqdn, TSDB_FQDN_LEN); + *port = pair->newPort; + updated = true; + } + } + } + + if (did != NULL && dnodeId <= 0) { + int32_t size = (int32_t)taosArrayGetSize(pData->dnodeEps); + for (int32_t i = 0; i < size; ++i) { SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, i); if (strcmp(pDnodeEp->ep.fqdn, fqdn) == 0 && pDnodeEp->ep.port == *port) { - dInfo("dnode:%s:%u, update dnodeId from %d to %d", fqdn, *port, *dnodeId, pDnodeEp->id); - *dnodeId = pDnodeEp->id; - *clusterId = pData->clusterId; - ret = 0; + dInfo("dnode:%s:%u, update dnodeId to dnode:%d", fqdn, *port, pDnodeEp->id); + *did = pDnodeEp->id; + if (clusterId != NULL) *clusterId = pData->clusterId; } } - if (ret != 0) { - dInfo("dnode:%s:%u, failed to update dnodeId:%d", fqdn, *port, *dnodeId); - } - } else { - SDnodeEp *pDnodeEp = taosHashGet(pData->dnodeHash, dnodeId, sizeof(int32_t)); + } + + if (dnodeId > 0) { + SDnodeEp *pDnodeEp = taosHashGet(pData->dnodeHash, &dnodeId, sizeof(int32_t)); if (pDnodeEp) { - if (strcmp(pDnodeEp->ep.fqdn, fqdn) != 0) { - dInfo("dnode:%d, update port from %s to %s", *dnodeId, fqdn, pDnodeEp->ep.fqdn); + if (strcmp(pDnodeEp->ep.fqdn, fqdn) != 0 || pDnodeEp->ep.port != *port) { + dInfo("dnode:%d, update ep:%s:%u to %s:%u", dnodeId, fqdn, *port, pDnodeEp->ep.fqdn, pDnodeEp->ep.port); tstrncpy(fqdn, pDnodeEp->ep.fqdn, TSDB_FQDN_LEN); - } - if (pDnodeEp->ep.port != *port) { - dInfo("dnode:%d, update port from %u to %u", *dnodeId, *port, pDnodeEp->ep.port); *port = pDnodeEp->ep.port; + updated = true; } - *clusterId = pData->clusterId; - ret = 0; - } else { - dInfo("dnode:%d, failed to update dnode info", *dnodeId); + if (clusterId != NULL) *clusterId = pData->clusterId; } } + taosThreadRwlockUnlock(&pData->lock); - return ret; -} \ No newline at end of file + return updated; +} + +static int32_t dmDecodeEpPairs(SJson *pJson, SDnodeData *pData) { + int32_t code = 0; + + SJson *dnodes = tjsonGetObjectItem(pJson, "dnodes"); + if (dnodes == NULL) return 0; + int32_t numOfDnodes = tjsonGetArraySize(dnodes); + + for (int32_t i = 0; i < numOfDnodes; ++i) { + SJson *dnode = tjsonGetArrayItem(dnodes, i); + if (dnode == NULL) return -1; + + SDnodeEpPair pair = {0}; + tjsonGetInt32ValueFromDouble(dnode, "id", pair.id, code); + if (code < 0) return -1; + code = tjsonGetStringValue(dnode, "fqdn", pair.oldFqdn); + if (code < 0) return -1; + tjsonGetUInt16ValueFromDouble(dnode, "port", pair.oldPort, code); + if (code < 0) return -1; + code = tjsonGetStringValue(dnode, "new_fqdn", pair.newFqdn); + if (code < 0) return -1; + tjsonGetUInt16ValueFromDouble(dnode, "new_port", pair.newPort, code); + if (code < 0) return -1; + + if (taosArrayPush(pData->oldDnodeEps, &pair) == NULL) return -1; + } + + return code; +} + +void dmRemoveDnodePairs(SDnodeData *pData) { + char file[PATH_MAX] = {0}; + char bak[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); + snprintf(bak, sizeof(bak), "%s%sdnode%sep.json.bak", tsDataDir, TD_DIRSEP, TD_DIRSEP); + dInfo("dnode file:%s is rename to bak file", file); + (void)taosRenameFile(file, bak); +} + +static int32_t dmReadDnodePairs(SDnodeData *pData) { + int32_t code = -1; + TdFilePtr pFile = NULL; + char *content = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; + snprintf(file, sizeof(file), 
"%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); + + if (taosStatFile(file, NULL, NULL) < 0) { + dDebug("dnode file:%s not exist", file); + code = 0; + goto _OVER; + } + + pFile = taosOpenFile(file, TD_FILE_READ); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open dnode file:%s since %s", file, terrstr()); + goto _OVER; + } + + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat dnode file:%s since %s", file, terrstr()); + goto _OVER; + } + + content = taosMemoryMalloc(size + 1); + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + if (taosReadFile(pFile, content, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read dnode file:%s since %s", file, terrstr()); + goto _OVER; + } + + content[size] = '\0'; + + pJson = tjsonParse(content); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } + + pData->oldDnodeEps = taosArrayInit(1, sizeof(SDnodeEpPair)); + if (pData->oldDnodeEps == NULL) { + dError("failed to calloc dnodeEp array since %s", strerror(errno)); + goto _OVER; + } + + if (dmDecodeEpPairs(pJson, pData) < 0) { + taosArrayDestroy(pData->oldDnodeEps); + pData->oldDnodeEps = NULL; + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } + + code = 0; + dInfo("succceed to read dnode file %s", file); + +_OVER: + if (content != NULL) taosMemoryFree(content); + if (pJson != NULL) cJSON_Delete(pJson); + if (pFile != NULL) taosCloseFile(&pFile); + + if (code != 0) { + dError("failed to read dnode file:%s since %s", file, terrstr()); + return code; + } + + // update old fqdn and port + for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pData->oldDnodeEps); ++i) { + SDnodeEpPair *pair = taosArrayGet(pData->oldDnodeEps, i); + for (int32_t j = 0; j < (int32_t)taosArrayGetSize(pData->dnodeEps); ++j) { + SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, j); + if (pDnodeEp->id == pair->id) { + tstrncpy(pair->oldFqdn, pDnodeEp->ep.fqdn, TSDB_FQDN_LEN); + pair->oldPort = pDnodeEp->ep.port; + } + } + } + + // check new fqdn and port + for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pData->oldDnodeEps); ++i) { + SDnodeEpPair *pair = taosArrayGet(pData->oldDnodeEps, i); + for (int32_t j = 0; j < (int32_t)taosArrayGetSize(pData->dnodeEps); ++j) { + SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, j); + if (pDnodeEp->id != pair->id && + (strcmp(pDnodeEp->ep.fqdn, pair->newFqdn) == 0 && pDnodeEp->ep.port == pair->newPort)) { + dError("dnode:%d, can't update ep:%s:%u to %s:%u since already exists as dnode:%d", pair->id, pair->oldFqdn, + pair->oldPort, pair->newFqdn, pair->newPort, pDnodeEp->id); + taosArrayDestroy(pData->oldDnodeEps); + pData->oldDnodeEps = NULL; + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + } + } + + for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pData->oldDnodeEps); ++i) { + SDnodeEpPair *pair = taosArrayGet(pData->oldDnodeEps, i); + for (int32_t j = 0; j < (int32_t)taosArrayGetSize(pData->dnodeEps); ++j) { + SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, j); + if (strcmp(pDnodeEp->ep.fqdn, pair->oldFqdn) == 0 && pDnodeEp->ep.port == pair->oldPort) { + dInfo("dnode:%d, will update ep:%s:%u to %s:%u", pDnodeEp->id, pDnodeEp->ep.fqdn, pDnodeEp->ep.port, + pair->newFqdn, pair->newPort); + tstrncpy(pDnodeEp->ep.fqdn, pair->newFqdn, TSDB_FQDN_LEN); + pDnodeEp->ep.port = pair->newPort; + } + } + } + + pData->dnodeVer = 0; + return 0; +} diff --git 
a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index 2eb1462efcc5aa490a9c0e7e51f0ef9eaae54b87..fb05f08c0c5e26130c399e179052c81dae56ebf8 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -15,104 +15,133 @@ #define _DEFAULT_SOURCE #include "dmUtil.h" +#include "tjson.h" #define MAXLEN 1024 +static int32_t dmDecodeFile(SJson *pJson, bool *deployed) { + int32_t code = 0; + int32_t value = 0; + + tjsonGetInt32ValueFromDouble(pJson, "deployed", value, code); + if (code < 0) return -1; + + *deployed = (value != 0); + return code; +} + int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { - int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; - int64_t len = 0; - char content[MAXLEN + 1] = {0}; - cJSON *root = NULL; - char file[PATH_MAX] = {0}; + int32_t code = -1; TdFilePtr pFile = NULL; - + char *content = NULL; + SJson *pJson = NULL; + char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); + + if (taosStatFile(file, NULL, NULL) < 0) { + dInfo("file:%s not exist", file); + code = 0; + goto _OVER; + } + pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { - code = 0; + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to open file:%s since %s", file, terrstr()); goto _OVER; } - len = taosReadFile(pFile, content, MAXLEN); - if (len <= 0) { - dError("failed to read %s since content is null", file); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to fstat file:%s since %s", file, terrstr()); goto _OVER; } - root = cJSON_Parse(content); - if (root == NULL) { - dError("failed to read %s since invalid json format", file); + content = taosMemoryMalloc(size + 1); + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _OVER; } - cJSON *deployed = cJSON_GetObjectItem(root, "deployed"); - if (!deployed || deployed->type != cJSON_Number) { - dError("failed to read %s since deployed not found", file); + if (taosReadFile(pFile, content, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to read file:%s since %s", file, terrstr()); + goto _OVER; + } + + content[size] = '\0'; + + pJson = tjsonParse(content); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } + + if (dmDecodeFile(pJson, pDeployed) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; goto _OVER; } - *pDeployed = deployed->valueint != 0; - dDebug("succcessed to read file %s, deployed:%d", file, *pDeployed); code = 0; + dInfo("succceed to read mnode file %s", file); _OVER: - if (root != NULL) cJSON_Delete(root); + if (content != NULL) taosMemoryFree(content); + if (pJson != NULL) cJSON_Delete(pJson); if (pFile != NULL) taosCloseFile(&pFile); - terrno = code; + if (code != 0) { + dError("failed to read dnode file:%s since %s", file, terrstr()); + } return code; } +static int32_t dmEncodeFile(SJson *pJson, bool deployed) { + if (tjsonAddDoubleToObject(pJson, "deployed", deployed) < 0) return -1; + return 0; +} + int32_t dmWriteFile(const char *path, const char *name, bool deployed) { int32_t code = -1; - int32_t len = 0; - char content[MAXLEN + 1] = {0}; + char *buffer = NULL; + SJson *pJson = NULL; + TdFilePtr pFile = NULL; char file[PATH_MAX] = {0}; char realfile[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; - snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); snprintf(realfile, sizeof(realfile), "%s%s%s.json", path, TD_DIRSEP, name); - pFile = 
taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); - if (pFile == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to write %s since %s", file, terrstr()); - goto _OVER; - } - - len += snprintf(content + len, MAXLEN - len, "{\n"); - len += snprintf(content + len, MAXLEN - len, " \"deployed\": %d\n", deployed); - len += snprintf(content + len, MAXLEN - len, "}\n"); + terrno = TSDB_CODE_OUT_OF_MEMORY; + pJson = tjsonCreateObject(); + if (pJson == NULL) goto _OVER; + if (dmEncodeFile(pJson, deployed) != 0) goto _OVER; + buffer = tjsonToString(pJson); + if (buffer == NULL) goto _OVER; + terrno = 0; - if (taosWriteFile(pFile, content, len) != len) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to write file:%s since %s", file, terrstr()); - goto _OVER; - } + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) goto _OVER; - if (taosFsyncFile(pFile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to fsync file:%s since %s", file, terrstr()); - goto _OVER; - } + int32_t len = strlen(buffer); + if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER; + if (taosFsyncFile(pFile) < 0) goto _OVER; taosCloseFile(&pFile); + if (taosRenameFile(file, realfile) != 0) goto _OVER; - if (taosRenameFile(file, realfile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - dError("failed to rename %s since %s", file, terrstr()); - return -1; - } - - dInfo("succeed to write %s, deployed:%d", realfile, deployed); code = 0; + dInfo("succeed to write file:%s, deployed:%d", realfile, deployed); _OVER: - if (pFile != NULL) { - taosCloseFile(&pFile); - } + if (pJson != NULL) tjsonDelete(pJson); + if (buffer != NULL) taosMemoryFree(buffer); + if (pFile != NULL) taosCloseFile(&pFile); + if (code != 0) { + if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); + dError("failed to write file:%s since %s, deployed:%d", realfile, terrstr(), deployed); + } return code; } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 9ccf990d6c93381112fb52acc99d571053022566..075122d85ba6aa4504a2f6168db35de1f117a0a4 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -445,6 +445,7 @@ typedef struct { STableMetaRsp* pMeta; bool sysDbRsp; char db[TSDB_DB_FNAME_LEN]; + char filterTb[TSDB_TABLE_NAME_LEN]; } SShowObj; typedef struct { diff --git a/source/dnode/mnode/impl/inc/mndUser.h b/source/dnode/mnode/impl/inc/mndUser.h index cf7deba397556f8df3550067057a03f6ca374a2a..8943ba703ee47442a49fe25ab44ed7fcfb7867cd 100644 --- a/source/dnode/mnode/impl/inc/mndUser.h +++ b/source/dnode/mnode/impl/inc/mndUser.h @@ -34,6 +34,8 @@ SHashObj *mndDupDbHash(SHashObj *pOld); SHashObj *mndDupTopicHash(SHashObj *pOld); int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp, int32_t *pRspLen); +int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db); +int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c index ca03207d2b95bab562c48a02ca383d67bba7349d..e0d8ecb3eb949d4161f5330d62250b3267ed513b 100644 --- a/source/dnode/mnode/impl/src/mndCluster.c +++ b/source/dnode/mnode/impl/src/mndCluster.c @@ -20,6 +20,8 @@ #define CLUSTER_VER_NUMBE 1 #define CLUSTER_RESERVE_SIZE 60 +char tsVersionName[16] = "community"; +int64_t tsExpireTime = 0; static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster);
static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw); @@ -291,6 +293,18 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock * pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false); + char ver[12] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(ver, tsVersionName, pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)ver, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + if (tsExpireTime <= 0) { + colDataAppendNULL(pColInfo, numOfRows); + } else { + colDataAppend(pColInfo, numOfRows, (const char *)&tsExpireTime, false); + } + sdbRelease(pSdb, pCluster); numOfRows++; } diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index b2ca1e7ad8f19c016a155e462c693fda50e47027..eeb4249217bbc08780939b10a2d7fb4b467226c3 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -745,6 +745,7 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw) { if (tDecodeSMqConsumerObj(buf, pConsumer) == NULL) { goto CM_DECODE_OVER; } + tmsgUpdateDnodeEpSet(&pConsumer->ep); terrno = TSDB_CODE_SUCCESS; diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index c44c2f948bc5c7d32ff7fb901fe7f7f62f214b27..054c02b3b0d6f3fea6bb17b0ab5c58d78f084cdd 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -892,7 +892,7 @@ static int32_t mndProcessGetDbCfgReq(SRpcMsg *pReq) { cfgRsp.numOfRetensions = pDb->cfg.numOfRetensions; cfgRsp.pRetensions = pDb->cfg.pRetensions; cfgRsp.schemaless = pDb->cfg.schemaless; - + cfgRsp.sstTrigger = pDb->cfg.sstTrigger; int32_t contLen = tSerializeSDbCfgRsp(NULL, 0, &cfgRsp); void *pRsp = rpcMallocCont(contLen); if (pRsp == NULL) { @@ -1054,17 +1054,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) { if (mndDropStreamByDb(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndDropSmasByDb(pMnode, pTrans, pDb) != 0) goto _OVER; if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER; - - SUserObj *pUser = mndAcquireUser(pMnode, pDb->createUser); - if (pUser != NULL) { - pUser->authVersion++; - SSdbRaw *pCommitRaw = mndUserActionEncode(pUser); - if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { - mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr()); - goto _OVER; - } - (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); - } + if (mndUserRemoveDb(pMnode, pTrans, pDb->name) != 0) goto _OVER; int32_t rspLen = 0; void *pRsp = NULL; diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index a5f77513de9f6de359acf129612136af1762fa68..8983d73c7078d09ec82cefc7d54592f9457330ef 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -167,6 +167,10 @@ void tFreeStreamObj(SStreamObj *pStream) { taosArrayDestroy(pLevel); } taosArrayDestroy(pStream->tasks); + // tagSchema.pSchema + if (pStream->tagSchema.nCols > 0) { + taosMemoryFree(pStream->tagSchema.pSchema); + } } SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) { diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index d7b16c2c8ea7cd4e351b3333551f450af77929b8..97490beb3c49d24bbc5679d92cccb2bdf5201013 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -180,6 
+180,9 @@ static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw) { SDB_GET_RESERVE(pRaw, dataPos, TSDB_DNODE_RESERVE_SIZE, _OVER) terrno = 0; + if (tmsgUpdateDnodeInfo(&pDnode->id, NULL, pDnode->fqdn, &pDnode->port)) { + mInfo("dnode:%d, endpoint changed", pDnode->id); + } _OVER: if (terrno != 0) { @@ -188,7 +191,7 @@ _OVER: return NULL; } - mTrace("dnode:%d, decode from raw:%p, row:%p", pDnode->id, pRaw, pDnode); + mTrace("dnode:%d, decode from raw:%p, row:%p ep:%s:%u", pDnode->id, pRaw, pDnode, pDnode->fqdn, pDnode->port); return pRow; } @@ -308,7 +311,8 @@ void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps) { void *pIter = NULL; while (1) { SDnodeObj *pDnode = NULL; - pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode); + ESdbStatus objStatus = 0; + pIter = sdbFetchAll(pSdb, SDB_DNODE, pIter, (void **)&pDnode, &objStatus, true); if (pIter == NULL) break; SDnodeEp dnodeEp = {0}; @@ -1050,7 +1054,7 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB status = "offline"; } - char b1[9] = {0}; + char b1[16] = {0}; STR_TO_VARSTR(b1, status); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, b1, false); diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 31f31a15baf31813aeb071ff933ff5263d5976f8..244e6058d4a493a8d9f6b2cf794cca6a3db2cdad 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -293,7 +293,7 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { goto _OVER; } - mInfo("func:%s, start to create", createReq.name); + mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c index c8c8e06c5ed5a3b041161cf7b3d573053f1a93ec..add32fd335fba086e2eb1d21d77f8d56c9079596 100644 --- a/source/dnode/mnode/impl/src/mndMnode.c +++ b/source/dnode/mnode/impl/src/mndMnode.c @@ -15,13 +15,13 @@ #define _DEFAULT_SOURCE #include "mndMnode.h" +#include "mndCluster.h" #include "mndDnode.h" #include "mndPrivilege.h" #include "mndShow.h" #include "mndSync.h" #include "mndTrans.h" #include "tmisce.h" -#include "mndCluster.h" #define MNODE_VER_NUMBER 1 #define MNODE_RESERVE_SIZE 64 @@ -181,9 +181,8 @@ _OVER: static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj) { mTrace("mnode:%d, perform insert action, row:%p", pObj->id, pObj); - pObj->pDnode = sdbAcquire(pSdb, SDB_DNODE, &pObj->id); + pObj->pDnode = sdbAcquireNotReadyObj(pSdb, SDB_DNODE, &pObj->id); if (pObj->pDnode == NULL) { - terrno = TSDB_CODE_MND_DNODE_NOT_EXIST; mError("mnode:%d, failed to perform insert action since %s", pObj->id, terrstr()); return -1; } @@ -785,9 +784,9 @@ static void mndReloadSyncConfig(SMnode *pMnode) { int32_t code = syncReconfig(pMnode->syncMgmt.sync, &cfg); if (code != 0) { - mError("vgId:1, failed to reconfig mnode sync since %s", terrstr()); + mError("vgId:1, mnode sync reconfig failed since %s", terrstr()); } else { - mInfo("vgId:1, reconfig mnode sync success"); + mInfo("vgId:1, mnode sync reconfig success"); } } } diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 7a8de4099f0a11f313552a6f2629c797fda06f39..48d8e89bfe73e47dd89e75f8c13626ebd3d1ecf4 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -19,6 +19,7 @@ #include "systable.h" #define 
SHOW_STEP_SIZE 100 +#define SHOW_COLS_STEP_SIZE 4096 static SShowObj *mndCreateShowObj(SMnode *pMnode, SRetrieveTableReq *pReq); static void mndFreeShowObj(SShowObj *pShow); @@ -76,6 +77,8 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_TABLE; } else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, len) == 0) { type = TSDB_MGMT_TABLE_TAG; + } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, len) == 0) { + type = TSDB_MGMT_TABLE_COL; } else if (strncasecmp(name, TSDB_INS_TABLE_TABLE_DISTRIBUTED, len) == 0) { // type = TSDB_MGMT_TABLE_DIST; } else if (strncasecmp(name, TSDB_INS_TABLE_USERS, len) == 0) { @@ -131,6 +134,7 @@ static SShowObj *mndCreateShowObj(SMnode *pMnode, SRetrieveTableReq *pReq) { showObj.pMnode = pMnode; showObj.type = convertToRetrieveType(pReq->tb, tListLen(pReq->tb)); memcpy(showObj.db, pReq->db, TSDB_DB_FNAME_LEN); + strncpy(showObj.filterTb, pReq->filterTb, TSDB_TABLE_NAME_LEN); int32_t keepTime = tsShellActivityTimer * 6 * 1000; SShowObj *pShow = taosCachePut(pMgmt->cache, &showId, sizeof(int64_t), &showObj, size, keepTime); @@ -190,13 +194,15 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { int32_t rowsToRead = SHOW_STEP_SIZE; int32_t size = 0; int32_t rowsRead = 0; - + mDebug("mndProcessRetrieveSysTableReq start"); SRetrieveTableReq retrieveReq = {0}; if (tDeserializeSRetrieveTableReq(pReq->pCont, pReq->contLen, &retrieveReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; return -1; } + mDebug("mndProcessRetrieveSysTableReq tb:%s", retrieveReq.tb); + if (retrieveReq.showId == 0) { STableMetaRsp *pMeta = taosHashGet(pMnode->infosMeta, retrieveReq.tb, strlen(retrieveReq.tb)); if (pMeta == NULL) { @@ -226,6 +232,9 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { } } + if(pShow->type == TSDB_MGMT_TABLE_COL){ // expand capacity for ins_columns + rowsToRead = SHOW_COLS_STEP_SIZE; + } ShowRetrieveFp retrieveFp = pMgmt->retrieveFps[pShow->type]; if (retrieveFp == NULL) { mndReleaseShowObj(pShow, false); diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 141bb1df601fe7d1e3c27090170e37d6769d879f..90baf57c52159b78c0f78b11b199b32299d572de 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -202,11 +202,13 @@ static SSdbRow *mndSmaActionDecode(SSdbRaw *pRaw) { _OVER: if (terrno != 0) { - mError("sma:%s, failed to decode from raw:%p since %s", pSma == NULL ?
"null" : pSma->name, pRaw, terrstr()); - taosMemoryFreeClear(pSma->expr); - taosMemoryFreeClear(pSma->tagsFilter); - taosMemoryFreeClear(pSma->sql); - taosMemoryFreeClear(pSma->ast); + if (pSma != NULL) { + mError("sma:%s, failed to decode from raw:%p since %s", pSma->name, pRaw, terrstr()); + taosMemoryFreeClear(pSma->expr); + taosMemoryFreeClear(pSma->tagsFilter); + taosMemoryFreeClear(pSma->sql); + taosMemoryFreeClear(pSma->ast); + } taosMemoryFreeClear(pRow); return NULL; } @@ -457,8 +459,10 @@ static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, int32_t contLen = 0; void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen); - taosMemoryFreeClear(pSmaReq); - if (pReq == NULL) return -1; + if (pReq == NULL) { + taosMemoryFreeClear(pSmaReq); + return -1; + } action.pCont = pReq; action.contLen = contLen; @@ -466,10 +470,21 @@ static int32_t mndSetCreateSmaVgroupRedoActions(SMnode *pMnode, STrans *pTrans, action.acceptableCode = TSDB_CODE_VND_ALREADY_EXIST; if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFreeClear(pSmaReq); taosMemoryFree(pReq); return -1; } + action.pCont = pSmaReq; + action.contLen = smaContLen; + action.msgType = TDMT_VND_CREATE_SMA; + action.acceptableCode = TSDB_CODE_TSMA_ALREADY_EXIST; + + if (mndTransAppendRedoAction(pTrans, &action) != 0) { + taosMemoryFreeClear(pSmaReq); + return -1; + } + return 0; } diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index d504a94700096162d636390ee6eb5e2d2f77f139..c243e83a15333c877434c07ed961c1ce2f5af9e2 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -43,6 +43,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq); static int32_t mndProcessDropStbReq(SRpcMsg *pReq); static int32_t mndProcessTableMetaReq(SRpcMsg *pReq); static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); +static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextStb(SMnode *pMnode, void *pIter); static int32_t mndProcessTableCfgReq(SRpcMsg *pReq); static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, @@ -69,10 +70,14 @@ int32_t mndInitStb(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TABLE_META, mndProcessTableMetaReq); mndSetMsgHandle(pMnode, TDMT_MND_TTL_TIMER, mndProcessTtlTimer); mndSetMsgHandle(pMnode, TDMT_MND_TABLE_CFG, mndProcessTableCfgReq); +// mndSetMsgHandle(pMnode, TDMT_MND_SYSTABLE_RETRIEVE, mndProcessRetrieveStbReq); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STB, mndRetrieveStb); mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_STB, mndCancelGetNextStb); + mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_COL, mndRetrieveStbCol); + mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_COL, mndCancelGetNextStb); + return sdbSetTable(pMnode->pSdb, table); } @@ -2489,6 +2494,283 @@ void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t } } +//static int32_t mndProcessRetrieveStbReq(SRpcMsg *pReq) { +// SMnode *pMnode = pReq->info.node; +// SShowMgmt *pMgmt = &pMnode->showMgmt; +// SShowObj *pShow = NULL; +// int32_t rowsToRead = SHOW_STEP_SIZE; +// int32_t rowsRead = 0; +// +// SRetrieveTableReq retrieveReq = {0}; +// if (tDeserializeSRetrieveTableReq(pReq->pCont, pReq->contLen, &retrieveReq) != 0) { +// terrno = TSDB_CODE_INVALID_MSG; +// return -1; +// } +// +// SMnode *pMnode = pReq->info.node; +// SSdb 
*pSdb = pMnode->pSdb; +// int32_t numOfRows = 0; +// SDbObj *pDb = NULL; +// ESdbStatus objStatus = 0; +// +// SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user); +// if (pUser == NULL) return 0; +// bool sysinfo = pUser->sysInfo; +// +// // Append the information_schema database into the result. +//// if (!pShow->sysDbRsp) { +//// SDbObj infoschemaDb = {0}; +//// setInformationSchemaDbCfg(pMnode, &infoschemaDb); +//// size_t numOfTables = 0; +//// getVisibleInfosTablesNum(sysinfo, &numOfTables); +//// mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1); +//// +//// numOfRows += 1; +//// +//// SDbObj perfschemaDb = {0}; +//// setPerfSchemaDbCfg(pMnode, &perfschemaDb); +//// numOfTables = 0; +//// getPerfDbMeta(NULL, &numOfTables); +//// mndDumpDbInfoData(pMnode, pBlock, &perfschemaDb, pShow, numOfRows, numOfTables, true, 0, 1); +//// +//// numOfRows += 1; +//// pShow->sysDbRsp = true; +//// } +// +// SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_COLS); +// blockDataEnsureCapacity(p, rowsToRead); +// +// size_t size = 0; +// const SSysTableMeta* pSysDbTableMeta = NULL; +// +// getInfosDbMeta(&pSysDbTableMeta, &size); +// p->info.rows = buildDbColsInfoBlock(sysinfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB); +// +// getPerfDbMeta(&pSysDbTableMeta, &size); +// p->info.rows = buildDbColsInfoBlock(sysinfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB); +// +// blockDataDestroy(p); +// +// +// while (numOfRows < rowsToRead) { +// pShow->pIter = sdbFetchAll(pSdb, SDB_DB, pShow->pIter, (void **)&pDb, &objStatus, true); +// if (pShow->pIter == NULL) break; +// if (strncmp(retrieveReq.db, pDb->name, strlen(retrieveReq.db)) != 0){ +// continue; +// } +// if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_OR_WRITE_DB, pDb) != 0) { +// continue; +// } +// +// while (numOfRows < rowsToRead) { +// pShow->pIter = sdbFetch(pSdb, SDB_STB, pShow->pIter, (void **)&pStb); +// if (pShow->pIter == NULL) break; +// +// if (pDb != NULL && pStb->dbUid != pDb->uid) { +// sdbRelease(pSdb, pStb); +// continue; +// } +// +// cols = 0; +// +// SName name = {0}; +// char stbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; +// mndExtractTbNameFromStbFullName(pStb->name, &stbName[VARSTR_HEADER_SIZE], TSDB_TABLE_NAME_LEN); +// varDataSetLen(stbName, strlen(&stbName[VARSTR_HEADER_SIZE])); +// +// SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)stbName, false); +// +// char db[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; +// tNameFromString(&name, pStb->db, T_NAME_ACCT | T_NAME_DB); +// tNameGetDbName(&name, varDataVal(db)); +// varDataSetLen(db, strlen(varDataVal(db))); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)db, false); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)&pStb->createdTime, false); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)&pStb->numOfColumns, false); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)&pStb->numOfTags, false); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)&pStb->updateTime, false); // number of tables +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// if 
(pStb->commentLen > 0) { +// char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0}; +// STR_TO_VARSTR(comment, pStb->comment); +// colDataAppend(pColInfo, numOfRows, comment, false); +// } else if (pStb->commentLen == 0) { +// char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0}; +// STR_TO_VARSTR(comment, ""); +// colDataAppend(pColInfo, numOfRows, comment, false); +// } else { +// colDataAppendNULL(pColInfo, numOfRows); +// } +// +// char watermark[64 + VARSTR_HEADER_SIZE] = {0}; +// sprintf(varDataVal(watermark), "%" PRId64 "a,%" PRId64 "a", pStb->watermark[0], pStb->watermark[1]); +// varDataSetLen(watermark, strlen(varDataVal(watermark))); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)watermark, false); +// +// char maxDelay[64 + VARSTR_HEADER_SIZE] = {0}; +// sprintf(varDataVal(maxDelay), "%" PRId64 "a,%" PRId64 "a", pStb->maxdelay[0], pStb->maxdelay[1]); +// varDataSetLen(maxDelay, strlen(varDataVal(maxDelay))); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)maxDelay, false); +// +// char rollup[160 + VARSTR_HEADER_SIZE] = {0}; +// int32_t rollupNum = (int32_t)taosArrayGetSize(pStb->pFuncs); +// char *sep = ", "; +// int32_t sepLen = strlen(sep); +// int32_t rollupLen = sizeof(rollup) - VARSTR_HEADER_SIZE - 2; +// for (int32_t i = 0; i < rollupNum; ++i) { +// char *funcName = taosArrayGet(pStb->pFuncs, i); +// if (i) { +// strncat(varDataVal(rollup), sep, rollupLen); +// rollupLen -= sepLen; +// } +// strncat(varDataVal(rollup), funcName, rollupLen); +// rollupLen -= strlen(funcName); +// } +// varDataSetLen(rollup, strlen(varDataVal(rollup))); +// +// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); +// colDataAppend(pColInfo, numOfRows, (const char *)rollup, false); +// +// numOfRows++; +// sdbRelease(pSdb, pStb); +// } +// +// if (pDb != NULL) { +// mndReleaseDb(pMnode, pDb); +// } +// +// sdbRelease(pSdb, pDb); +// } +// +// pShow->numOfRows += numOfRows; +// mndReleaseUser(pMnode, pUser); +// +// +// +// +// +// +// +// +// ShowRetrieveFp retrieveFp = pMgmt->retrieveFps[pShow->type]; +// if (retrieveFp == NULL) { +// mndReleaseShowObj(pShow, false); +// terrno = TSDB_CODE_MSG_NOT_PROCESSED; +// mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, terrstr()); +// return -1; +// } +// +// mDebug("show:0x%" PRIx64 ", start retrieve data, type:%d", pShow->id, pShow->type); +// if (retrieveReq.user[0] != 0) { +// memcpy(pReq->info.conn.user, retrieveReq.user, TSDB_USER_LEN); +// } else { +// memcpy(pReq->info.conn.user, TSDB_DEFAULT_USER, strlen(TSDB_DEFAULT_USER) + 1); +// } +// if (retrieveReq.db[0] && mndCheckShowPrivilege(pMnode, pReq->info.conn.user, pShow->type, retrieveReq.db) != 0) { +// return -1; +// } +// +// int32_t numOfCols = pShow->pMeta->numOfColumns; +// +// SSDataBlock *pBlock = createDataBlock(); +// for (int32_t i = 0; i < numOfCols; ++i) { +// SColumnInfoData idata = {0}; +// +// SSchema *p = &pShow->pMeta->pSchemas[i]; +// +// idata.info.bytes = p->bytes; +// idata.info.type = p->type; +// idata.info.colId = p->colId; +// blockDataAppendColInfo(pBlock, &idata); +// } +// +// blockDataEnsureCapacity(pBlock, rowsToRead); +// +// if (mndCheckRetrieveFinished(pShow)) { +// mDebug("show:0x%" PRIx64 ", read finished, numOfRows:%d", pShow->id, pShow->numOfRows); +// rowsRead = 0; +// } else { +// rowsRead = (*retrieveFp)(pReq, pShow, pBlock, rowsToRead); +// if (rowsRead < 0) { +// terrno = 
rowsRead; +// mDebug("show:0x%" PRIx64 ", retrieve completed", pShow->id); +// mndReleaseShowObj(pShow, true); +// blockDataDestroy(pBlock); +// return -1; +// } +// +// pBlock->info.rows = rowsRead; +// mDebug("show:0x%" PRIx64 ", stop retrieve data, rowsRead:%d numOfRows:%d", pShow->id, rowsRead, pShow->numOfRows); +// } +// +// size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + +// blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)); +// +// SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); +// if (pRsp == NULL) { +// mndReleaseShowObj(pShow, false); +// terrno = TSDB_CODE_OUT_OF_MEMORY; +// mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, terrstr()); +// blockDataDestroy(pBlock); +// return -1; +// } +// +// pRsp->handle = htobe64(pShow->id); +// +// if (rowsRead > 0) { +// char *pStart = pRsp->data; +// SSchema *ps = pShow->pMeta->pSchemas; +// +// *(int32_t *)pStart = htonl(pShow->pMeta->numOfColumns); +// pStart += sizeof(int32_t); // number of columns +// +// for (int32_t i = 0; i < pShow->pMeta->numOfColumns; ++i) { +// SSysTableSchema *pSchema = (SSysTableSchema *)pStart; +// pSchema->bytes = htonl(ps[i].bytes); +// pSchema->colId = htons(ps[i].colId); +// pSchema->type = ps[i].type; +// +// pStart += sizeof(SSysTableSchema); +// } +// +// int32_t len = blockEncode(pBlock, pStart, pShow->pMeta->numOfColumns); +// } +// +// pRsp->numOfRows = htonl(rowsRead); +// pRsp->precision = TSDB_TIME_PRECISION_MILLI; // millisecond time precision +// pReq->info.rsp = pRsp; +// pReq->info.rspLen = size; +// +// if (rowsRead == 0 || rowsRead < rowsToRead) { +// pRsp->completed = 1; +// mDebug("show:0x%" PRIx64 ", retrieve completed", pShow->id); +// mndReleaseShowObj(pShow, true); +// } else { +// mDebug("show:0x%" PRIx64 ", retrieve not completed yet", pShow->id); +// mndReleaseShowObj(pShow, false); +// } +// +// blockDataDestroy(pBlock); +// return TSDB_CODE_SUCCESS; +//} + + static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -2599,6 +2881,187 @@ static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc return numOfRows; } +static int32_t buildDbColsInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, + const char* dbName, const char* tbName) { + char tName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + char dName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + int32_t numOfRows = p->info.rows; + + STR_TO_VARSTR(dName, dbName); + STR_TO_VARSTR(typeName, "SYSTEM_TABLE"); + + for (int32_t i = 0; i < size; ++i) { + const SSysTableMeta* pm = &pSysDbTableMeta[i]; +// if (pm->sysInfo) { +// continue; +// } + if(tbName[0] && strncmp(tbName, pm->name, TSDB_TABLE_NAME_LEN) != 0){ + continue; + } + + STR_TO_VARSTR(tName, pm->name); + + for(int32_t j = 0; j < pm->colNum; j++){ + // table name + SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0); + colDataAppend(pColInfoData, numOfRows, tName, false); + + // database name + pColInfoData = taosArrayGet(p->pDataBlock, 1); + colDataAppend(pColInfoData, numOfRows, dName, false); + + pColInfoData = taosArrayGet(p->pDataBlock, 2); + colDataAppend(pColInfoData, numOfRows, typeName, false); + + // col name + char colName[TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + 
STR_TO_VARSTR(colName, pm->schema[j].name); + pColInfoData = taosArrayGet(p->pDataBlock, 3); + colDataAppend(pColInfoData, numOfRows, colName, false); + + // col type + int8_t colType = pm->schema[j].type; + pColInfoData = taosArrayGet(p->pDataBlock, 4); + char colTypeStr[VARSTR_HEADER_SIZE + 32]; + int colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name); + if (colType == TSDB_DATA_TYPE_VARCHAR) { + colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + (int32_t)(pm->schema[j].bytes - VARSTR_HEADER_SIZE)); + } else if (colType == TSDB_DATA_TYPE_NCHAR) { + colTypeLen += sprintf( + varDataVal(colTypeStr) + colTypeLen, "(%d)", + (int32_t)((pm->schema[j].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); + } + varDataSetLen(colTypeStr, colTypeLen); + colDataAppend(pColInfoData, numOfRows, (char*)colTypeStr, false); + + pColInfoData = taosArrayGet(p->pDataBlock, 5); + colDataAppend(pColInfoData, numOfRows, (const char*)&pm->schema[j].bytes, false); + for (int32_t k = 6; k <= 8; ++k) { + pColInfoData = taosArrayGet(p->pDataBlock, k); + colDataAppendNULL(pColInfoData, numOfRows); + } + + numOfRows += 1; + } + } + + return numOfRows; +} + +static int32_t buildSysDbColsInfo(SSDataBlock* p, char* db, char* tb) { + size_t size = 0; + const SSysTableMeta* pSysDbTableMeta = NULL; + + if(db[0] && strncmp(db, TSDB_INFORMATION_SCHEMA_DB, TSDB_DB_FNAME_LEN) != 0 && strncmp(db, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_DB_FNAME_LEN) != 0){ + return p->info.rows; + } + + getInfosDbMeta(&pSysDbTableMeta, &size); + p->info.rows = buildDbColsInfoBlock(p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB, tb); + + getPerfDbMeta(&pSysDbTableMeta, &size); + p->info.rows = buildDbColsInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB, tb); + + return p->info.rows; +} + +static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) { + SMnode *pMnode = pReq->info.node; + SSdb *pSdb = pMnode->pSdb; + SStbObj *pStb = NULL; + + int32_t numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb); + mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db); + SDbObj *pDb = NULL; + if (strlen(pShow->db) > 0) { + pDb = mndAcquireDb(pMnode, pShow->db); + if (pDb == NULL) return terrno; + } + + char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(typeName, "SUPER_TABLE"); + while (numOfRows < rows) { + pShow->pIter = sdbFetch(pSdb, SDB_STB, pShow->pIter, (void **)&pStb); + if (pShow->pIter == NULL) break; + + if (pDb != NULL && pStb->dbUid != pDb->uid) { + sdbRelease(pSdb, pStb); + continue; + } + + SName name = {0}; + char stbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + mndExtractTbNameFromStbFullName(pStb->name, &stbName[VARSTR_HEADER_SIZE], TSDB_TABLE_NAME_LEN); + if(pShow->filterTb[0] && strncmp(pShow->filterTb, &stbName[VARSTR_HEADER_SIZE], TSDB_TABLE_NAME_LEN) != 0){ + sdbRelease(pSdb, pStb); + continue; + } + varDataSetLen(stbName, strlen(&stbName[VARSTR_HEADER_SIZE])); + + mDebug("mndRetrieveStbCol get stable cols, stable name:%s, db:%s", pStb->name, pStb->db); + + char db[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + tNameFromString(&name, pStb->db, T_NAME_ACCT | T_NAME_DB); + tNameGetDbName(&name, varDataVal(db)); + varDataSetLen(db, strlen(varDataVal(db))); + + for(int i = 0; i < pStb->numOfColumns; i++){ + int32_t cols = 0; + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)stbName, false); 
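+      // remaining columns for this row: database name, table type, column name, column type, bytes; any leftover show columns are filled with NULL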
+ + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)db, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, typeName, false); + + // col name + char colName[TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(colName, pStb->pColumns[i].name); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, colName, false); + + // col type + int8_t colType = pStb->pColumns[i].type; + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + char colTypeStr[VARSTR_HEADER_SIZE + 32]; + int colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name); + if (colType == TSDB_DATA_TYPE_VARCHAR) { + colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + (int32_t)(pStb->pColumns[i].bytes - VARSTR_HEADER_SIZE)); + } else if (colType == TSDB_DATA_TYPE_NCHAR) { + colTypeLen += sprintf( + varDataVal(colTypeStr) + colTypeLen, "(%d)", + (int32_t)((pStb->pColumns[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); + } + varDataSetLen(colTypeStr, colTypeLen); + colDataAppend(pColInfo, numOfRows, (char*)colTypeStr, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char*)&pStb->pColumns[i].bytes, false); + while(cols < pShow->numOfColumns) { + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppendNULL(pColInfo, numOfRows); + } + numOfRows++; + } + + sdbRelease(pSdb, pStb); + } + + if (pDb != NULL) { + mndReleaseDb(pMnode, pDb); + } + + pShow->numOfRows += numOfRows; + mDebug("mndRetrieveStbCol success, rows:%d, pShow->numOfRows:%d", numOfRows, pShow->numOfRows); + + return numOfRows; +} + static void mndCancelGetNextStb(SMnode *pMnode, void *pIter) { SSdb *pSdb = pMnode->pSdb; sdbCancelFetch(pSdb, pIter); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 61374aa0bf53a4394bcaa1da160fac628f052027..6b54a36a6fc3b4135fb19f1bbe40ee13cdecf7c5 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -314,7 +314,11 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, } tstrncpy(pObj->targetDb, pTargetDb->name, TSDB_DB_FNAME_LEN); - pObj->targetStbUid = mndGenerateUid(pObj->targetSTbName, TSDB_TABLE_FNAME_LEN); + if (pCreate->createStb == STREAM_CREATE_STABLE_TRUE) { + pObj->targetStbUid = mndGenerateUid(pObj->targetSTbName, TSDB_TABLE_FNAME_LEN); + } else { + pObj->targetStbUid = pCreate->targetStbUid; + } pObj->targetDbUid = pTargetDb->uid; mndReleaseDb(pMnode, pTargetDb); @@ -334,6 +338,38 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, goto FAIL; } + int32_t numOfNULL = taosArrayGetSize(pCreate->fillNullCols); + if(numOfNULL > 0) { + pObj->outputSchema.nCols += numOfNULL; + SSchema* pFullSchema = taosMemoryCalloc(pObj->outputSchema.nCols, sizeof(SSchema)); + if (!pFullSchema) { + goto FAIL; + } + + int32_t nullIndex = 0; + int32_t dataIndex = 0; + for (int16_t i = 0; i < pObj->outputSchema.nCols; i++) { + SColLocation* pos = taosArrayGet(pCreate->fillNullCols, nullIndex); + if (i < pos->slotId) { + pFullSchema[i].bytes = pObj->outputSchema.pSchema[dataIndex].bytes; + pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId; + pFullSchema[i].flags = pObj->outputSchema.pSchema[dataIndex].flags; + strcpy(pFullSchema[i].name, pObj->outputSchema.pSchema[dataIndex].name); + pFullSchema[i].type = 
pObj->outputSchema.pSchema[dataIndex].type; + dataIndex++; + } else { + pFullSchema[i].bytes = 0; + pFullSchema[i].colId = pos->colId; + pFullSchema[i].flags = COL_SET_NULL; + memset(pFullSchema[i].name, 0, TSDB_COL_NAME_LEN); + pFullSchema[i].type = pos->type; + nullIndex++; + } + } + taosMemoryFree(pObj->outputSchema.pSchema); + pObj->outputSchema.pSchema = pFullSchema; + } + SPlanContext cxt = { .pAstRoot = pAst, .topicQuery = false, @@ -465,7 +501,6 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre SMCreateStbReq createReq = {0}; tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); createReq.numOfColumns = pStream->outputSchema.nCols; - createReq.numOfTags = 1; // group id createReq.pColumns = taosArrayInit(createReq.numOfColumns, sizeof(SField)); // build fields taosArraySetSize(createReq.pColumns, createReq.numOfColumns); @@ -476,14 +511,29 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre pField->type = pStream->outputSchema.pSchema[i].type; pField->bytes = pStream->outputSchema.pSchema[i].bytes; } - createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField)); - taosArraySetSize(createReq.pTags, 1); - // build tags - SField *pField = taosArrayGet(createReq.pTags, 0); - strcpy(pField->name, "group_id"); - pField->type = TSDB_DATA_TYPE_UBIGINT; - pField->flags = 0; - pField->bytes = 8; + + if (pStream->tagSchema.nCols == 0) { + createReq.numOfTags = 1; + createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField)); + taosArraySetSize(createReq.pTags, createReq.numOfTags); + // build tags + SField *pField = taosArrayGet(createReq.pTags, 0); + strcpy(pField->name, "group_id"); + pField->type = TSDB_DATA_TYPE_UBIGINT; + pField->flags = 0; + pField->bytes = 8; + } else { + createReq.numOfTags = pStream->tagSchema.nCols; + createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField)); + taosArraySetSize(createReq.pTags, createReq.numOfTags); + for (int32_t i = 0; i < createReq.numOfTags; i++) { + SField *pField = taosArrayGet(createReq.pTags, i); + pField->bytes = pStream->tagSchema.pSchema[i].bytes; + pField->flags = pStream->tagSchema.pSchema[i].flags; + pField->type = pStream->tagSchema.pSchema[i].type; + tstrncpy(pField->name, pStream->tagSchema.pSchema[i].name, TSDB_COL_NAME_LEN); + } + } if (mndCheckCreateStbReq(&createReq) != 0) { goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index b8ef185199c5b93c57ab4d2e68d30bb12c797373..153bb8bd04b73bf3c025851a0f2d00b1f6908eac 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -760,6 +760,27 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) { goto SUB_DECODE_OVER; } + // update epset saved in mnode + if (pSub->unassignedVgs != NULL) { + int32_t size = (int32_t)taosArrayGetSize(pSub->unassignedVgs); + for (int32_t i = 0; i < size; ++i) { + SMqVgEp *pMqVgEp = taosArrayGet(pSub->unassignedVgs, i); + tmsgUpdateDnodeEpSet(&pMqVgEp->epSet); + } + } + if (pSub->consumerHash != NULL) { + void *pIter = taosHashIterate(pSub->consumerHash, NULL); + while (pIter) { + SMqConsumerEp *pConsumerEp = pIter; + int32_t size = (int32_t)taosArrayGetSize(pConsumerEp->vgs); + for (int32_t i = 0; i < size; ++i) { + SMqVgEp *pMqVgEp = taosArrayGet(pConsumerEp->vgs, i); + tmsgUpdateDnodeEpSet(&pMqVgEp->epSet); + } + pIter = taosHashIterate(pSub->consumerHash, pIter); + } + } + terrno = TSDB_CODE_SUCCESS; SUB_DECODE_OVER: diff --git 
a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 93c9192bed7c498221b28d6258608d0b1e4ca1af..7dc09124039a48ad742ec641cf626b4ff3bbf3b9 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -271,9 +271,11 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) { int32_t mndInitSync(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; taosThreadMutexInit(&pMgmt->lock, NULL); + taosThreadMutexLock(&pMgmt->lock); pMgmt->transId = 0; pMgmt->transSec = 0; pMgmt->transSeq = 0; + taosThreadMutexUnlock(&pMgmt->lock); SSyncInfo syncInfo = { .snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT, @@ -369,6 +371,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) { if (pMgmt->transId != 0) { mError("trans:%d, can't be proposed since trans:%d already waiting for confirm", transId, pMgmt->transId); taosThreadMutexUnlock(&pMgmt->lock); + rpcFreeCont(req.pCont); terrno = TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED; return terrno; } diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index b0b49b42dce114ffdf223f16eaad59abd822d700..679fafa28d89e398c1e5900f354925a50c38c06a 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ b/source/dnode/mnode/impl/src/mndTelem.c @@ -131,7 +131,7 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) { taosThreadMutexUnlock(&pMgmt->lock); if (pCont != NULL) { - if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) { + if (taosSendHttpReport(tsTelemServer, tsTelemUri, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) { mError("failed to send telemetry report"); } else { mInfo("succeed to send telemetry report"); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index ce04c723442c65b1ff721f626d8170ffce63e170..96dba24566d8a6f21ea19672d728b795ba0245d7 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -604,22 +604,19 @@ _OVER: } static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTopicObj *pTopic) { + int32_t code = -1; + if (mndUserRemoveTopic(pMnode, pTrans, pTopic->name) != 0) goto _OVER; + SSdbRaw *pCommitRaw = mndTopicActionEncode(pTopic); - if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { - mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); - mndTransDrop(pTrans); - return -1; - } + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) goto _OVER; (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED); - if (mndTransPrepare(pMnode, pTrans) != 0) { - mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); - mndTransDrop(pTrans); - return -1; - } + if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; + code = 0; +_OVER: mndTransDrop(pTrans); - return 0; + return code; } static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { @@ -885,6 +882,7 @@ int32_t mndCheckTopicExist(SMnode *pMnode, SDbObj *pDb) { return 0; } +#if 0 int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { int32_t code = 0; SSdb *pSdb = pMnode->pSdb; @@ -912,3 +910,4 @@ int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) { return code; } +#endif \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 718fc5c73f27e96fb261effbdc54c73310a3e924..dfcd55bcba7ca6a46a79cac2351df18a135218db 100644 --- 
a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -329,6 +329,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { action.pRaw = NULL; } else if (action.actionType == TRANS_ACTION_MSG) { SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER); + tmsgUpdateDnodeEpSet(&action.epSet); SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER) SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER) SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER) diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index efce6255fbb31f790a876bc4506584e0156a4563..b965e1331649af7634385cea552f1d129750ad89 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -285,14 +285,35 @@ static int32_t mndUserActionInsert(SSdb *pSdb, SUserObj *pUser) { return 0; } -static int32_t mndUserActionDelete(SSdb *pSdb, SUserObj *pUser) { - mTrace("user:%s, perform delete action, row:%p", pUser->user, pUser); +static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) { + memcpy(pNew, pUser, sizeof(SUserObj)); + pNew->authVersion++; + pNew->updateTime = taosGetTimestampMs(); + + taosRLockLatch(&pUser->lock); + pNew->readDbs = mndDupDbHash(pUser->readDbs); + pNew->writeDbs = mndDupDbHash(pUser->writeDbs); + pNew->topics = mndDupTopicHash(pUser->topics); + taosRUnLockLatch(&pUser->lock); + + if (pNew->readDbs == NULL || pNew->writeDbs == NULL || pNew->topics == NULL) { + return -1; + } + return 0; +} + +static void mndUserFreeObj(SUserObj *pUser) { taosHashCleanup(pUser->readDbs); taosHashCleanup(pUser->writeDbs); taosHashCleanup(pUser->topics); pUser->readDbs = NULL; pUser->writeDbs = NULL; pUser->topics = NULL; +} + +static int32_t mndUserActionDelete(SSdb *pSdb, SUserObj *pUser) { + mTrace("user:%s, perform delete action, row:%p", pUser->user, pUser); + mndUserFreeObj(pUser); return 0; } @@ -516,19 +537,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) { goto _OVER; } - memcpy(&newUser, pUser, sizeof(SUserObj)); - newUser.authVersion++; - newUser.updateTime = taosGetTimestampMs(); - - taosRLockLatch(&pUser->lock); - newUser.readDbs = mndDupDbHash(pUser->readDbs); - newUser.writeDbs = mndDupDbHash(pUser->writeDbs); - newUser.topics = mndDupTopicHash(pUser->topics); - taosRUnLockLatch(&pUser->lock); - - if (newUser.readDbs == NULL || newUser.writeDbs == NULL || newUser.topics == NULL) { - goto _OVER; - } + if (mndUserDupObj(pUser, &newUser) != 0) goto _OVER; if (alterReq.alterType == TSDB_ALTER_USER_PASSWD) { char pass[TSDB_PASSWORD_LEN + 1] = {0}; @@ -654,9 +663,7 @@ _OVER: mndReleaseUser(pMnode, pOperUser); mndReleaseUser(pMnode, pUser); - taosHashCleanup(newUser.writeDbs); - taosHashCleanup(newUser.readDbs); - taosHashCleanup(newUser.topics); + mndUserFreeObj(&newUser); return code; } @@ -884,9 +891,9 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock db = taosHashIterate(pUser->writeDbs, NULL); while (db != NULL) { cols = 0; - SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; + char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(userName, pUser->user, pShow->pMeta->pSchemas[cols].bytes); + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)userName, false); char privilege[20] = {0}; @@ -909,9 +916,9 @@ static int32_t mndRetrievePrivileges(SRpcMsg 
*pReq, SShowObj *pShow, SSDataBlock char *topic = taosHashIterate(pUser->topics, NULL); while (topic != NULL) { cols = 0; - SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; + char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0}; STR_WITH_MAXSIZE_TO_VARSTR(userName, pUser->user, pShow->pMeta->pSchemas[cols].bytes); + SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)userName, false); char privilege[20] = {0}; @@ -1007,3 +1014,74 @@ _OVER: tFreeSUserAuthBatchRsp(&batchRsp); return code; } + +int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) { + int32_t code = 0; + SSdb *pSdb = pMnode->pSdb; + int32_t len = strlen(db) + 1; + void *pIter = NULL; + SUserObj *pUser = NULL; + SUserObj newUser = {0}; + + while (1) { + pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pUser); + if (pIter == NULL) break; + + code = -1; + if (mndUserDupObj(pUser, &newUser) != 0) break; + + bool inRead = (taosHashGet(newUser.readDbs, db, len) != NULL); + bool inWrite = (taosHashGet(newUser.writeDbs, db, len) != NULL); + if (inRead || inWrite) { + (void)taosHashRemove(newUser.readDbs, db, len); + (void)taosHashRemove(newUser.writeDbs, db, len); + + SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) break; + (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + } + + mndUserFreeObj(&newUser); + sdbRelease(pSdb, pUser); + code = 0; + } + + if (pUser != NULL) sdbRelease(pSdb, pUser); + if (pIter != NULL) sdbCancelFetch(pSdb, pIter); + mndUserFreeObj(&newUser); + return code; +} + +int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic) { + int32_t code = 0; + SSdb *pSdb = pMnode->pSdb; + int32_t len = strlen(topic) + 1; + void *pIter = NULL; + SUserObj *pUser = NULL; + SUserObj newUser = {0}; + + while (1) { + pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pUser); + if (pIter == NULL) break; + + code = -1; + if (mndUserDupObj(pUser, &newUser) != 0) break; + + bool inTopic = (taosHashGet(newUser.topics, topic, len) != NULL); + if (inTopic) { + (void)taosHashRemove(newUser.topics, topic, len); + SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser); + if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) break; + (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); + } + + mndUserFreeObj(&newUser); + sdbRelease(pSdb, pUser); + code = 0; + } + + if (pUser != NULL) sdbRelease(pSdb, pUser); + if (pIter != NULL) sdbCancelFetch(pSdb, pIter); + mndUserFreeObj(&newUser); + return code; +} diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 2d700770aa0cb8be54038bc1751a778efcc34787..5c178278c1ef05f55324cd7c2f8321b6f1cdc25a 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1448,10 +1448,10 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, { SSdbRaw *pRaw = mndVgroupActionEncode(&newVg); - if (pRaw == NULL) return -1; + if (pRaw == NULL) goto _OVER; if (mndTransAppendCommitlog(pTrans, pRaw) != 0) { sdbFreeRaw(pRaw); - return -1; + goto _OVER; } (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY); } diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index e799f08a17208f107dd67835d2a2733c3a078b15..5a44e4279ff2456a9c0df4d134ecc6d4e49801dd 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ 
b/source/dnode/mnode/sdb/inc/sdb.h @@ -291,6 +291,7 @@ int32_t sdbWriteWithoutFree(SSdb *pSdb, SSdbRaw *pRaw); * @return void* The object of the row. */ void *sdbAcquire(SSdb *pSdb, ESdbType type, const void *pKey); +void *sdbAcquireNotReadyObj(SSdb *pSdb, ESdbType type, const void *pKey); /** * @brief Release a row from sdb. diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index f43b6bdb2541c57987d48208da38cd3ea486f1f5..c2d7a9757abf522d67b88e29a0990b2c95f87e68 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -228,11 +228,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { int32_t readLen = 0; int64_t ret = 0; char file[PATH_MAX] = {0}; + int32_t bufLen = TSDB_MAX_MSG_SIZE; snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); mInfo("start to read sdb file:%s", file); - SSdbRaw *pRaw = taosMemoryMalloc(TSDB_MAX_MSG_SIZE + 100); + SSdbRaw *pRaw = taosMemoryMalloc(bufLen + 100); if (pRaw == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; mError("failed read sdb file since %s", terrstr()); @@ -243,7 +244,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { if (pFile == NULL) { taosMemoryFree(pRaw); terrno = TAOS_SYSTEM_ERROR(errno); - mDebug("failed to read sdb file:%s since %s", file, terrstr()); + mInfo("read sdb file:%s finished since %s", file, terrstr()); return 0; } @@ -275,14 +276,15 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { } readLen = pRaw->dataLen + sizeof(int32_t); - if (readLen >= pRaw->dataLen) { - SSdbRaw *pNewRaw = taosMemoryMalloc(pRaw->dataLen + TSDB_MAX_MSG_SIZE); + if (readLen >= bufLen) { + bufLen = pRaw->dataLen * 2; + SSdbRaw *pNewRaw = taosMemoryMalloc(bufLen + 100); if (pNewRaw == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - mError("failed read sdb file since malloc new sdbRaw size:%d failed", pRaw->dataLen + TSDB_MAX_MSG_SIZE); + mError("failed read sdb file since malloc new sdbRaw size:%d failed", bufLen); goto _OVER; } - mInfo("malloc new sdbRaw size:%d, type:%d", pRaw->dataLen + TSDB_MAX_MSG_SIZE, pRaw->type); + mInfo("malloc new sdb raw size:%d, type:%d", bufLen, pRaw->type); memcpy(pNewRaw, pRaw, sizeof(SSdbRaw)); sdbFreeRaw(pRaw); pRaw = pNewRaw; @@ -636,15 +638,20 @@ int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) { } int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, int64_t term, int64_t config) { - int32_t code = 0; + int32_t code = -1; if (!isApply) { mInfo("sdbiter:%p, not apply to sdb", pIter); - sdbCloseIter(pIter); - return 0; + code = 0; + goto _OVER; + } + + if (taosFsyncFile(pIter->file) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("sdbiter:%p, failed to fasync file %s since %s", pIter, pIter->name, terrstr()); + goto _OVER; } - taosFsyncFile(pIter->file); taosCloseFile(&pIter->file); pIter->file = NULL; @@ -653,14 +660,12 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i if (taosRenameFile(pIter->name, datafile) != 0) { terrno = TAOS_SYSTEM_ERROR(errno); mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr()); - sdbCloseIter(pIter); - return -1; + goto _OVER; } if (sdbReadFile(pSdb) != 0) { mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr()); - sdbCloseIter(pIter); - return -1; + goto _OVER; } if (config > 0) { @@ -674,8 +679,11 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i } mInfo("sdbiter:%p, success applyed to sdb", pIter); + code = 0; + +_OVER: 
sdbCloseIter(pIter); - return 0; + return code; } int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) { diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c index 32b34ea3a3b01193b0b23a2a2b4d80d938417a18..505dee3d87053d2b406ba5679419f12bdc5d837b 100644 --- a/source/dnode/mnode/sdb/src/sdbHash.c +++ b/source/dnode/mnode/sdb/src/sdbHash.c @@ -270,7 +270,7 @@ int32_t sdbWrite(SSdb *pSdb, SSdbRaw *pRaw) { return code; } -void *sdbAcquire(SSdb *pSdb, ESdbType type, const void *pKey) { +void *sdbAcquireAll(SSdb *pSdb, ESdbType type, const void *pKey, bool onlyReady) { terrno = 0; SHashObj *hash = sdbGetHash(pSdb, type); @@ -306,10 +306,24 @@ void *sdbAcquire(SSdb *pSdb, ESdbType type, const void *pKey) { break; } + if (pRet == NULL) { + if (!onlyReady) { + terrno = 0; + atomic_add_fetch_32(&pRow->refCount, 1); + pRet = pRow->pObj; + sdbPrintOper(pSdb, pRow, "acquire"); + } + } + sdbUnLock(pSdb, type); return pRet; } +void *sdbAcquire(SSdb *pSdb, ESdbType type, const void *pKey) { return sdbAcquireAll(pSdb, type, pKey, true); } +void *sdbAcquireNotReadyObj(SSdb *pSdb, ESdbType type, const void *pKey) { + return sdbAcquireAll(pSdb, type, pKey, false); +} + static void sdbCheckRow(SSdb *pSdb, SSdbRow *pRow) { int32_t type = pRow->type; sdbWriteLock(pSdb, type); diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index b133226ed39bb5c20ed96b56d95881d009e1e872..860db20fa87e225f43e33e7b25f728a13f65846a 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -124,6 +124,7 @@ FAIL: } void sndClose(SSnode *pSnode) { + streamMetaCommit(pSnode->pMeta); streamMetaClose(pSnode->pMeta); taosMemoryFree(pSnode->path); taosMemoryFree(pSnode); diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index d7851d6fe7de31a390a9a98be10ded41c6a51cec..7d368c76c55d61af9305acb23d180cefdc936396 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -54,6 +54,7 @@ int32_t vnodeAlter(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs); void vnodeDestroy(const char *path, STfs *pTfs); SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb); void vnodePreClose(SVnode *pVnode); +void vnodePostClose(SVnode *pVnode); void vnodeSyncCheckTimeout(SVnode *pVnode); void vnodeClose(SVnode *pVnode); @@ -88,6 +89,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg); int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo); void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs); void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs); +void vnodeProposeCommitOnNeed(SVnode *pVnode); // meta typedef struct SMeta SMeta; // todo: remove @@ -150,7 +152,9 @@ typedef struct SMTbCursor SMTbCursor; SMTbCursor *metaOpenTbCursor(SMeta *pMeta); void metaCloseTbCursor(SMTbCursor *pTbCur); -int32_t metaTbCursorNext(SMTbCursor *pTbCur); +int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType); +int32_t metaTbCursorPrev(SMTbCursor *pTbCur); + #endif // tsdb diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 04301d0dbbaf02863a4d43c5dc6cbec1f7fd66da..2276601ec72f49234bc91d3b90923e811e77a200 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -65,7 +65,6 @@ typedef struct SSmaInfo SSmaInfo; typedef struct SBlockCol SBlockCol; typedef struct SVersionRange SVersionRange; typedef struct SLDataIter SLDataIter; -typedef 
struct SQueryNode SQueryNode; typedef struct SDiskCol SDiskCol; typedef struct SDiskData SDiskData; typedef struct SDiskDataBuilder SDiskDataBuilder; @@ -209,15 +208,14 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol uint8_t **ppBuf); int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData, uint8_t **ppBuf); +int32_t tRowInfoCmprFn(const void *p1, const void *p2); // tsdbMemTable ============================================================================================== // SMemTable -typedef int32_t (*_tsdb_reseek_func_t)(void *pQHandle); - int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable); -void tsdbMemTableDestroy(SMemTable *pMemTable); +void tsdbMemTableDestroy(SMemTable *pMemTable, bool proactive); STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid); -int32_t tsdbRefMemTable(SMemTable *pMemTable, void *pQHandle, _tsdb_reseek_func_t reseek, SQueryNode **ppNode); -int32_t tsdbUnrefMemTable(SMemTable *pMemTable, SQueryNode *pNode); +int32_t tsdbRefMemTable(SMemTable *pMemTable, SQueryNode *pQNode); +int32_t tsdbUnrefMemTable(SMemTable *pMemTable, SQueryNode *pNode, bool proactive); SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable); // STbDataIter int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter **ppIter); @@ -295,8 +293,8 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader); int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData); int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx); // tsdbRead.c ============================================================================================== -int32_t tsdbTakeReadSnap(STsdbReader *pReader, _tsdb_reseek_func_t reseek, STsdbReadSnap **ppSnap); -void tsdbUntakeReadSnap(STsdbReader *pReader, STsdbReadSnap *pSnap); +int32_t tsdbTakeReadSnap(STsdbReader *pReader, _query_reseek_func_t reseek, STsdbReadSnap **ppSnap); +void tsdbUntakeReadSnap(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proactive); // tsdbMerge.c ============================================================================================== int32_t tsdbMerge(STsdb *pTsdb); @@ -371,13 +369,6 @@ struct STbData { STbData *next; }; -struct SQueryNode { - SQueryNode *pNext; - SQueryNode **ppNext; - void *pQHandle; - _tsdb_reseek_func_t reseek; -}; - struct SMemTable { SRWLatch latch; STsdb *pTsdb; @@ -394,7 +385,6 @@ struct SMemTable { int32_t nBucket; STbData **aBucket; }; - SQueryNode qList; }; struct TSDBROW { diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h index 279d5e4e9532c0a17128921012823053718a134c..f06548bbddee2c5ca974624c7796e4abfbd23743 100644 --- a/source/dnode/vnode/src/inc/vnd.h +++ b/source/dnode/vnode/src/inc/vnd.h @@ -61,10 +61,19 @@ struct SVBufPoolNode { }; struct SVBufPool { - SVBufPool* next; + SVBufPool* freeNext; + SVBufPool* recycleNext; + SVBufPool* recyclePrev; + + // query handle list + TdThreadMutex mutex; + int32_t nQuery; + SQueryNode qList; + SVnode* pVnode; - TdThreadSpinlock* lock; + int32_t id; volatile int32_t nRef; + TdThreadSpinlock* lock; int64_t size; uint8_t* ptr; SVBufPoolNode* pTail; @@ -74,6 +83,8 @@ struct SVBufPool { int32_t vnodeOpenBufPool(SVnode* pVnode); int32_t vnodeCloseBufPool(SVnode* pVnode); void vnodeBufPoolReset(SVBufPool* pPool); +void vnodeBufPoolAddToFreeList(SVBufPool* pPool); +int32_t vnodeBufPoolRecycle(SVBufPool* pPool); // vnodeQuery.c int32_t 
vnodeQueryOpen(SVnode* pVnode); @@ -86,6 +97,7 @@ int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg); // vnodeCommit.c int32_t vnodeBegin(SVnode* pVnode); int32_t vnodeShouldCommit(SVnode* pVnode); +void vnodeUpdCommitSched(SVnode* pVnode); void vnodeRollback(SVnode* pVnode); int32_t vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg); int32_t vnodeCommitInfo(const char* dir, const SVnodeInfo* pInfo); @@ -102,10 +114,12 @@ int32_t vnodeSyncCompact(SVnode* pVnode); int32_t vnodeSyncOpen(SVnode* pVnode, char* path); int32_t vnodeSyncStart(SVnode* pVnode); void vnodeSyncPreClose(SVnode* pVnode); +void vnodeSyncPostClose(SVnode* pVnode); void vnodeSyncClose(SVnode* pVnode); void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg, int32_t code); bool vnodeIsLeader(SVnode* pVnode); bool vnodeIsRoleLeader(SVnode* pVnode); +int vnodeShouldCommit(SVnode* pVnode); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index b09ac70105882770242d6a836414f379f60e1258..f420f2ff8da4b5d78e9804c59f7fae427ee2ef1b 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -76,6 +76,7 @@ typedef struct SRSmaSnapReader SRSmaSnapReader; typedef struct SRSmaSnapWriter SRSmaSnapWriter; typedef struct SSnapDataHdr SSnapDataHdr; typedef struct SCommitInfo SCommitInfo; +typedef struct SQueryNode SQueryNode; #define VNODE_META_DIR "meta" #define VNODE_TSDB_DIR "tsdb" @@ -87,18 +88,29 @@ typedef struct SCommitInfo SCommitInfo; #define VNODE_RSMA1_DIR "rsma1" #define VNODE_RSMA2_DIR "rsma2" -#define VNODE_BUF_POOL_SEG 1 // TODO: change parameter here for sync/async commit -#define VND_INFO_FNAME "vnode.json" +#define VNODE_BUFPOOL_SEGMENTS 3 + +#define VND_INFO_FNAME "vnode.json" // vnd.h +typedef int32_t (*_query_reseek_func_t)(void* pQHandle); +struct SQueryNode { + SQueryNode* pNext; + SQueryNode** ppNext; + void* pQHandle; + _query_reseek_func_t reseek; +}; void* vnodeBufPoolMalloc(SVBufPool* pPool, int size); void* vnodeBufPoolMallocAligned(SVBufPool* pPool, int size); void vnodeBufPoolFree(SVBufPool* pPool, void* p); void vnodeBufPoolRef(SVBufPool* pPool); -void vnodeBufPoolUnRef(SVBufPool* pPool); +void vnodeBufPoolUnRef(SVBufPool* pPool, bool proactive); int vnodeDecodeInfo(uint8_t* pData, SVnodeInfo* pInfo); +int32_t vnodeBufPoolRegisterQuery(SVBufPool* pPool, SQueryNode* pQNode); +void vnodeBufPoolDeregisterQuery(SVBufPool* pPool, SQueryNode* pQNode, bool proactive); + // meta typedef struct SMCtbCursor SMCtbCursor; typedef struct SMStbCursor SMStbCursor; @@ -240,7 +252,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader); int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData); // STsdbSnapWriter ======================================== int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter); -int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData); +int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr); int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter); int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback); // STqSnapshotReader == @@ -326,17 +338,30 @@ struct STsdbKeepCfg { int32_t keep2; }; +typedef struct SVCommitSched { + int64_t commitMs; + int64_t maxWaitMs; +} SVCommitSched; + struct SVnode { - char* path; - SVnodeCfg config; - SVState state; - SVStatis statis; - STfs* pTfs; - SMsgCb msgCb; + char* path; + SVnodeCfg config; + SVState state; + SVStatis statis; + STfs* pTfs; + 
SMsgCb msgCb; + + // Buffer Pool TdThreadMutex mutex; TdThreadCond poolNotEmpty; - SVBufPool* pPool; + SVBufPool* aBufPool[VNODE_BUFPOOL_SEGMENTS]; + SVBufPool* freeList; SVBufPool* inUse; + SVBufPool* onCommit; + SVBufPool* recycleHead; + SVBufPool* recycleTail; + SVBufPool* onRecycle; + SMeta* pMeta; SSma* pSma; STsdb* pTsdb; @@ -344,6 +369,7 @@ struct SVnode { STQ* pTq; SSink* pSink; tsem_t canCommit; + SVCommitSched commitSched; int64_t sync; TdThreadMutex lock; bool blocked; diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 893bf1b49664b5e0ac9ca8c24a1882917c795ac6..85d8f031fb3f4ab32c806cebec583c5b21656c11 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -55,8 +55,8 @@ struct SMetaCache { // query cache struct STagFilterResCache { TdThreadMutex lock; - SHashObj* pTableEntry; - SLRUCache* pUidResCache; + SHashObj* pTableEntry; + SLRUCache* pUidResCache; } sTagFilterResCache; }; @@ -563,13 +563,13 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int } // add to cache. - taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, - TAOS_LRU_PRIORITY_LOW); + int32_t ret = taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, + TAOS_LRU_PRIORITY_LOW); taosThreadMutexUnlock(pLock); - metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid, - (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry)); + metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d, ret:%d", TD_VID(pMeta->pVnode), + suid, (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry), ret); return TSDB_CODE_SUCCESS; } diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index a78239eb565aaa9946a5561fde73a871e3b19fed..609ffc58c3d4ed00c3b5db729179a3559b36e93d 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -203,6 +203,7 @@ _err: int metaClose(SMeta *pMeta) { if (pMeta) { + if (pMeta->pEnv) tdbAbort(pMeta->pEnv, pMeta->txn); if (pMeta->pCache) metaCacheClose(pMeta); if (pMeta->pIdx) metaCloseIdx(pMeta); if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 0697f68f898f86b860af5b435942ce4f91bf1a51..6741b7ca456e78a99f326a77028511039c37b2a0 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -310,7 +310,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) { } } -int metaTbCursorNext(SMTbCursor *pTbCur) { +int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) { int ret; void *pBuf; STbCfg tbCfg; @@ -323,6 +323,30 @@ int metaTbCursorNext(SMTbCursor *pTbCur) { tDecoderClear(&pTbCur->mr.coder); + metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey); + if (pTbCur->mr.me.type == jumpTableType) { + continue; + } + + break; + } + + return 0; +} + +int32_t metaTbCursorPrev(SMTbCursor *pTbCur) { + int ret; + void *pBuf; + STbCfg tbCfg; + + for (;;) { + ret = tdbTbcPrev(pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen); + if (ret < 0) { + return -1; + } + + tDecoderClear(&pTbCur->mr.coder); + metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t 
*)pTbCur->pKey); if (pTbCur->mr.me.type == TSDB_SUPER_TABLE) { continue; @@ -653,8 +677,10 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv } if (c == 0) { - metaError("meta/query: incorrect c: %" PRId32 ".", c); + metaULock(pMeta); + tdbTbcClose(pSkmDbC); code = TSDB_CODE_FAILED; + metaError("meta/query: incorrect c: %" PRId32 ".", c); goto _exit; } @@ -680,9 +706,8 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv } } - if (sver <= 0) { - metaError("meta/query: incorrect sver: %" PRId32 ".", sver); - code = TSDB_CODE_FAILED; + if (ASSERTS(sver > 0, __FILE__, __LINE__, "failed to get table schema version: %d", sver)) { + code = TSDB_CODE_NOT_FOUND; goto _exit; } diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 0aaf6417a82fbcce3e834aaca7f5e0f9100667c7..78e0643e1076c04529e04c98e347016aeec0076b 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -343,12 +343,17 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL); ret = tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c); if (!(ret == 0 && c == 0)) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); + + terrno = TSDB_CODE_TDB_STB_NOT_EXIST; metaError("meta/table: invalide ret: %" PRId32 " or c: %" PRId32 "alter stb failed.", ret, c); return -1; } ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData); if (ret < 0) { + tdbTbcClose(pUidIdxc); tdbTbcClose(pTbDbc); terrno = TSDB_CODE_TDB_STB_NOT_EXIST; @@ -783,6 +788,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL); tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c); if (c != 0) { + tdbTbcClose(pUidIdxc); metaError("meta/table: invalide c: %" PRId32 " alt tb column failed.", c); return -1; } @@ -796,6 +802,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL); tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c); if (c != 0) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); metaError("meta/table: invalide c: %" PRId32 " alt tb column failed.", c); return -1; } @@ -809,6 +817,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl tDecoderInit(&dc, entry.pBuf, nData); ret = metaDecodeEntry(&dc, &entry); if (ret != 0) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); tDecoderClear(&dc); metaError("meta/table: invalide ret: %" PRId32 " alt tb column failed.", ret); return -1; @@ -989,6 +999,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL); tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c); if (c != 0) { + tdbTbcClose(pUidIdxc); + terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c); return -1; } @@ -1005,6 +1017,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL); tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c); if (c != 0) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); + terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST; metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c); return -1; } @@ -1166,6 +1181,7 @@ static int 
metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL); tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c); if (c != 0) { + tdbTbcClose(pUidIdxc); metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c); return -1; } @@ -1179,6 +1195,8 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL); tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c); if (c != 0) { + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c); return -1; } @@ -1193,6 +1211,8 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p ret = metaDecodeEntry(&dc, &entry); if (ret != 0) { tDecoderClear(&dc); + tdbTbcClose(pUidIdxc); + tdbTbcClose(pTbDbc); metaError("meta/table: invalide ret: %" PRId32 " alt tb options failed.", ret); return -1; } diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index 0aa20e9e1d0a69a776d5c73412c4d99d3177bb5b..38f04bb8e8bea7f12e7fb128775d5a80f54bb51a 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -239,6 +239,11 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma, SCommitInfo *pInfo) { int32_t lino = 0; SVnode *pVnode = pSma->pVnode; + SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); + if (!pSmaEnv) { + goto _exit; + } + code = tdRSmaFSCommit(pSma); TSDB_CHECK_CODE(code, lino, _exit); diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c index ed33e0fd7b2cedfb2cacca97b248bde0491bf799..21c283994345a00badb3e5c12c80f1e3b86bdc6f 100644 --- a/source/dnode/vnode/src/sma/smaOpen.c +++ b/source/dnode/vnode/src/sma/smaOpen.c @@ -148,10 +148,6 @@ int32_t smaOpen(SVnode *pVnode, int8_t rollback) { SMA_OPEN_RSMA_IMPL(pVnode, 1); } else if (i == TSDB_RETENTION_L2) { SMA_OPEN_RSMA_IMPL(pVnode, 2); - } else { - code = TSDB_CODE_APP_ERROR; - smaError("vgId:%d, sma open failed since %s, level:%d", TD_VID(pVnode), tstrerror(code), i); - TSDB_CHECK_CODE(code, lino, _exit); } } diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c index c68525a493b83a8baadeb86c8133c00f2dad09a0..c00e96a06664db0a60184fdb09e16ee0b68c3d45 100644 --- a/source/dnode/vnode/src/sma/smaSnapshot.c +++ b/source/dnode/vnode/src/sma/smaSnapshot.c @@ -237,8 +237,8 @@ int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData) { _exit: if (code) { - rsmaSnapReaderClose(&pReader); smaError("vgId:%d, vnode snapshot rsma read failed since %s", SMA_VID(pReader->pSma), tstrerror(code)); + rsmaSnapReaderClose(&pReader); } else { smaInfo("vgId:%d, vnode snapshot rsma read succeed", SMA_VID(pReader->pSma)); } @@ -432,7 +432,7 @@ _exit: if (pInFD) taosCloseFile(&pInFD); smaError("vgId:%d, vnode snapshot rsma writer close failed since %s", SMA_VID(pSma), tstrerror(code)); } else { - smaInfo("vgId:%d, vnode snapshot rsma writer close succeed", SMA_VID(pSma)); + smaInfo("vgId:%d, vnode snapshot rsma writer close succeed", pSma ? 
SMA_VID(pSma) : 0); } return code; @@ -446,10 +446,10 @@ int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) // rsma1/rsma2 if (pHdr->type == SNAP_DATA_RSMA1) { pHdr->type = SNAP_DATA_TSDB; - code = tsdbSnapWrite(pWriter->pDataWriter[0], pData, nData); + code = tsdbSnapWrite(pWriter->pDataWriter[0], pHdr); } else if (pHdr->type == SNAP_DATA_RSMA2) { pHdr->type = SNAP_DATA_TSDB; - code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData); + code = tsdbSnapWrite(pWriter->pDataWriter[1], pHdr); } else if (pHdr->type == SNAP_DATA_QTASK) { code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData); } else { diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index 65c3bf3095e223f78166ebbd7c8b0d161d51e43d..1b191dd5a54c21068ea2dfbc9b4d573b02fe766e 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -25,14 +25,13 @@ static int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char * static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg); static int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days); -// TODO: Who is responsible for resource allocate and release? int32_t tdProcessTSmaInsert(SSma *pSma, int64_t indexUid, const char *msg) { int32_t code = TSDB_CODE_SUCCESS; if ((code = tdProcessTSmaInsertImpl(pSma, indexUid, msg)) < 0) { smaWarn("vgId:%d, insert tsma data failed since %s", SMA_VID(pSma), tstrerror(terrno)); } - // TODO: destroy SSDataBlocks(msg) + return code; } @@ -42,7 +41,6 @@ int32_t tdProcessTSmaCreate(SSma *pSma, int64_t version, const char *msg) { if ((code = tdProcessTSmaCreateImpl(pSma, version, msg)) < 0) { smaWarn("vgId:%d, create tsma failed since %s", SMA_VID(pSma), tstrerror(terrno)); } - // TODO: destroy SSDataBlocks(msg) return code; } @@ -258,28 +256,23 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * int32_t rows = pDataBlock->info.rows; - SSubmitTbData *pTbData = (SSubmitTbData *)taosMemoryCalloc(1, sizeof(SSubmitTbData)); - if (!pTbData) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto _end; - } + SSubmitTbData tbData = {0}; + - if (!(pTbData->aRowP = taosArrayInit(rows, sizeof(SRow *)))) { - taosMemoryFree(pTbData); + if (!(tbData.aRowP = taosArrayInit(rows, sizeof(SRow *)))) { goto _end; } - pTbData->suid = suid; - pTbData->uid = 0; // uid is assigned by vnode - pTbData->sver = pTSchema->version; + tbData.suid = suid; + tbData.uid = 0; // uid is assigned by vnode + tbData.sver = pTSchema->version; if (createTb) { - pTbData->pCreateTbReq = taosArrayGetP(createTbArray, i); - if (pTbData->pCreateTbReq) pTbData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE; + tbData.pCreateTbReq = taosArrayGetP(createTbArray, i); + if (tbData.pCreateTbReq) tbData.flags = SUBMIT_REQ_AUTO_CREATE_TABLE; } if (!pVals && !(pVals = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal)))) { - taosArrayDestroy(pTbData->aRowP); - taosMemoryFree(pTbData); + taosArrayDestroy(tbData.aRowP); goto _end; } @@ -307,14 +300,13 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * } SRow *pRow = NULL; if ((terrno = tRowBuild(pVals, (STSchema *)pTSchema, &pRow)) < 0) { - tDestroySSubmitTbData(pTbData, TSDB_MSG_FLG_ENCODE); + tDestroySSubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE); goto _end; } - ASSERT(pRow); - taosArrayPush(pTbData->aRowP, &pRow); + taosArrayPush(tbData.aRowP, &pRow); } - taosArrayPush(pReq->aSubmitTbData, pTbData); + 
taosArrayPush(pReq->aSubmitTbData, &tbData); } // encode @@ -336,9 +328,13 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema * tEncoderClear(&encoder); } _end: + taosArrayDestroy(createTbArray); taosArrayDestroy(tagArray); taosArrayDestroy(pVals); - tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE); + if (pReq) { + tDestroySSubmitReq2(pReq, TSDB_MSG_FLG_ENCODE); + taosMemoryFree(pReq); + } if (terrno != 0) { rpcFreeCont(pBuf); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index a27b6988c58d9173ca4250440a7abbac98f71114..1b6da6c50d48d0c27e187a0009e9344e54ce13fd 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -520,7 +520,12 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { tqOffsetResetToData(&fetchOffsetNew, 0, 0); } } else { - tqOffsetResetToLog(&fetchOffsetNew, walGetFirstVer(pTq->pVnode->pWal)); + pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef); + if (pHandle->pRef == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + tqOffsetResetToLog(&fetchOffsetNew, pHandle->pRef->refVer - 1); } } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) { if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { diff --git a/source/dnode/vnode/src/tq/tqCommit.c b/source/dnode/vnode/src/tq/tqCommit.c index dabd97a345f375c6774d37e4f4a408bd0bd44940..7fc66c49192eb011ce674bc7a2d396825ba1b435 100644 --- a/source/dnode/vnode/src/tq/tqCommit.c +++ b/source/dnode/vnode/src/tq/tqCommit.c @@ -15,4 +15,11 @@ #include "tq.h" -int tqCommit(STQ* pTq) { return tqOffsetCommitFile(pTq->pOffsetStore); } +int tqCommit(STQ* pTq) { + if (streamMetaCommit(pTq->pStreamMeta) < 0) { + tqError("vgId:%d, failed to commit stream meta since %s", TD_VID(pTq->pVnode), terrstr()); + return -1; + } + + return tqOffsetCommitFile(pTq->pOffsetStore); +} diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c index 2413a792c60c36a059e5411f2ff51d3792c12aa0..a4428aed4368fec9c96a2faae5fabd33cd8eb8f4 100644 --- a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -56,7 +56,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { TdFilePtr pFile = taosOpenFile(fname, TD_FILE_READ); if (pFile == NULL) { taosMemoryFree(fname); - return -1; + return 0; } int64_t sz = 0; diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 75c0c0659e0f245d69b0efaed07e22d15f8cd255..f1103ad48a1be4101da041e40ddf1e7b4a6b181b 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -323,6 +323,61 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d taosArrayDestroy(tagArray); } +static int32_t encodeCreateChildTableForRPC(SVCreateTbBatchReq* pReqs, int32_t vgId, void** pBuf, int32_t* contLen) { + int32_t ret = 0; + + tEncodeSize(tEncodeSVCreateTbBatchReq, pReqs, *contLen, ret); + if (ret < 0) { + ret = -1; + goto end; + } + *contLen += sizeof(SMsgHead); + *pBuf = rpcMallocCont(*contLen); + if (NULL == *pBuf) { + ret = -1; + goto end; + } + ((SMsgHead*)(*pBuf))->vgId = vgId; + ((SMsgHead*)(*pBuf))->contLen = htonl(*contLen); + SEncoder coder = {0}; + tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), (*contLen) - sizeof(SMsgHead) ); + if (tEncodeSVCreateTbBatchReq(&coder, pReqs) < 0) { + rpcFreeCont(*pBuf); + *pBuf = NULL; + *contLen = 0; + tEncoderClear(&coder); + ret = -1; + goto end; + } + tEncoderClear(&coder); + 
+end: + return ret; +} + +int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) { + void* buf = NULL; + int32_t tlen = 0; + encodeCreateChildTableForRPC(pReqs, TD_VID(pVnode), &buf, &tlen); + + SRpcMsg msg = { + .msgType = TDMT_VND_CREATE_TABLE, + .pCont = buf, + .contLen = tlen, + }; + + if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { + tqError("failed to put into write-queue since %s", terrstr()); + } + + return TSDB_CODE_SUCCESS; + +_error: + terrno = TSDB_CODE_OUT_OF_MEMORY; + tqError("failed to encode submit req since %s", terrstr()); + return TSDB_CODE_OUT_OF_MEMORY; +} + void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* data) { const SArray* pBlocks = (const SArray*)data; SVnode* pVnode = (SVnode*)vnode; @@ -338,13 +393,11 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* void* pBuf = NULL; SArray* tagArray = NULL; SArray* pVals = NULL; - - if (!(tagArray = taosArrayInit(1, sizeof(STagVal)))) { - goto _end; - } + SArray* crTblArray = NULL; for (int32_t i = 0; i < blockSz; i++) { SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + int32_t rows = pDataBlock->info.rows; if (pDataBlock->info.type == STREAM_DELETE_RESULT) { SBatchDeleteReq deleteReq = {0}; deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq)); @@ -380,9 +433,107 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) { tqDebug("failed to put delete req into write-queue since %s", terrstr()); } + } else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + SVCreateTbBatchReq reqs = {0}; + crTblArray = reqs.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq)); + if (NULL == reqs.pArray) { + goto _end; + } + for (int32_t rowId = 0; rowId < rows; rowId++) { + SVCreateTbReq createTbReq = {0}; + SVCreateTbReq* pCreateTbReq = &createTbReq; + if (!pCreateTbReq) { + goto _end; + } + + // set const + pCreateTbReq->flags = 0; + pCreateTbReq->type = TSDB_CHILD_TABLE; + pCreateTbReq->ctb.suid = suid; + + // set super table name + SName name = {0}; + tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); + pCreateTbReq->ctb.stbName = strdup((char*)tNameGetTableName(&name)); // strdup(stbFullName); + + // set tag content + int32_t size = taosArrayGetSize(pDataBlock->pDataBlock); + if (size == 2) { + tagArray = taosArrayInit(1, sizeof(STagVal)); + if (!tagArray) { + goto _end; + } + STagVal tagVal = { + .cid = pTSchema->numOfCols + 1, + .type = TSDB_DATA_TYPE_UBIGINT, + .i64 = (int64_t)pDataBlock->info.id.groupId, + }; + taosArrayPush(tagArray, &tagVal); + + // set tag name + SArray* tagName = taosArrayInit(1, TSDB_COL_NAME_LEN); + char tagNameStr[TSDB_COL_NAME_LEN] = "group_id"; + taosArrayPush(tagName, tagNameStr); + pCreateTbReq->ctb.tagName = tagName; + } else { + tagArray = taosArrayInit(size - 1, sizeof(STagVal)); + if (!tagArray) { + goto _end; + } + for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) { + SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId); + STagVal tagVal = { + .cid = pTSchema->numOfCols + step, + .type = pTagData->info.type, + }; + void* pData = colDataGetData(pTagData, rowId); + if (colDataIsNull_s(pTagData, rowId)) { + tagVal.type = TSDB_DATA_TYPE_NULL; + tagVal.pData = NULL; + tagVal.nData = 0; + } else if (IS_VAR_DATA_TYPE(pTagData->info.type)) { + tagVal.nData = varDataLen(pData); + tagVal.pData = varDataVal(pData); + } else { + 
memcpy(&tagVal.i64, pData, pTagData->info.bytes); + } + taosArrayPush(tagArray, &tagVal); + } + } + pCreateTbReq->ctb.tagNum = taosArrayGetSize(tagArray); + + STag* pTag = NULL; + tTagNew(tagArray, 1, false, &pTag); + tagArray = taosArrayDestroy(tagArray); + if (pTag == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _end; + } + + pCreateTbReq->ctb.pTag = (uint8_t*)pTag; + + // set table name + SColumnInfoData* pTbColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_TABLE_NAME_COLUMN_INDEX); + if (colDataIsNull_s(pTbColInfo, rowId)) { + SColumnInfoData* pGpIdColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX); + void* pGpIdData = colDataGetData(pGpIdColInfo, rowId); + pCreateTbReq->name = buildCtbNameByGroupId(stbFullName, *(uint64_t*)pGpIdData); + } else { + void* pTbData = colDataGetData(pTbColInfo, rowId); + pCreateTbReq->name = taosMemoryCalloc(1, varDataLen(pTbData) + 1); + memcpy(pCreateTbReq->name, varDataVal(pTbData), varDataLen(pTbData)); + } + taosArrayPush(reqs.pArray, pCreateTbReq); + } + reqs.nReqs = taosArrayGetSize(reqs.pArray); + if (tqPutReqToQueue(pVnode, &reqs) != TSDB_CODE_SUCCESS) { + goto _end; + } + tagArray = taosArrayDestroy(tagArray); + taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq); + crTblArray = NULL; } else { SSubmitTbData tbData = {0}; - int32_t rows = pDataBlock->info.rows; tqDebug("tq sink pipe2, convert block1 %d, rows: %d", i, rows); if (!(tbData.aRowP = taosArrayInit(rows, sizeof(SRow*)))) { @@ -394,6 +545,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* tbData.sver = pTSchema->version; char* ctbName = NULL; + tqDebug("vgId:%d, stream write into %s, table auto created", TD_VID(pVnode), pDataBlock->info.parTbName); if (pDataBlock->info.parTbName[0]) { ctbName = strdup(pDataBlock->info.parTbName); } else { @@ -423,9 +575,12 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* pCreateTbReq->ctb.stbName = strdup((char*)tNameGetTableName(&name)); // strdup(stbFullName); // set tag content - taosArrayClear(tagArray); + tagArray = taosArrayInit(1, sizeof(STagVal)); + if (!tagArray) { + goto _end; + } STagVal tagVal = { - .cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1, + .cid = pTSchema->numOfCols + 1, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = (int64_t)pDataBlock->info.id.groupId, }; @@ -434,6 +589,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* STag* pTag = NULL; tTagNew(tagArray, 1, false, &pTag); + tagArray = taosArrayDestroy(tagArray); if (pTag == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto _end; @@ -483,28 +639,37 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* for (int32_t j = 0; j < rows; j++) { taosArrayClear(pVals); + int32_t dataIndex = 0; for (int32_t k = 0; k < pTSchema->numOfCols; k++) { const STColumn* pCol = &pTSchema->columns[k]; - SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, k); if (k == 0) { + SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, dataIndex); void* colData = colDataGetData(pColData, j); tqDebug("tq sink pipe2, row %d, col %d ts %" PRId64, j, k, *(int64_t*)colData); } - if (colDataIsNull_s(pColData, j)) { + if (IS_SET_NULL(pCol)) { SColVal cv = COL_VAL_NULL(pCol->colId, pCol->type); taosArrayPush(pVals, &cv); - } else { - void* colData = colDataGetData(pColData, j); - if (IS_STR_DATA_TYPE(pCol->type)) { - SValue sv = - (SValue){.nData = varDataLen(colData), .pData = varDataVal(colData)}; // address copy, no value - SColVal 
cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv); + } else{ + SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, dataIndex); + if (colDataIsNull_s(pColData, j)) { + SColVal cv = COL_VAL_NULL(pCol->colId, pCol->type); taosArrayPush(pVals, &cv); + dataIndex++; } else { - SValue sv; - memcpy(&sv.val, colData, tDataTypes[pCol->type].bytes); - SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv); - taosArrayPush(pVals, &cv); + void* colData = colDataGetData(pColData, j); + if (IS_STR_DATA_TYPE(pCol->type)) { + SValue sv = + (SValue){.nData = varDataLen(colData), .pData = varDataVal(colData)}; // address copy, no value + SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv); + taosArrayPush(pVals, &cv); + } else { + SValue sv; + memcpy(&sv.val, colData, tDataTypes[pCol->type].bytes); + SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv); + taosArrayPush(pVals, &cv); + } + dataIndex++; } } } @@ -561,5 +726,6 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* _end: taosArrayDestroy(tagArray); taosArrayDestroy(pVals); + taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq); // TODO: change } diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index c7b36ebb226014115d4fc08f63a92747c389f3f9..d18014c37b096f044a6cf4fbb7a933193e297d02 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -1153,6 +1153,8 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) { iMax[nMax] = i; max[nMax++] = pIter->input[i].pRow; + } else { + pIter->input[i].next = false; } } } diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 1c1626ba5cf19947ab16b6eb1fd9625a7268656b..a2de1bdf4e1251d4f4276603da1d490a9c214692 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -208,13 +208,21 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) { int32_t code = 0; SCacheRowsReader* pReader = pQHandle; - taosThreadMutexLock(&pReader->readerMutex); + code = taosThreadMutexTryLock(&pReader->readerMutex); + if (code == 0) { + // pause current reader's state if not paused, save ts & version for resuming + // just wait for the big all tables' snapshot untaking for now - // pause current reader's state if not paused, save ts & version for resuming - // just wait for the big all tables' snapshot untaking for now + code = TSDB_CODE_VND_QUERY_BUSY; - taosThreadMutexUnlock(&pReader->readerMutex); - return code; + taosThreadMutexUnlock(&pReader->readerMutex); + + return code; + } else if (code == EBUSY) { + return TSDB_CODE_VND_QUERY_BUSY; + } else { + return -1; + } } int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) { @@ -260,7 +268,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 } taosThreadMutexLock(&pr->readerMutex); - tsdbTakeReadSnap((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap); + code = tsdbTakeReadSnap((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap); + if (code != TSDB_CODE_SUCCESS) { + goto _end; + } pr->pDataFReader = NULL; pr->pDataFReaderLast = NULL; @@ -271,7 +282,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _end; } if (h == NULL) { @@ -344,7 +355,7 @@ int32_t 
tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 STableKeyInfo* pKeyInfo = &pr->pTableList[i]; code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _end; } if (h == NULL) { @@ -375,7 +386,7 @@ _end: tsdbDataFReaderClose(&pr->pDataFReader); resetLastBlockLoadInfo(pr->pLoadInfo); - tsdbUntakeReadSnap((STsdbReader*)pr, pr->pReadSnap); + tsdbUntakeReadSnap((STsdbReader*)pr, pr->pReadSnap, true); taosThreadMutexUnlock(&pr->readerMutex); for (int32_t j = 0; j < pr->numOfCols; ++j) { diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 31d6fd85c681ed93737271f4444930b78ed0793b..d15f848cfdcde11c946a943d96b0397933324b14 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -175,7 +175,7 @@ int32_t tsdbCommit(STsdb *pTsdb, SCommitInfo *pInfo) { pTsdb->imem = NULL; taosThreadRwlockUnlock(&pTsdb->rwLock); - tsdbUnrefMemTable(pMemTable, NULL); + tsdbUnrefMemTable(pMemTable, NULL, true); goto _exit; } @@ -1664,7 +1664,7 @@ int32_t tsdbFinishCommit(STsdb *pTsdb) { // unlock taosThreadRwlockUnlock(&pTsdb->rwLock); if (pMemTable) { - tsdbUnrefMemTable(pMemTable, NULL); + tsdbUnrefMemTable(pMemTable, NULL, true); } _exit: diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index 3e565ac0127cc4b69bd4f05394347a1600197c38..5519d43012f46dc2e24ad56083ff421d5fcff1c5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -458,9 +458,8 @@ static int32_t tsdbMergeFileSet(STsdb *pTsdb, SDFileSet *pSetOld, SDFileSet *pSe taosMemoryFree(pHeadF); } } else { - nRef = pHeadF->nRef; - *pHeadF = *pSetNew->pHeadF; - pHeadF->nRef = nRef; + ASSERT(pHeadF->offset == pSetNew->pHeadF->offset); + ASSERT(pHeadF->size == pSetNew->pHeadF->size); } // data @@ -481,9 +480,7 @@ static int32_t tsdbMergeFileSet(STsdb *pTsdb, SDFileSet *pSetOld, SDFileSet *pSe taosMemoryFree(pDataF); } } else { - nRef = pDataF->nRef; - *pDataF = *pSetNew->pDataF; - pDataF->nRef = nRef; + pDataF->size = pSetNew->pDataF->size; } // sma @@ -504,9 +501,7 @@ static int32_t tsdbMergeFileSet(STsdb *pTsdb, SDFileSet *pSetOld, SDFileSet *pSe taosMemoryFree(pSmaF); } } else { - nRef = pSmaF->nRef; - *pSmaF = *pSetNew->pSmaF; - pSmaF->nRef = nRef; + pSmaF->size = pSetNew->pSmaF->size; } // stt diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c index 00c126bcb6c623f7b2e9457f8a35d6c90bcc902f..3db9ff2b42607a0b39c8cddb8f69bd3c21ce5f5e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c +++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c @@ -22,10 +22,10 @@ #define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + ((l) << 4)) #define SL_NODE_FORWARD(n, l) ((n)->forwards[l]) #define SL_NODE_BACKWARD(n, l) ((n)->forwards[(n)->level + (l)]) -#define SL_GET_NODE_FORWARD(n, l) ((SMemSkipListNode *)atomic_load_64((int64_t *)&SL_NODE_FORWARD(n, l))) -#define SL_GET_NODE_BACKWARD(n, l) ((SMemSkipListNode *)atomic_load_64((int64_t *)&SL_NODE_BACKWARD(n, l))) -#define SL_SET_NODE_FORWARD(n, l, p) atomic_store_64((int64_t *)&SL_NODE_FORWARD(n, l), (int64_t)(p)) -#define SL_SET_NODE_BACKWARD(n, l, p) atomic_store_64((int64_t *)&SL_NODE_BACKWARD(n, l), (int64_t)(p)) +#define SL_GET_NODE_FORWARD(n, l) ((SMemSkipListNode *)atomic_load_ptr(&SL_NODE_FORWARD(n, l))) +#define SL_GET_NODE_BACKWARD(n, l) ((SMemSkipListNode *)atomic_load_ptr(&SL_NODE_BACKWARD(n, l))) +#define 
SL_SET_NODE_FORWARD(n, l, p) atomic_store_ptr(&SL_NODE_FORWARD(n, l), p) +#define SL_SET_NODE_BACKWARD(n, l, p) atomic_store_ptr(&SL_NODE_BACKWARD(n, l), p) #define SL_MOVE_BACKWARD 0x1 #define SL_MOVE_FROM_POS 0x2 @@ -64,8 +64,6 @@ int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable) { taosMemoryFree(pMemTable); goto _err; } - pMemTable->qList.pNext = &pMemTable->qList; - pMemTable->qList.ppNext = &pMemTable->qList.pNext; vnodeBufPoolRef(pMemTable->pPool); *ppMemTable = pMemTable; @@ -76,9 +74,9 @@ _err: return code; } -void tsdbMemTableDestroy(SMemTable *pMemTable) { +void tsdbMemTableDestroy(SMemTable *pMemTable, bool proactive) { if (pMemTable) { - vnodeBufPoolUnRef(pMemTable->pPool); + vnodeBufPoolUnRef(pMemTable->pPool, proactive); taosMemoryFree(pMemTable->aBucket); taosMemoryFree(pMemTable); } @@ -112,30 +110,6 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitTbData *pSubmi tb_uid_t suid = pSubmitTbData->suid; tb_uid_t uid = pSubmitTbData->uid; -#if 0 - SMetaInfo info; - code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info, NULL); - if (code) { - code = TSDB_CODE_TDB_TABLE_NOT_EXIST; - goto _err; - } - if (info.suid != suid) { - code = TSDB_CODE_INVALID_MSG; - goto _err; - } - if (info.suid) { - metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info, NULL); - } - if (pSubmitTbData->sver != info.skmVer) { - tsdbError("vgId:%d, req sver:%d, skmVer:%d suid:%" PRId64 " uid:%" PRId64, TD_VID(pTsdb->pVnode), - pSubmitTbData->sver, info.skmVer, suid, uid); - code = TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER; - goto _err; - } - - if (pRsp) pRsp->sver = info.skmVer; -#endif - // create/get STbData to op code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData); if (code) { @@ -749,42 +723,27 @@ _exit: int32_t tsdbGetNRowsInTbData(STbData *pTbData) { return pTbData->sl.size; } -int32_t tsdbRefMemTable(SMemTable *pMemTable, void *pQHandle, _tsdb_reseek_func_t reseek, SQueryNode **ppNode) { +int32_t tsdbRefMemTable(SMemTable *pMemTable, SQueryNode *pQNode) { int32_t code = 0; int32_t nRef = atomic_fetch_add_32(&pMemTable->nRef, 1); ASSERT(nRef > 0); - /* - // register handle (todo: take concurrency in consideration) - *ppNode = taosMemoryMalloc(sizeof(SQueryNode)); - if (*ppNode == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; - } - (*ppNode)->pQHandle = pQHandle; - (*ppNode)->reseek = reseek; - (*ppNode)->pNext = pMemTable->qList.pNext; - (*ppNode)->ppNext = &pMemTable->qList.pNext; - pMemTable->qList.pNext->ppNext = &(*ppNode)->pNext; - pMemTable->qList.pNext = *ppNode; - */ + + vnodeBufPoolRegisterQuery(pMemTable->pPool, pQNode); + _exit: return code; } -int32_t tsdbUnrefMemTable(SMemTable *pMemTable, SQueryNode *pNode) { +int32_t tsdbUnrefMemTable(SMemTable *pMemTable, SQueryNode *pNode, bool proactive) { int32_t code = 0; - /* - // unregister handle (todo: take concurrency in consideration) + if (pNode) { - pNode->pNext->ppNext = pNode->ppNext; - *pNode->ppNext = pNode->pNext; - taosMemoryFree(pNode); + vnodeBufPoolDeregisterQuery(pMemTable->pPool, pNode, proactive); } - */ - int32_t nRef = atomic_sub_fetch_32(&pMemTable->nRef, 1); - if (nRef == 0) { - tsdbMemTableDestroy(pMemTable); + + if (atomic_sub_fetch_32(&pMemTable->nRef, 1) == 0) { + tsdbMemTableDestroy(pMemTable, proactive); } return code; @@ -826,30 +785,4 @@ SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable) { _exit: return aTbDataP; -} - -int32_t tsdbRecycleMemTable(SMemTable *pMemTable) { - int32_t code = 0; - - SQueryNode *pNode = pMemTable->qList.pNext; - while (1) { - ASSERT(pNode 
!= &pMemTable->qList); - SQueryNode *pNextNode = pNode->pNext; - - if (pNextNode == &pMemTable->qList) { - code = (*pNode->reseek)(pNode->pQHandle); - if (code) goto _exit; - break; - } else { - code = (*pNode->reseek)(pNode->pQHandle); - if (code) goto _exit; - pNode = pMemTable->qList.pNext; - ASSERT(pNode == pNextNode); - } - } - - // NOTE: Take care here, pMemTable is destroyed - -_exit: - return code; -} +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index f71b5b6706ccb152f4dc221d01f4a1ea6e207c7a..8901f644598ec4ca5343f4a35a7b063bf39096fd 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -88,7 +88,7 @@ _err: int tsdbClose(STsdb **pTsdb) { if (*pTsdb) { taosThreadRwlockWrlock(&(*pTsdb)->rwLock); - tsdbMemTableDestroy((*pTsdb)->mem); + tsdbMemTableDestroy((*pTsdb)->mem, true); (*pTsdb)->mem = NULL; taosThreadRwlockUnlock(&(*pTsdb)->rwLock); diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 87b5e59bc0e598ef3603a3f8a0d0f964bebc0b9b..588debbe52907a33541e4c08ecfe0ba620f88ae0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -1776,12 +1776,15 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* } if (minKey == k.ts) { + STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); + if (pSchema == NULL) { + return terrno; + } if (init) { - tsdbRowMerge(&merge, pRow); + tsdbRowMergerAdd(&merge, pRow, pSchema); } else { init = true; - STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid); - int32_t code = tsdbRowMergerInit(&merge, pRow, pSchema); + int32_t code = tsdbRowMergerInit(&merge, pRow, pSchema); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -2353,32 +2356,33 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) { SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; + TSDBROW *pRow = NULL, *piRow = NULL; int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? 
pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN; - if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) { - return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader); - } else { - TSDBROW *pRow = NULL, *piRow = NULL; - if (pBlockScanInfo->iter.hasVal) { - pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); - } + if (pBlockScanInfo->iter.hasVal) { + pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); + } - if (pBlockScanInfo->iiter.hasVal) { - piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); - } + if (pBlockScanInfo->iiter.hasVal) { + piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader); + } - // imem + file + last block - if (pBlockScanInfo->iiter.hasVal) { - return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader); - } + // two levels of mem-table does contain the valid rows + if (pRow != NULL && piRow != NULL) { + return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader); + } - // mem + file + last block - if (pBlockScanInfo->iter.hasVal) { - return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader); - } + // imem + file + last block + if (pBlockScanInfo->iiter.hasVal) { + return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader); + } - // files data blocks + last block - return mergeFileBlockAndLastBlock(pReader, pLastBlockReader, key, pBlockScanInfo, pBlockData); + // mem + file + last block + if (pBlockScanInfo->iter.hasVal) { + return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader); } + + // files data blocks + last block + return mergeFileBlockAndLastBlock(pReader, pLastBlockReader, key, pBlockScanInfo, pBlockData); } static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pBlockScanInfo, @@ -2421,6 +2425,19 @@ static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlock return code; } +static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo) { + SSDataBlock* pResBlock = pReader->pResBlock; + + pResBlock->info.id.uid = (pBlockScanInfo != NULL) ? pBlockScanInfo->uid : 0; + pResBlock->info.dataLoad = 1; + blockDataUpdateTsWindow(pResBlock, pReader->suppInfo.slotId[0]); + + setComposedBlockFlag(pReader, true); + + pReader->cost.composedBlocks += 1; + pReader->cost.buildComposedBlockTime += el; +} + static int32_t buildComposedDataBlock(STsdbReader* pReader) { int32_t code = TSDB_CODE_SUCCESS; @@ -2432,6 +2449,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { bool asc = ASCENDING_TRAVERSE(pReader->order); int64_t st = taosGetTimestampUs(); int32_t step = asc ? 1 : -1; + double el = 0; STableBlockScanInfo* pBlockScanInfo = NULL; if (pBlockInfo != NULL) { @@ -2494,10 +2512,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { } } - bool hasBlockLData = hasDataInLastBlock(pLastBlockReader); - // no data in last block and block, no need to proceed. - if ((hasBlockData == false) && (hasBlockLData == false)) { + if (hasBlockData == false) { break; } @@ -2516,15 +2532,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) { } _end: - pResBlock->info.id.uid = (pBlockScanInfo != NULL) ? 
pBlockScanInfo->uid : 0; - pResBlock->info.dataLoad = 1; - blockDataUpdateTsWindow(pResBlock, pReader->suppInfo.slotId[0]); - - setComposedBlockFlag(pReader, true); - double el = (taosGetTimestampUs() - st) / 1000.0; - - pReader->cost.composedBlocks += 1; - pReader->cost.buildComposedBlockTime += el; + el = (taosGetTimestampUs() - st) / 1000.0; + updateComposedBlockInfo(pReader, el, pBlockScanInfo); if (pResBlock->info.rows > 0) { tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 @@ -2768,6 +2777,8 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { return code; } + SSDataBlock* pResBlock = pReader->pResBlock; + while (1) { // load the last data block of current table STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter; @@ -2778,15 +2789,33 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { if (!hasNexTable) { return TSDB_CODE_SUCCESS; } + continue; } - code = doBuildDataBlock(pReader); - if (code != TSDB_CODE_SUCCESS) { - return code; + int64_t st = taosGetTimestampUs(); + while (1) { + bool hasBlockLData = hasDataInLastBlock(pLastBlockReader); + + // no data in last block and block, no need to proceed. + if (hasBlockLData == false) { + break; + } + + buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader); + if (pResBlock->info.rows >= pReader->capacity) { + break; + } } - if (pReader->pResBlock->info.rows > 0) { + double el = (taosGetTimestampUs() - st) / 1000.0; + updateComposedBlockInfo(pReader, el, pScanInfo); + + if (pResBlock->info.rows > 0) { + tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 + " rows:%d, elapsed time:%.2f ms %s", + pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, el, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -2829,7 +2858,37 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader); if (pBlockInfo == NULL) { // build data block from last data file - code = buildComposedDataBlock(pReader); + SBlockData* pBData = &pReader->status.fileBlockData; + tBlockDataReset(pBData); + + SSDataBlock* pResBlock = pReader->pResBlock; + tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr); + + int64_t st = taosGetTimestampUs(); + + while (1) { + bool hasBlockLData = hasDataInLastBlock(pLastBlockReader); + + // no data in last block and block, no need to proceed. 
+ if (hasBlockLData == false) { + break; + } + + buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader); + if (pResBlock->info.rows >= pReader->capacity) { + break; + } + } + + double el = (taosGetTimestampUs() - st) / 1000.0; + updateComposedBlockInfo(pReader, el, pScanInfo); + + if (pResBlock->info.rows > 0) { + tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 + " rows:%d, elapsed time:%.2f ms %s", + pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, el, pReader->idStr); + } } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) { code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid); if (code != TSDB_CODE_SUCCESS) { @@ -2848,10 +2907,38 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { // only return the rows in last block int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader); ASSERT(tsLast >= pBlock->maxKey.ts); - tBlockDataReset(&pReader->status.fileBlockData); + SBlockData* pBData = &pReader->status.fileBlockData; + tBlockDataReset(pBData); + + SSDataBlock* pResBlock = pReader->pResBlock; tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr); - code = buildComposedDataBlock(pReader); + + int64_t st = taosGetTimestampUs(); + + while (1) { + bool hasBlockLData = hasDataInLastBlock(pLastBlockReader); + + // no data in last block and block, no need to proceed. + if (hasBlockLData == false) { + break; + } + + buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader); + if (pResBlock->info.rows >= pReader->capacity) { + break; + } + } + + double el = (taosGetTimestampUs() - st) / 1000.0; + updateComposedBlockInfo(pReader, el, pScanInfo); + + if (pResBlock->info.rows > 0) { + tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 + " rows:%d, elapsed time:%.2f ms %s", + pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, el, pReader->idStr); + } } else { // whole block is required, return it directly SDataBlockInfo* pInfo = &pReader->pResBlock->info; pInfo->rows = pBlock->nRow; @@ -3928,7 +4015,7 @@ void tsdbReaderClose(STsdbReader* pReader) { } qTrace("tsdb/reader: %p, untake snapshot", pReader); - tsdbUntakeReadSnap(pReader, pReader->pReadSnap); + tsdbUntakeReadSnap(pReader, pReader->pReadSnap, true); taosThreadMutexDestroy(&pReader->readerMutex); @@ -3976,7 +4063,8 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) { if (pStatus->loadFromFile) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); if (pBlockInfo != NULL) { - pBlockScanInfo = taosHashGet(pStatus->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); + pBlockScanInfo = + *(STableBlockScanInfo**)taosHashGet(pStatus->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid)); if (pBlockScanInfo == NULL) { code = TSDB_CODE_INVALID_PARA; tsdbError("failed to locate the uid:%" PRIu64 " in query table uid list, total tables:%d, %s", pBlockInfo->uid, @@ -3990,20 +4078,50 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) { tsdbDataFReaderClose(&pReader->pFileReader); // resetDataBlockScanInfo excluding lastKey - STableBlockScanInfo* p = NULL; + STableBlockScanInfo** p = NULL; while ((p = taosHashIterate(pStatus->pTableMap, p)) != NULL) { - p->iterInit = false; - p->iiter.hasVal = false; - if 
(p->iter.iter != NULL) { - p->iter.iter = tsdbTbDataIterDestroy(p->iter.iter); + STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p; + + pInfo->iterInit = false; + pInfo->iter.hasVal = false; + pInfo->iiter.hasVal = false; + + if (pInfo->iter.iter != NULL) { + pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter); } - p->delSkyline = taosArrayDestroy(p->delSkyline); - // p->lastKey = ts; + if (pInfo->iiter.iter != NULL) { + pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter); + } + + pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline); + // pInfo->lastKey = ts; } } else { - pBlockScanInfo = *pStatus->pTableIter; + // resetDataBlockScanInfo excluding lastKey + STableBlockScanInfo** p = NULL; + + while ((p = taosHashIterate(pStatus->pTableMap, p)) != NULL) { + STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p; + + pInfo->iterInit = false; + pInfo->iter.hasVal = false; + pInfo->iiter.hasVal = false; + + if (pInfo->iter.iter != NULL) { + pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter); + } + + if (pInfo->iiter.iter != NULL) { + pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter); + } + + pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline); + // pInfo->lastKey = ts; + } + + pBlockScanInfo = pStatus->pTableIter == NULL ? NULL : *pStatus->pTableIter; if (pBlockScanInfo) { // save lastKey to restore memory iterator STimeWindow w = pReader->pResBlock->info.window; @@ -4011,7 +4129,8 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) { // reset current current table's data block scan info, pBlockScanInfo->iterInit = false; - // pBlockScanInfo->iiter.hasVal = false; + pBlockScanInfo->iter.hasVal = false; + pBlockScanInfo->iiter.hasVal = false; if (pBlockScanInfo->iter.iter != NULL) { pBlockScanInfo->iter.iter = tsdbTbDataIterDestroy(pBlockScanInfo->iter.iter); } @@ -4027,11 +4146,13 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) { } } - tsdbUntakeReadSnap(pReader, pReader->pReadSnap); + tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false); + pReader->pReadSnap = NULL; pReader->suspended = true; - tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo->uid, pReader->idStr); + tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo ? 
pBlockScanInfo->uid : 0, + pReader->idStr); return code; _err: @@ -4043,18 +4164,24 @@ static int32_t tsdbSetQueryReseek(void* pQHandle) { int32_t code = 0; STsdbReader* pReader = pQHandle; - taosThreadMutexLock(&pReader->readerMutex); + code = taosThreadMutexTryLock(&pReader->readerMutex); + if (code == 0) { + if (pReader->suspended) { + taosThreadMutexUnlock(&pReader->readerMutex); + return code; + } + + tsdbReaderSuspend(pReader); - if (pReader->suspended) { taosThreadMutexUnlock(&pReader->readerMutex); + return code; + } else if (code == EBUSY) { + return TSDB_CODE_VND_QUERY_BUSY; + } else { + terrno = TAOS_SYSTEM_ERROR(code); + return TSDB_CODE_FAILED; } - - tsdbReaderSuspend(pReader); - - taosThreadMutexUnlock(&pReader->readerMutex); - - return code; } int32_t tsdbReaderResume(STsdbReader* pReader) { @@ -4403,17 +4530,18 @@ SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) { int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { SReaderStatus* pStatus = &pReader->status; - qTrace("tsdb/read: %p, take read mutex", pReader); + qTrace("tsdb/reader-reset: %p, take read mutex", pReader); taosThreadMutexLock(&pReader->readerMutex); if (pReader->suspended) { tsdbReaderResume(pReader); } - taosThreadMutexUnlock(&pReader->readerMutex); - if (isEmptyQueryTimeWindow(&pReader->window) || pReader->pReadSnap == NULL) { tsdbDebug("tsdb reader reset return %p", pReader->pReadSnap); + + taosThreadMutexUnlock(&pReader->readerMutex); + return TSDB_CODE_SUCCESS; } @@ -4449,6 +4577,9 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { if (code != TSDB_CODE_SUCCESS) { tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", pReader, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr); + + taosThreadMutexUnlock(&pReader->readerMutex); + return code; } } @@ -4458,6 +4589,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { pReader, pReader->suid, numOfTables, pCond->twindows.skey, pReader->window.skey, pReader->window.ekey, pReader->idStr); + taosThreadMutexUnlock(&pReader->readerMutex); + return code; } @@ -4614,68 +4747,91 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6 return TSDB_CODE_SUCCESS; } -int32_t tsdbTakeReadSnap(STsdbReader* pReader, _tsdb_reseek_func_t reseek, STsdbReadSnap** ppSnap) { +int32_t tsdbTakeReadSnap(STsdbReader* pReader, _query_reseek_func_t reseek, STsdbReadSnap** ppSnap) { int32_t code = 0; STsdb* pTsdb = pReader->pTsdb; SVersionRange* pRange = &pReader->verRange; // alloc - *ppSnap = (STsdbReadSnap*)taosMemoryCalloc(1, sizeof(STsdbReadSnap)); - if (*ppSnap == NULL) { + STsdbReadSnap* pSnap = (STsdbReadSnap*)taosMemoryCalloc(1, sizeof(*pSnap)); + if (pSnap == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } // lock - code = taosThreadRwlockRdlock(&pTsdb->rwLock); - if (code) { - code = TAOS_SYSTEM_ERROR(code); - goto _exit; - } + taosThreadRwlockRdlock(&pTsdb->rwLock); // take snapshot if (pTsdb->mem && (pRange->minVer <= pTsdb->mem->maxVer && pRange->maxVer >= pTsdb->mem->minVer)) { - tsdbRefMemTable(pTsdb->mem, pReader, reseek, &(*ppSnap)->pNode); - (*ppSnap)->pMem = pTsdb->mem; + pSnap->pMem = pTsdb->mem; + pSnap->pNode = taosMemoryMalloc(sizeof(*pSnap->pNode)); + if (pSnap->pNode == NULL) { + taosThreadRwlockUnlock(&pTsdb->rwLock); + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + pSnap->pNode->pQHandle = pReader; + pSnap->pNode->reseek = reseek; + + 
tsdbRefMemTable(pTsdb->mem, pSnap->pNode); } if (pTsdb->imem && (pRange->minVer <= pTsdb->imem->maxVer && pRange->maxVer >= pTsdb->imem->minVer)) { - tsdbRefMemTable(pTsdb->imem, pReader, reseek, &(*ppSnap)->pINode); - (*ppSnap)->pIMem = pTsdb->imem; + pSnap->pIMem = pTsdb->imem; + pSnap->pINode = taosMemoryMalloc(sizeof(*pSnap->pINode)); + if (pSnap->pINode == NULL) { + taosThreadRwlockUnlock(&pTsdb->rwLock); + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + pSnap->pINode->pQHandle = pReader; + pSnap->pINode->reseek = reseek; + + tsdbRefMemTable(pTsdb->imem, pSnap->pINode); } // fs - code = tsdbFSRef(pTsdb, &(*ppSnap)->fs); + code = tsdbFSRef(pTsdb, &pSnap->fs); if (code) { taosThreadRwlockUnlock(&pTsdb->rwLock); goto _exit; } // unlock - code = taosThreadRwlockUnlock(&pTsdb->rwLock); - if (code) { - code = TAOS_SYSTEM_ERROR(code); - goto _exit; - } + taosThreadRwlockUnlock(&pTsdb->rwLock); tsdbTrace("vgId:%d, take read snapshot", TD_VID(pTsdb->pVnode)); + _exit: + if (code) { + *ppSnap = NULL; + if (pSnap) { + if (pSnap->pNode) taosMemoryFree(pSnap->pNode); + if (pSnap->pINode) taosMemoryFree(pSnap->pINode); + taosMemoryFree(pSnap); + } + } else { + *ppSnap = pSnap; + } return code; } -void tsdbUntakeReadSnap(STsdbReader* pReader, STsdbReadSnap* pSnap) { +void tsdbUntakeReadSnap(STsdbReader* pReader, STsdbReadSnap* pSnap, bool proactive) { STsdb* pTsdb = pReader->pTsdb; if (pSnap) { if (pSnap->pMem) { - tsdbUnrefMemTable(pSnap->pMem, pSnap->pNode); + tsdbUnrefMemTable(pSnap->pMem, pSnap->pNode, proactive); } if (pSnap->pIMem) { - tsdbUnrefMemTable(pSnap->pIMem, pSnap->pINode); + tsdbUnrefMemTable(pSnap->pIMem, pSnap->pINode, proactive); } tsdbFSUnref(pTsdb, &pSnap->fs); + if (pSnap->pNode) taosMemoryFree(pSnap->pNode); + if (pSnap->pINode) taosMemoryFree(pSnap->pINode); taosMemoryFree(pSnap); } tsdbTrace("vgId:%d, untake read snapshot", TD_VID(pTsdb->pVnode)); diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 6f56539b4f136e64b481140be2301ed2b44c598e..c280e8c0e747f1ce450c187d5e3d51183ca02973 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -15,272 +15,628 @@ #include "tsdb.h" -// STsdbSnapReader ======================================== -typedef enum { SNAP_DATA_FILE_ITER = 0, SNAP_STT_FILE_ITER } EFIterT; +extern int32_t tsdbReadDataBlockEx(SDataFReader* pReader, SDataBlk* pDataBlk, SBlockData* pBlockData); +extern int32_t tsdbUpdateTableSchema(SMeta* pMeta, int64_t suid, int64_t uid, SSkmInfo* pSkmInfo); +extern int32_t tsdbWriteDataBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SMapData* mDataBlk, int8_t cmprAlg); +extern int32_t tsdbWriteSttBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SArray* aSttBlk, int8_t cmprAlg); + +// STsdbDataIter2 ======================================== +#define TSDB_MEM_TABLE_DATA_ITER 0 +#define TSDB_DATA_FILE_DATA_ITER 1 +#define TSDB_STT_FILE_DATA_ITER 2 +#define TSDB_TOMB_FILE_DATA_ITER 3 + +typedef struct STsdbDataIter2 STsdbDataIter2; +typedef struct STsdbFilterInfo STsdbFilterInfo; + typedef struct { - SRBTreeNode n; - SRowInfo rInfo; - EFIterT type; + int64_t suid; + int64_t uid; + SDelData delData; +} SDelInfo; + +struct STsdbDataIter2 { + STsdbDataIter2* next; + SRBTreeNode rbtn; + + int32_t type; + SRowInfo rowInfo; + SDelInfo delInfo; union { + // TSDB_MEM_TABLE_DATA_ITER struct { - SArray* aBlockIdx; - int32_t iBlockIdx; - SBlockIdx* pBlockIdx; - SMapData mBlock; - int32_t iBlock; - }; // .data file + SMemTable* 
pMemTable; + } mIter; + + // TSDB_DATA_FILE_DATA_ITER struct { - int32_t iStt; - SArray* aSttBlk; - int32_t iSttBlk; - }; // .stt file + SDataFReader* pReader; + SArray* aBlockIdx; // SArray + SMapData mDataBlk; + SBlockData bData; + int32_t iBlockIdx; + int32_t iDataBlk; + int32_t iRow; + } dIter; + + // TSDB_STT_FILE_DATA_ITER + struct { + SDataFReader* pReader; + int32_t iStt; + SArray* aSttBlk; + SBlockData bData; + int32_t iSttBlk; + int32_t iRow; + } sIter; + // TSDB_TOMB_FILE_DATA_ITER + struct { + SDelFReader* pReader; + SArray* aDelIdx; + SArray* aDelData; + int32_t iDelIdx; + int32_t iDelData; + } tIter; }; - SBlockData bData; - int32_t iRow; -} SFDataIter; +}; -struct STsdbSnapReader { - STsdb* pTsdb; +#define TSDB_FILTER_FLAG_BY_VERSION 0x1 +struct STsdbFilterInfo { + int32_t flag; int64_t sver; int64_t ever; - STsdbFS fs; - int8_t type; - // for data file - int8_t dataDone; - int32_t fid; - SDataFReader* pDataFReader; - SFDataIter* pIter; - SRBTree rbt; - SFDataIter aFDataIter[TSDB_MAX_STT_TRIGGER + 1]; - SBlockData bData; - SSkmInfo skmTable; - // for del file - int8_t delDone; - SDelFReader* pDelFReader; - SArray* aDelIdx; // SArray - int32_t iDelIdx; - SArray* aDelData; // SArray - uint8_t* aBuf[5]; }; -extern int32_t tsdbReadDataBlockEx(SDataFReader* pReader, SDataBlk* pDataBlk, SBlockData* pBlockData); -extern int32_t tsdbUpdateTableSchema(SMeta* pMeta, int64_t suid, int64_t uid, SSkmInfo* pSkmInfo); +#define TSDB_RBTN_TO_DATA_ITER(pNode) ((STsdbDataIter2*)(((char*)pNode) - offsetof(STsdbDataIter2, rbtn))) -static int32_t tFDataIterCmprFn(const SRBTreeNode* pNode1, const SRBTreeNode* pNode2) { - SFDataIter* pIter1 = (SFDataIter*)(((uint8_t*)pNode1) - offsetof(SFDataIter, n)); - SFDataIter* pIter2 = (SFDataIter*)(((uint8_t*)pNode2) - offsetof(SFDataIter, n)); +/* open */ +static int32_t tsdbOpenDataFileDataIter(SDataFReader* pReader, STsdbDataIter2** ppIter) { + int32_t code = 0; + int32_t lino = 0; - return tRowInfoCmprFn(&pIter1->rInfo, &pIter2->rInfo); + // create handle + STsdbDataIter2* pIter = (STsdbDataIter2*)taosMemoryCalloc(1, sizeof(*pIter)); + if (pIter == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + pIter->type = TSDB_DATA_FILE_DATA_ITER; + pIter->dIter.pReader = pReader; + if ((pIter->dIter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + code = tBlockDataCreate(&pIter->dIter.bData); + TSDB_CHECK_CODE(code, lino, _exit); + + pIter->dIter.iBlockIdx = 0; + pIter->dIter.iDataBlk = 0; + pIter->dIter.iRow = 0; + + // read data + code = tsdbReadBlockIdx(pReader, pIter->dIter.aBlockIdx); + TSDB_CHECK_CODE(code, lino, _exit); + + if (taosArrayGetSize(pIter->dIter.aBlockIdx) == 0) goto _clear; + +_exit: + if (code) { + if (pIter) { + _clear: + tBlockDataDestroy(&pIter->dIter.bData); + taosArrayDestroy(pIter->dIter.aBlockIdx); + taosMemoryFree(pIter); + pIter = NULL; + } + } + *ppIter = pIter; + return code; } -static int32_t tsdbSnapReadOpenFile(STsdbSnapReader* pReader) { +static int32_t tsdbOpenSttFileDataIter(SDataFReader* pReader, int32_t iStt, STsdbDataIter2** ppIter) { int32_t code = 0; int32_t lino = 0; - SDFileSet dFileSet = {.fid = pReader->fid}; - SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT); - if (pSet == NULL) return code; + // create handle + STsdbDataIter2* pIter = (STsdbDataIter2*)taosMemoryCalloc(1, sizeof(*pIter)); + if (pIter == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + 
TSDB_CHECK_CODE(code, lino, _exit); + } - pReader->fid = pSet->fid; - code = tsdbDataFReaderOpen(&pReader->pDataFReader, pReader->pTsdb, pSet); - TSDB_CHECK_CODE(code, lino, _exit); + pIter->type = TSDB_STT_FILE_DATA_ITER; + pIter->sIter.pReader = pReader; + pIter->sIter.iStt = iStt; + pIter->sIter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk)); + if (pIter->sIter.aSttBlk == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } - pReader->pIter = NULL; - tRBTreeCreate(&pReader->rbt, tFDataIterCmprFn); + code = tBlockDataCreate(&pIter->sIter.bData); + TSDB_CHECK_CODE(code, lino, _exit); - // .data file - SFDataIter* pIter = &pReader->aFDataIter[0]; - pIter->type = SNAP_DATA_FILE_ITER; + pIter->sIter.iSttBlk = 0; + pIter->sIter.iRow = 0; - code = tsdbReadBlockIdx(pReader->pDataFReader, pIter->aBlockIdx); + // read data + code = tsdbReadSttBlk(pReader, iStt, pIter->sIter.aSttBlk); TSDB_CHECK_CODE(code, lino, _exit); - for (pIter->iBlockIdx = 0; pIter->iBlockIdx < taosArrayGetSize(pIter->aBlockIdx); pIter->iBlockIdx++) { - pIter->pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->aBlockIdx, pIter->iBlockIdx); + if (taosArrayGetSize(pIter->sIter.aSttBlk) == 0) goto _clear; - code = tsdbReadDataBlk(pReader->pDataFReader, pIter->pBlockIdx, &pIter->mBlock); +_exit: + if (code) { + if (pIter) { + _clear: + taosArrayDestroy(pIter->sIter.aSttBlk); + tBlockDataDestroy(&pIter->sIter.bData); + taosMemoryFree(pIter); + pIter = NULL; + } + } + *ppIter = pIter; + return code; +} + +static int32_t tsdbOpenTombFileDataIter(SDelFReader* pReader, STsdbDataIter2** ppIter) { + int32_t code = 0; + int32_t lino = 0; + + STsdbDataIter2* pIter = (STsdbDataIter2*)taosMemoryCalloc(1, sizeof(*pIter)); + if (pIter == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); + } + pIter->type = TSDB_TOMB_FILE_DATA_ITER; - for (pIter->iBlock = 0; pIter->iBlock < pIter->mBlock.nItem; pIter->iBlock++) { - SDataBlk dataBlk; - tMapDataGetItemByIdx(&pIter->mBlock, pIter->iBlock, &dataBlk, tGetDataBlk); + pIter->tIter.pReader = pReader; + if ((pIter->tIter.aDelIdx = taosArrayInit(0, sizeof(SDelIdx))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + if ((pIter->tIter.aDelData = taosArrayInit(0, sizeof(SDelData))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } - if (dataBlk.minVer > pReader->ever || dataBlk.maxVer < pReader->sver) continue; + code = tsdbReadDelIdx(pReader, pIter->tIter.aDelIdx); + TSDB_CHECK_CODE(code, lino, _exit); - code = tsdbReadDataBlockEx(pReader->pDataFReader, &dataBlk, &pIter->bData); - TSDB_CHECK_CODE(code, lino, _exit); + if (taosArrayGetSize(pIter->tIter.aDelIdx) == 0) goto _clear; - ASSERT(pIter->pBlockIdx->suid == pIter->bData.suid); - ASSERT(pIter->pBlockIdx->uid == pIter->bData.uid); + pIter->tIter.iDelIdx = 0; + pIter->tIter.iDelData = 0; - for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) { - int64_t rowVer = pIter->bData.aVersion[pIter->iRow]; +_exit: + if (code) { + if (pIter) { + _clear: + taosArrayDestroy(pIter->tIter.aDelIdx); + taosArrayDestroy(pIter->tIter.aDelData); + taosMemoryFree(pIter); + pIter = NULL; + } + } + *ppIter = pIter; + return code; +} - if (rowVer >= pReader->sver && rowVer <= pReader->ever) { - pIter->rInfo.suid = pIter->pBlockIdx->suid; - pIter->rInfo.uid = pIter->pBlockIdx->uid; - pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow); - goto _add_iter_and_break; +/* close */ +static void 
tsdbCloseDataFileDataIter(STsdbDataIter2* pIter) { + tBlockDataDestroy(&pIter->dIter.bData); + tMapDataClear(&pIter->dIter.mDataBlk); + taosArrayDestroy(pIter->dIter.aBlockIdx); + taosMemoryFree(pIter); +} + +static void tsdbCloseSttFileDataIter(STsdbDataIter2* pIter) { + tBlockDataDestroy(&pIter->sIter.bData); + taosArrayDestroy(pIter->sIter.aSttBlk); + taosMemoryFree(pIter); +} + +static void tsdbCloseTombFileDataIter(STsdbDataIter2* pIter) { + taosArrayDestroy(pIter->tIter.aDelData); + taosArrayDestroy(pIter->tIter.aDelIdx); + taosMemoryFree(pIter); +} + +static void tsdbCloseDataIter2(STsdbDataIter2* pIter) { + if (pIter->type == TSDB_MEM_TABLE_DATA_ITER) { + ASSERT(0); + } else if (pIter->type == TSDB_DATA_FILE_DATA_ITER) { + tsdbCloseDataFileDataIter(pIter); + } else if (pIter->type == TSDB_STT_FILE_DATA_ITER) { + tsdbCloseSttFileDataIter(pIter); + } else if (pIter->type == TSDB_TOMB_FILE_DATA_ITER) { + tsdbCloseTombFileDataIter(pIter); + } else { + ASSERT(0); + } +} + +/* cmpr */ +static int32_t tsdbDataIterCmprFn(const SRBTreeNode* pNode1, const SRBTreeNode* pNode2) { + STsdbDataIter2* pIter1 = TSDB_RBTN_TO_DATA_ITER(pNode1); + STsdbDataIter2* pIter2 = TSDB_RBTN_TO_DATA_ITER(pNode2); + return tRowInfoCmprFn(&pIter1->rowInfo, &pIter2->rowInfo); +} + +/* seek */ + +/* iter next */ +static int32_t tsdbDataFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo* pFilterInfo) { + int32_t code = 0; + int32_t lino = 0; + + for (;;) { + while (pIter->dIter.iRow < pIter->dIter.bData.nRow) { + if (pFilterInfo) { + if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) { + if (pIter->dIter.bData.aVersion[pIter->dIter.iRow] < pFilterInfo->sver || + pIter->dIter.bData.aVersion[pIter->dIter.iRow] > pFilterInfo->ever) { + pIter->dIter.iRow++; + continue; + } } } + + pIter->rowInfo.suid = pIter->dIter.bData.suid; + pIter->rowInfo.uid = pIter->dIter.bData.uid; + pIter->rowInfo.row = tsdbRowFromBlockData(&pIter->dIter.bData, pIter->dIter.iRow); + pIter->dIter.iRow++; + goto _exit; } - continue; + for (;;) { + while (pIter->dIter.iDataBlk < pIter->dIter.mDataBlk.nItem) { + SDataBlk dataBlk; + tMapDataGetItemByIdx(&pIter->dIter.mDataBlk, pIter->dIter.iDataBlk, &dataBlk, tGetDataBlk); + + // filter + if (pFilterInfo) { + if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) { + if (pFilterInfo->sver > dataBlk.maxVer || pFilterInfo->ever < dataBlk.minVer) { + pIter->dIter.iDataBlk++; + continue; + } + } + } - _add_iter_and_break: - tRBTreePut(&pReader->rbt, (SRBTreeNode*)pIter); - break; - } + code = tsdbReadDataBlockEx(pIter->dIter.pReader, &dataBlk, &pIter->dIter.bData); + TSDB_CHECK_CODE(code, lino, _exit); - // .stt file - pIter = &pReader->aFDataIter[1]; - for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { - pIter->type = SNAP_STT_FILE_ITER; - pIter->iStt = iStt; + pIter->dIter.iDataBlk++; + pIter->dIter.iRow = 0; - code = tsdbReadSttBlk(pReader->pDataFReader, iStt, pIter->aSttBlk); - TSDB_CHECK_CODE(code, lino, _exit); + break; + } - for (pIter->iSttBlk = 0; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) { - SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk); + if (pIter->dIter.iRow < pIter->dIter.bData.nRow) break; - if (pSttBlk->minVer > pReader->ever) continue; - if (pSttBlk->maxVer < pReader->sver) continue; + for (;;) { + if (pIter->dIter.iBlockIdx < taosArrayGetSize(pIter->dIter.aBlockIdx)) { + SBlockIdx* pBlockIdx = taosArrayGet(pIter->dIter.aBlockIdx, pIter->dIter.iBlockIdx); - code = tsdbReadSttBlockEx(pReader->pDataFReader, iStt, pSttBlk, 
&pIter->bData); - TSDB_CHECK_CODE(code, lino, _exit); + code = tsdbReadDataBlk(pIter->dIter.pReader, pBlockIdx, &pIter->dIter.mDataBlk); + TSDB_CHECK_CODE(code, lino, _exit); - for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) { - int64_t rowVer = pIter->bData.aVersion[pIter->iRow]; + pIter->dIter.iBlockIdx++; + pIter->dIter.iDataBlk = 0; - if (rowVer >= pReader->sver && rowVer <= pReader->ever) { - pIter->rInfo.suid = pIter->bData.suid; - pIter->rInfo.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow]; - pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow); - goto _add_iter; + break; + } else { + pIter->rowInfo = (SRowInfo){0}; + goto _exit; } } } - - continue; - - _add_iter: - tRBTreePut(&pReader->rbt, (SRBTreeNode*)pIter); - pIter++; } _exit: if (code) { - tsdbError("vgId:%d, %s failed since %s", TD_VID(pReader->pTsdb->pVnode), __func__, tstrerror(code)); - } else { - tsdbInfo("vgId:%d, %s done, path:%s, fid:%d", TD_VID(pReader->pTsdb->pVnode), __func__, pReader->pTsdb->path, - pReader->fid); + tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } return code; } -static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) { +static int32_t tsdbSttFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo* pFilterInfo) { int32_t code = 0; + int32_t lino = 0; - if (pReader->pIter) { - SFDataIter* pIter = NULL; - while (true) { - _find_row: - pIter = pReader->pIter; - for (pIter->iRow++; pIter->iRow < pIter->bData.nRow; pIter->iRow++) { - int64_t rowVer = pIter->bData.aVersion[pIter->iRow]; - - if (rowVer >= pReader->sver && rowVer <= pReader->ever) { - pIter->rInfo.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow]; - pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow); - goto _out; + for (;;) { + while (pIter->sIter.iRow < pIter->sIter.bData.nRow) { + if (pFilterInfo) { + if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) { + if (pFilterInfo->sver > pIter->sIter.bData.aVersion[pIter->sIter.iRow] || + pFilterInfo->ever < pIter->sIter.bData.aVersion[pIter->sIter.iRow]) { + pIter->sIter.iRow++; + continue; + } } } - if (pIter->type == SNAP_DATA_FILE_ITER) { - while (true) { - for (pIter->iBlock++; pIter->iBlock < pIter->mBlock.nItem; pIter->iBlock++) { - SDataBlk dataBlk; - tMapDataGetItemByIdx(&pIter->mBlock, pIter->iBlock, &dataBlk, tGetDataBlk); - - if (dataBlk.minVer > pReader->ever || dataBlk.maxVer < pReader->sver) continue; + pIter->rowInfo.suid = pIter->sIter.bData.suid; + pIter->rowInfo.uid = pIter->sIter.bData.uid ? 
pIter->sIter.bData.uid : pIter->sIter.bData.aUid[pIter->sIter.iRow]; + pIter->rowInfo.row = tsdbRowFromBlockData(&pIter->sIter.bData, pIter->sIter.iRow); + pIter->sIter.iRow++; + goto _exit; + } - code = tsdbReadDataBlockEx(pReader->pDataFReader, &dataBlk, &pIter->bData); - if (code) goto _err; + for (;;) { + if (pIter->sIter.iSttBlk < taosArrayGetSize(pIter->sIter.aSttBlk)) { + SSttBlk* pSttBlk = taosArrayGet(pIter->sIter.aSttBlk, pIter->sIter.iSttBlk); - pIter->iRow = -1; - goto _find_row; + if (pFilterInfo) { + if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) { + if (pFilterInfo->sver > pSttBlk->maxVer || pFilterInfo->ever < pSttBlk->minVer) { + pIter->sIter.iSttBlk++; + continue; + } } - - pIter->iBlockIdx++; - if (pIter->iBlockIdx >= taosArrayGetSize(pIter->aBlockIdx)) break; - - pIter->pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->aBlockIdx, pIter->iBlockIdx); - code = tsdbReadDataBlk(pReader->pDataFReader, pIter->pBlockIdx, &pIter->mBlock); - if (code) goto _err; - pIter->iBlock = -1; } - pReader->pIter = NULL; + code = tsdbReadSttBlockEx(pIter->sIter.pReader, pIter->sIter.iStt, pSttBlk, &pIter->sIter.bData); + TSDB_CHECK_CODE(code, lino, _exit); + + pIter->sIter.iRow = 0; + pIter->sIter.iSttBlk++; break; - } else if (pIter->type == SNAP_STT_FILE_ITER) { - for (pIter->iSttBlk++; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) { - SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk); + } else { + pIter->rowInfo = (SRowInfo){0}; + goto _exit; + } + } + } + +_exit: + if (code) { + tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} - if (pSttBlk->minVer > pReader->ever || pSttBlk->maxVer < pReader->sver) continue; +static int32_t tsdbTombFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo* pFilterInfo) { + int32_t code = 0; + int32_t lino = 0; - code = tsdbReadSttBlockEx(pReader->pDataFReader, pIter->iStt, pSttBlk, &pIter->bData); - if (code) goto _err; + for (;;) { + while (pIter->tIter.iDelData < taosArrayGetSize(pIter->tIter.aDelData)) { + SDelData* pDelData = taosArrayGet(pIter->tIter.aDelData, pIter->tIter.iDelData); - pIter->iRow = -1; - goto _find_row; + if (pFilterInfo) { + if (pFilterInfo->flag & TSDB_FILTER_FLAG_BY_VERSION) { + if (pFilterInfo->sver > pDelData->version || pFilterInfo->ever < pDelData->version) { + pIter->tIter.iDelData++; + continue; + } } + } - pReader->pIter = NULL; + pIter->delInfo.delData = *pDelData; + pIter->tIter.iDelData++; + goto _exit; + } + + for (;;) { + if (pIter->tIter.iDelIdx < taosArrayGetSize(pIter->tIter.aDelIdx)) { + SDelIdx* pDelIdx = taosArrayGet(pIter->tIter.aDelIdx, pIter->tIter.iDelIdx); + + code = tsdbReadDelData(pIter->tIter.pReader, pDelIdx, pIter->tIter.aDelData); + TSDB_CHECK_CODE(code, lino, _exit); + + pIter->delInfo.suid = pDelIdx->suid; + pIter->delInfo.uid = pDelIdx->uid; + pIter->tIter.iDelData = 0; + pIter->tIter.iDelIdx++; break; } else { - ASSERT(0); + pIter->delInfo = (SDelInfo){0}; + goto _exit; } } + } - _out: - pIter = (SFDataIter*)tRBTreeMin(&pReader->rbt); - if (pReader->pIter && pIter) { - int32_t c = tRowInfoCmprFn(&pReader->pIter->rInfo, &pIter->rInfo); - if (c > 0) { - tRBTreePut(&pReader->rbt, (SRBTreeNode*)pReader->pIter); - pReader->pIter = NULL; - } else { - ASSERT(c); - } +_exit: + if (code) { + tsdbError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t tsdbDataIterNext2(STsdbDataIter2* pIter, STsdbFilterInfo* pFilterInfo) { + int32_t code = 0; + + if 
(pIter->type == TSDB_MEM_TABLE_DATA_ITER) { + ASSERT(0); + return code; + } else if (pIter->type == TSDB_DATA_FILE_DATA_ITER) { + return tsdbDataFileDataIterNext(pIter, pFilterInfo); + } else if (pIter->type == TSDB_STT_FILE_DATA_ITER) { + return tsdbSttFileDataIterNext(pIter, pFilterInfo); + } else if (pIter->type == TSDB_TOMB_FILE_DATA_ITER) { + return tsdbTombFileDataIterNext(pIter, pFilterInfo); + } else { + ASSERT(0); + return code; + } +} + +/* get */ + +// STsdbSnapReader ======================================== +struct STsdbSnapReader { + STsdb* pTsdb; + int64_t sver; + int64_t ever; + int8_t type; + uint8_t* aBuf[5]; + + STsdbFS fs; + TABLEID tbid; + SSkmInfo skmTable; + + // timeseries data + int8_t dataDone; + int32_t fid; + + SDataFReader* pDataFReader; + STsdbDataIter2* iterList; + STsdbDataIter2* pIter; + SRBTree rbt; + SBlockData bData; + + // tombstone data + int8_t delDone; + SDelFReader* pDelFReader; + STsdbDataIter2* pTIter; + SArray* aDelData; +}; + +static int32_t tsdbSnapReadFileDataStart(STsdbSnapReader* pReader) { + int32_t code = 0; + int32_t lino = 0; + + SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &(SDFileSet){.fid = pReader->fid}, tDFileSetCmprFn, TD_GT); + if (pSet == NULL) { + pReader->fid = INT32_MAX; + goto _exit; + } + + pReader->fid = pSet->fid; + + tRBTreeCreate(&pReader->rbt, tsdbDataIterCmprFn); + + code = tsdbDataFReaderOpen(&pReader->pDataFReader, pReader->pTsdb, pSet); + TSDB_CHECK_CODE(code, lino, _exit); + + code = tsdbOpenDataFileDataIter(pReader->pDataFReader, &pReader->pIter); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pReader->pIter) { + // iter to next with filter info (sver, ever) + code = tsdbDataIterNext2(pReader->pIter, + &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, // flag + .sver = pReader->sver, + .ever = pReader->ever}); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pReader->pIter->rowInfo.suid || pReader->pIter->rowInfo.uid) { + // add to rbtree + tRBTreePut(&pReader->rbt, &pReader->pIter->rbtn); + + // add to iterList + pReader->pIter->next = pReader->iterList; + pReader->iterList = pReader->pIter; + } else { + tsdbCloseDataIter2(pReader->pIter); } } - if (pReader->pIter == NULL) { - pReader->pIter = (SFDataIter*)tRBTreeMin(&pReader->rbt); + for (int32_t iStt = 0; iStt < pSet->nSttF; ++iStt) { + code = tsdbOpenSttFileDataIter(pReader->pDataFReader, iStt, &pReader->pIter); + TSDB_CHECK_CODE(code, lino, _exit); + if (pReader->pIter) { - tRBTreeDrop(&pReader->rbt, (SRBTreeNode*)pReader->pIter); + // iter to valid row + code = tsdbDataIterNext2(pReader->pIter, + &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, // flag + .sver = pReader->sver, + .ever = pReader->ever}); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pReader->pIter->rowInfo.suid || pReader->pIter->rowInfo.uid) { + // add to rbtree + tRBTreePut(&pReader->rbt, &pReader->pIter->rbtn); + + // add to iterList + pReader->pIter->next = pReader->iterList; + pReader->iterList = pReader->pIter; + } else { + tsdbCloseDataIter2(pReader->pIter); + } } } - return code; + pReader->pIter = NULL; -_err: +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbInfo("vgId:%d %s done, fid:%d", TD_VID(pReader->pTsdb->pVnode), __func__, pReader->fid); + } return code; } -static SRowInfo* tsdbSnapGetRow(STsdbSnapReader* pReader) { +static void tsdbSnapReadFileDataEnd(STsdbSnapReader* pReader) { + while (pReader->iterList) { + STsdbDataIter2* pIter = 
pReader->iterList; + pReader->iterList = pIter->next; + tsdbCloseDataIter2(pIter); + } + + tsdbDataFReaderClose(&pReader->pDataFReader); +} + +static int32_t tsdbSnapReadNextRow(STsdbSnapReader* pReader, SRowInfo** ppRowInfo) { + int32_t code = 0; + int32_t lino = 0; + if (pReader->pIter) { - return &pReader->pIter->rInfo; - } else { - tsdbSnapNextRow(pReader); + code = tsdbDataIterNext2(pReader->pIter, &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, // flag + .sver = pReader->sver, + .ever = pReader->ever}); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pReader->pIter->rowInfo.suid == 0 && pReader->pIter->rowInfo.uid == 0) { + pReader->pIter = NULL; + } else { + SRBTreeNode* pNode = tRBTreeMin(&pReader->rbt); + if (pNode) { + int32_t c = tsdbDataIterCmprFn(&pReader->pIter->rbtn, pNode); + if (c > 0) { + tRBTreePut(&pReader->rbt, &pReader->pIter->rbtn); + pReader->pIter = NULL; + } else if (c == 0) { + ASSERT(0); + } + } + } + } + if (pReader->pIter == NULL) { + SRBTreeNode* pNode = tRBTreeMin(&pReader->rbt); + if (pNode) { + tRBTreeDrop(&pReader->rbt, pNode); + pReader->pIter = TSDB_RBTN_TO_DATA_ITER(pNode); + } + } + + if (ppRowInfo) { if (pReader->pIter) { - return &pReader->pIter->rInfo; + *ppRowInfo = &pReader->pIter->rowInfo; } else { - return NULL; + *ppRowInfo = NULL; } } + +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t tsdbSnapReadGetRow(STsdbSnapReader* pReader, SRowInfo** ppRowInfo) { + if (pReader->pIter) { + *ppRowInfo = &pReader->pIter->rowInfo; + return 0; + } + + return tsdbSnapReadNextRow(pReader, ppRowInfo); } static int32_t tsdbSnapCmprData(STsdbSnapReader* pReader, uint8_t** ppData) { @@ -316,155 +672,215 @@ _exit: return code; } -static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) { +static int32_t tsdbSnapReadTimeSeriesData(STsdbSnapReader* pReader, uint8_t** ppData) { int32_t code = 0; int32_t lino = 0; STsdb* pTsdb = pReader->pTsdb; - while (true) { + tBlockDataReset(&pReader->bData); + + for (;;) { + // start a new file read if need if (pReader->pDataFReader == NULL) { - code = tsdbSnapReadOpenFile(pReader); + code = tsdbSnapReadFileDataStart(pReader); TSDB_CHECK_CODE(code, lino, _exit); } if (pReader->pDataFReader == NULL) break; - SRowInfo* pRowInfo = tsdbSnapGetRow(pReader); + SRowInfo* pRowInfo; + code = tsdbSnapReadGetRow(pReader, &pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); + if (pRowInfo == NULL) { - tsdbDataFReaderClose(&pReader->pDataFReader); + tsdbSnapReadFileDataEnd(pReader); continue; } - TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid}; - SBlockData* pBlockData = &pReader->bData; - - code = tsdbUpdateTableSchema(pTsdb->pVnode->pMeta, id.suid, id.uid, &pReader->skmTable); + code = tsdbUpdateTableSchema(pTsdb->pVnode->pMeta, pRowInfo->suid, pRowInfo->uid, &pReader->skmTable); TSDB_CHECK_CODE(code, lino, _exit); - code = tBlockDataInit(pBlockData, &id, pReader->skmTable.pTSchema, NULL, 0); + code = tBlockDataInit(&pReader->bData, (TABLEID*)pRowInfo, pReader->skmTable.pTSchema, NULL, 0); TSDB_CHECK_CODE(code, lino, _exit); - while (pRowInfo->suid == id.suid && pRowInfo->uid == id.uid) { - code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pRowInfo->uid); + do { + if (!TABLE_SAME_SCHEMA(pReader->bData.suid, pReader->bData.uid, pRowInfo->suid, pRowInfo->uid)) break; + + if (pReader->bData.uid && pReader->bData.uid != pRowInfo->uid) { + code = 
tRealloc((uint8_t**)&pReader->bData.aUid, sizeof(int64_t) * (pReader->bData.nRow + 1)); + TSDB_CHECK_CODE(code, lino, _exit); + + for (int32_t iRow = 0; iRow < pReader->bData.nRow; ++iRow) { + pReader->bData.aUid[iRow] = pReader->bData.uid; + } + pReader->bData.uid = 0; + } + + code = tBlockDataAppendRow(&pReader->bData, &pRowInfo->row, NULL, pRowInfo->uid); TSDB_CHECK_CODE(code, lino, _exit); - code = tsdbSnapNextRow(pReader); + code = tsdbSnapReadNextRow(pReader, &pRowInfo); TSDB_CHECK_CODE(code, lino, _exit); - pRowInfo = tsdbSnapGetRow(pReader); - if (pRowInfo == NULL) { - tsdbDataFReaderClose(&pReader->pDataFReader); - break; - } + if (pReader->bData.nRow >= 4096) break; + } while (pRowInfo); + + ASSERT(pReader->bData.nRow > 0); + + break; + } + + if (pReader->bData.nRow > 0) { + ASSERT(pReader->bData.suid || pReader->bData.uid); + + code = tsdbSnapCmprData(pReader, ppData); + TSDB_CHECK_CODE(code, lino, _exit); + } + +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t tsdbSnapCmprTombData(STsdbSnapReader* pReader, uint8_t** ppData) { + int32_t code = 0; + int32_t lino = 0; + + int64_t size = sizeof(TABLEID); + for (int32_t iDelData = 0; iDelData < taosArrayGetSize(pReader->aDelData); ++iDelData) { + size += tPutDelData(NULL, taosArrayGet(pReader->aDelData, iDelData)); + } + + uint8_t* pData = (uint8_t*)taosMemoryMalloc(sizeof(SSnapDataHdr) + size); + if (pData == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; + pHdr->type = SNAP_DATA_DEL; + pHdr->size = size; + + TABLEID* pId = (TABLEID*)(pData + sizeof(SSnapDataHdr)); + *pId = pReader->tbid; + + size = sizeof(SSnapDataHdr) + sizeof(TABLEID); + for (int32_t iDelData = 0; iDelData < taosArrayGetSize(pReader->aDelData); ++iDelData) { + size += tPutDelData(pData + size, taosArrayGet(pReader->aDelData, iDelData)); + } - if (pBlockData->nRow >= 4096) break; +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); + if (pData) { + taosMemoryFree(pData); + pData = NULL; } + } + *ppData = pData; + return code; +} - code = tsdbSnapCmprData(pReader, ppData); - TSDB_CHECK_CODE(code, lino, _exit); +static void tsdbSnapReadGetTombData(STsdbSnapReader* pReader, SDelInfo** ppDelInfo) { + if (pReader->pTIter == NULL || (pReader->pTIter->delInfo.suid == 0 && pReader->pTIter->delInfo.uid == 0)) { + *ppDelInfo = NULL; + } else { + *ppDelInfo = &pReader->pTIter->delInfo; + } +} - break; +static int32_t tsdbSnapReadNextTombData(STsdbSnapReader* pReader, SDelInfo** ppDelInfo) { + int32_t code = 0; + int32_t lino = 0; + + code = tsdbDataIterNext2( + pReader->pTIter, + &(STsdbFilterInfo){.flag = TSDB_FILTER_FLAG_BY_VERSION, .sver = pReader->sver, .ever = pReader->ever}); + TSDB_CHECK_CODE(code, lino, _exit); + + if (ppDelInfo) { + tsdbSnapReadGetTombData(pReader, ppDelInfo); } _exit: if (code) { - tsdbError("vgId:%d, %s failed since %s, path:%s", TD_VID(pTsdb->pVnode), __func__, tstrerror(code), pTsdb->path); + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); } return code; } -static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) { +static int32_t tsdbSnapReadTombData(STsdbSnapReader* pReader, uint8_t** ppData) { int32_t code = 0; int32_t lino = 0; - STsdb* pTsdb = pReader->pTsdb; - 
SDelFile* pDelFile = pReader->fs.pDelFile; + STsdb* pTsdb = pReader->pTsdb; + // open tombstone data iter if need if (pReader->pDelFReader == NULL) { - if (pDelFile == NULL) { - goto _exit; - } + if (pReader->fs.pDelFile == NULL) goto _exit; // open - code = tsdbDelFReaderOpen(&pReader->pDelFReader, pDelFile, pTsdb); + code = tsdbDelFReaderOpen(&pReader->pDelFReader, pReader->fs.pDelFile, pTsdb); TSDB_CHECK_CODE(code, lino, _exit); - // read index - code = tsdbReadDelIdx(pReader->pDelFReader, pReader->aDelIdx); + code = tsdbOpenTombFileDataIter(pReader->pDelFReader, &pReader->pTIter); TSDB_CHECK_CODE(code, lino, _exit); - pReader->iDelIdx = 0; + if (pReader->pTIter) { + code = tsdbSnapReadNextTombData(pReader, NULL); + TSDB_CHECK_CODE(code, lino, _exit); + } } - while (true) { - if (pReader->iDelIdx >= taosArrayGetSize(pReader->aDelIdx)) { - tsdbDelFReaderClose(&pReader->pDelFReader); - break; - } + // loop to get tombstone data + SDelInfo* pDelInfo; + tsdbSnapReadGetTombData(pReader, &pDelInfo); - SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pReader->aDelIdx, pReader->iDelIdx); + if (pDelInfo == NULL) goto _exit; - pReader->iDelIdx++; + pReader->tbid = *(TABLEID*)pDelInfo; - code = tsdbReadDelData(pReader->pDelFReader, pDelIdx, pReader->aDelData); + if (pReader->aDelData) { + taosArrayClear(pReader->aDelData); + } else if ((pReader->aDelData = taosArrayInit(16, sizeof(SDelData))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); + } - int32_t size = 0; - for (int32_t iDelData = 0; iDelData < taosArrayGetSize(pReader->aDelData); iDelData++) { - SDelData* pDelData = (SDelData*)taosArrayGet(pReader->aDelData, iDelData); - - if (pDelData->version >= pReader->sver && pDelData->version <= pReader->ever) { - size += tPutDelData(NULL, pDelData); - } - } - if (size == 0) continue; - - // org data - size = sizeof(TABLEID) + size; - *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + size); - if (*ppData == NULL) { + while (pDelInfo && pDelInfo->suid == pReader->tbid.suid && pDelInfo->uid == pReader->tbid.uid) { + if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) < 0) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } - SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); - pHdr->type = SNAP_DATA_DEL; - pHdr->size = size; - - TABLEID* pId = (TABLEID*)(&pHdr[1]); - pId->suid = pDelIdx->suid; - pId->uid = pDelIdx->uid; - int32_t n = sizeof(SSnapDataHdr) + sizeof(TABLEID); - for (int32_t iDelData = 0; iDelData < taosArrayGetSize(pReader->aDelData); iDelData++) { - SDelData* pDelData = (SDelData*)taosArrayGet(pReader->aDelData, iDelData); - - if (pDelData->version < pReader->sver) continue; - if (pDelData->version > pReader->ever) continue; - - n += tPutDelData((*ppData) + n, pDelData); - } - - tsdbInfo("vgId:%d, vnode snapshot tsdb read del data for %s, suid:%" PRId64 " uid:%" PRId64 " size:%d", - TD_VID(pTsdb->pVnode), pTsdb->path, pDelIdx->suid, pDelIdx->uid, size); + code = tsdbSnapReadNextTombData(pReader, &pDelInfo); + TSDB_CHECK_CODE(code, lino, _exit); + } - break; + // encode tombstone data + if (taosArrayGetSize(pReader->aDelData) > 0) { + code = tsdbSnapCmprTombData(pReader, ppData); + TSDB_CHECK_CODE(code, lino, _exit); } _exit: if (code) { - tsdbError("vgId:%d, %s failed since %s, path:%s", TD_VID(pTsdb->pVnode), __func__, tstrerror(code), pTsdb->path); + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbDebug("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__); } 
return code; } int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader) { - int32_t code = 0; - int32_t lino = 0; - STsdbSnapReader* pReader = NULL; + int32_t code = 0; + int32_t lino = 0; // alloc - pReader = (STsdbSnapReader*)taosMemoryCalloc(1, sizeof(*pReader)); + STsdbSnapReader* pReader = (STsdbSnapReader*)taosMemoryCalloc(1, sizeof(*pReader)); if (pReader == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); @@ -474,118 +890,80 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type pReader->ever = ever; pReader->type = type; - code = taosThreadRwlockRdlock(&pTsdb->rwLock); - if (code) { - code = TAOS_SYSTEM_ERROR(code); - TSDB_CHECK_CODE(code, lino, _exit); - } - + taosThreadRwlockRdlock(&pTsdb->rwLock); code = tsdbFSRef(pTsdb, &pReader->fs); if (code) { taosThreadRwlockUnlock(&pTsdb->rwLock); TSDB_CHECK_CODE(code, lino, _exit); } + taosThreadRwlockUnlock(&pTsdb->rwLock); - code = taosThreadRwlockUnlock(&pTsdb->rwLock); - if (code) { - code = TAOS_SYSTEM_ERROR(code); - TSDB_CHECK_CODE(code, lino, _exit); - } - - // data + // init pReader->fid = INT32_MIN; - for (int32_t iIter = 0; iIter < sizeof(pReader->aFDataIter) / sizeof(pReader->aFDataIter[0]); iIter++) { - SFDataIter* pIter = &pReader->aFDataIter[iIter]; - - if (iIter == 0) { - pIter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); - if (pIter->aBlockIdx == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - } else { - pIter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk)); - if (pIter->aSttBlk == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - } - - code = tBlockDataCreate(&pIter->bData); - TSDB_CHECK_CODE(code, lino, _exit); - } code = tBlockDataCreate(&pReader->bData); TSDB_CHECK_CODE(code, lino, _exit); - // del - pReader->aDelIdx = taosArrayInit(0, sizeof(SDelIdx)); - if (pReader->aDelIdx == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - pReader->aDelData = taosArrayInit(0, sizeof(SDelData)); - if (pReader->aDelData == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - _exit: if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s, TSDB path: %s", TD_VID(pTsdb->pVnode), __func__, lino, - tstrerror(code), pTsdb->path); - *ppReader = NULL; - + tsdbError("vgId:%d %s failed at line %d since %s, sver:%" PRId64 " ever:%" PRId64 " type:%d", TD_VID(pTsdb->pVnode), + __func__, lino, tstrerror(code), sver, ever, type); if (pReader) { - taosArrayDestroy(pReader->aDelData); - taosArrayDestroy(pReader->aDelIdx); tBlockDataDestroy(&pReader->bData); - tsdbFSDestroy(&pReader->fs); + tsdbFSUnref(pTsdb, &pReader->fs); taosMemoryFree(pReader); + pReader = NULL; } } else { - *ppReader = pReader; - tsdbInfo("vgId:%d, vnode snapshot tsdb reader opened for %s", TD_VID(pTsdb->pVnode), pTsdb->path); + tsdbInfo("vgId:%d %s done, sver:%" PRId64 " ever:%" PRId64 " type:%d", TD_VID(pTsdb->pVnode), __func__, sver, ever, + type); } + *ppReader = pReader; return code; } int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) { - int32_t code = 0; - STsdbSnapReader* pReader = *ppReader; - - // data - if (pReader->pDataFReader) tsdbDataFReaderClose(&pReader->pDataFReader); - for (int32_t iIter = 0; iIter < sizeof(pReader->aFDataIter) / sizeof(pReader->aFDataIter[0]); iIter++) { - SFDataIter* pIter = &pReader->aFDataIter[iIter]; + int32_t code = 0; + int32_t lino = 0; - if (iIter == 0) { - 
taosArrayDestroy(pIter->aBlockIdx); - tMapDataClear(&pIter->mBlock); - } else { - taosArrayDestroy(pIter->aSttBlk); - } + STsdbSnapReader* pReader = *ppReader; + STsdb* pTsdb = pReader->pTsdb; - tBlockDataDestroy(&pIter->bData); + // tombstone + if (pReader->pTIter) { + tsdbCloseDataIter2(pReader->pTIter); + pReader->pTIter = NULL; + } + if (pReader->pDelFReader) { + tsdbDelFReaderClose(&pReader->pDelFReader); } + taosArrayDestroy(pReader->aDelData); + // timeseries + while (pReader->iterList) { + STsdbDataIter2* pIter = pReader->iterList; + pReader->iterList = pIter->next; + tsdbCloseDataIter2(pIter); + } + if (pReader->pDataFReader) { + tsdbDataFReaderClose(&pReader->pDataFReader); + } tBlockDataDestroy(&pReader->bData); - tDestroyTSchema(pReader->skmTable.pTSchema); - - // del - if (pReader->pDelFReader) tsdbDelFReaderClose(&pReader->pDelFReader); - taosArrayDestroy(pReader->aDelIdx); - taosArrayDestroy(pReader->aDelData); + // other + tDestroyTSchema(pReader->skmTable.pTSchema); tsdbFSUnref(pReader->pTsdb, &pReader->fs); - - tsdbInfo("vgId:%d, vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path); - for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(pReader->aBuf[0]); iBuf++) { tFree(pReader->aBuf[iBuf]); } - taosMemoryFree(pReader); + +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbDebug("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__); + } *ppReader = NULL; return code; } @@ -598,7 +976,7 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) { // read data file if (!pReader->dataDone) { - code = tsdbSnapReadData(pReader, ppData); + code = tsdbSnapReadTimeSeriesData(pReader, ppData); TSDB_CHECK_CODE(code, lino, _exit); if (*ppData) { goto _exit; @@ -609,7 +987,7 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) { // read del file if (!pReader->delDone) { - code = tsdbSnapReadDel(pReader, ppData); + code = tsdbSnapReadTombData(pReader, ppData); TSDB_CHECK_CODE(code, lino, _exit); if (*ppData) { goto _exit; @@ -620,22 +998,18 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) { _exit: if (code) { - tsdbError("vgId:%d, %s failed since %s, path:%s", TD_VID(pReader->pTsdb->pVnode), __func__, tstrerror(code), - pReader->pTsdb->path); + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); } else { - tsdbDebug("vgId:%d, %s done, path:%s", TD_VID(pReader->pTsdb->pVnode), __func__, pReader->pTsdb->path); + tsdbDebug("vgId:%d %s done", TD_VID(pReader->pTsdb->pVnode), __func__); } return code; } // STsdbSnapWriter ======================================== struct STsdbSnapWriter { - STsdb* pTsdb; - int64_t sver; - int64_t ever; - STsdbFS fs; - - // config + STsdb* pTsdb; + int64_t sver; + int64_t ever; int32_t minutes; int8_t precision; int32_t minRow; @@ -644,641 +1018,816 @@ struct STsdbSnapWriter { int64_t commitID; uint8_t* aBuf[5]; - // for data file - SBlockData bData; - int32_t fid; - TABLEID id; - SSkmInfo skmTable; - struct { - SDataFReader* pReader; - SArray* aBlockIdx; - int32_t iBlockIdx; - SBlockIdx* pBlockIdx; - SMapData mDataBlk; - int32_t iDataBlk; - SBlockData bData; - int32_t iRow; - } dReader; - struct { - SDataFWriter* pWriter; - SArray* aBlockIdx; - SMapData mDataBlk; - SArray* aSttBlk; - SBlockData bData; - SBlockData sData; - } dWriter; - - // for del file - SDelFReader* pDelFReader; + STsdbFS fs; + TABLEID 
tbid; + + // time-series data + SBlockData inData; + + int32_t fid; + SSkmInfo skmTable; + + /* reader */ + SDataFReader* pDataFReader; + STsdbDataIter2* iterList; + STsdbDataIter2* pDIter; + STsdbDataIter2* pSIter; + SRBTree rbt; // SRBTree + + /* writer */ + SDataFWriter* pDataFWriter; + SArray* aBlockIdx; + SMapData mDataBlk; // SMapData + SArray* aSttBlk; // SArray + SBlockData bData; + SBlockData sData; + + // tombstone data + /* reader */ + SDelFReader* pDelFReader; + STsdbDataIter2* pTIter; + + /* writer */ SDelFWriter* pDelFWriter; - int32_t iDelIdx; - SArray* aDelIdxR; + SArray* aDelIdx; SArray* aDelData; - SArray* aDelIdxW; }; // SNAP_DATA_TSDB -extern int32_t tsdbWriteDataBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SMapData* mDataBlk, int8_t cmprAlg); -extern int32_t tsdbWriteSttBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SArray* aSttBlk, int8_t cmprAlg); - -static int32_t tsdbSnapNextTableData(STsdbSnapWriter* pWriter) { +static int32_t tsdbSnapWriteTableDataStart(STsdbSnapWriter* pWriter, TABLEID* pId) { int32_t code = 0; + int32_t lino = 0; + + if (pId) { + pWriter->tbid = *pId; + } else { + pWriter->tbid = (TABLEID){INT64_MAX, INT64_MAX}; + } + + if (pWriter->pDIter) { + STsdbDataIter2* pIter = pWriter->pDIter; + + // assert last table data end + ASSERT(pIter->dIter.iRow >= pIter->dIter.bData.nRow); + ASSERT(pIter->dIter.iDataBlk >= pIter->dIter.mDataBlk.nItem); + + for (;;) { + if (pIter->dIter.iBlockIdx >= taosArrayGetSize(pIter->dIter.aBlockIdx)) { + pWriter->pDIter = NULL; + break; + } + + SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->dIter.aBlockIdx, pIter->dIter.iBlockIdx); + + int32_t c = tTABLEIDCmprFn(pBlockIdx, &pWriter->tbid); + if (c < 0) { + code = tsdbReadDataBlk(pIter->dIter.pReader, pBlockIdx, &pIter->dIter.mDataBlk); + TSDB_CHECK_CODE(code, lino, _exit); + + SBlockIdx* pNewBlockIdx = taosArrayReserve(pWriter->aBlockIdx, 1); + if (pNewBlockIdx == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + + pNewBlockIdx->suid = pBlockIdx->suid; + pNewBlockIdx->uid = pBlockIdx->uid; + + code = tsdbWriteDataBlk(pWriter->pDataFWriter, &pIter->dIter.mDataBlk, pNewBlockIdx); + TSDB_CHECK_CODE(code, lino, _exit); + + pIter->dIter.iBlockIdx++; + } else if (c == 0) { + code = tsdbReadDataBlk(pIter->dIter.pReader, pBlockIdx, &pIter->dIter.mDataBlk); + TSDB_CHECK_CODE(code, lino, _exit); + + pIter->dIter.iDataBlk = 0; + pIter->dIter.iBlockIdx++; + + break; + } else { + pIter->dIter.iDataBlk = pIter->dIter.mDataBlk.nItem; + break; + } + } + } + + if (pId) { + code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pId->suid, pId->uid, &pWriter->skmTable); + TSDB_CHECK_CODE(code, lino, _exit); + + tMapDataReset(&pWriter->mDataBlk); - ASSERT(pWriter->dReader.iRow >= pWriter->dReader.bData.nRow); + code = tBlockDataInit(&pWriter->bData, pId, pWriter->skmTable.pTSchema, NULL, 0); + TSDB_CHECK_CODE(code, lino, _exit); + } - if (pWriter->dReader.iBlockIdx < taosArrayGetSize(pWriter->dReader.aBlockIdx)) { - pWriter->dReader.pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->dReader.aBlockIdx, pWriter->dReader.iBlockIdx); + if (!TABLE_SAME_SCHEMA(pWriter->tbid.suid, pWriter->tbid.uid, pWriter->sData.suid, pWriter->sData.uid)) { + if ((pWriter->sData.nRow > 0)) { + code = tsdbWriteSttBlock(pWriter->pDataFWriter, &pWriter->sData, pWriter->aSttBlk, pWriter->cmprAlg); + TSDB_CHECK_CODE(code, lino, _exit); + } - code = tsdbReadDataBlk(pWriter->dReader.pReader, pWriter->dReader.pBlockIdx, &pWriter->dReader.mDataBlk); - if (code) 
goto _exit; + if (pId) { + TABLEID id = {.suid = pWriter->tbid.suid, .uid = pWriter->tbid.suid ? 0 : pWriter->tbid.uid}; + code = tBlockDataInit(&pWriter->sData, &id, pWriter->skmTable.pTSchema, NULL, 0); + TSDB_CHECK_CODE(code, lino, _exit); + } + } - pWriter->dReader.iBlockIdx++; +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); } else { - pWriter->dReader.pBlockIdx = NULL; - tMapDataReset(&pWriter->dReader.mDataBlk); + tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, + pWriter->tbid.suid, pWriter->tbid.uid); + } + return code; +} + +static int32_t tsdbSnapWriteTableRowImpl(STsdbSnapWriter* pWriter, TSDBROW* pRow) { + int32_t code = 0; + int32_t lino = 0; + + code = tBlockDataAppendRow(&pWriter->bData, pRow, pWriter->skmTable.pTSchema, pWriter->tbid.uid); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pWriter->bData.nRow >= pWriter->maxRow) { + code = tsdbWriteDataBlock(pWriter->pDataFWriter, &pWriter->bData, &pWriter->mDataBlk, pWriter->cmprAlg); + TSDB_CHECK_CODE(code, lino, _exit); } - pWriter->dReader.iDataBlk = 0; // point to the next one - tBlockDataReset(&pWriter->dReader.bData); - pWriter->dReader.iRow = 0; _exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } return code; } -static int32_t tsdbSnapWriteCopyData(STsdbSnapWriter* pWriter, TABLEID* pId) { +static int32_t tsdbSnapWriteTableRow(STsdbSnapWriter* pWriter, TSDBROW* pRow) { int32_t code = 0; + int32_t lino = 0; - while (true) { - if (pWriter->dReader.pBlockIdx == NULL) break; - if (tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, pId) >= 0) break; + TSDBKEY inKey = pRow ? 
TSDBROW_KEY(pRow) : TSDBKEY_MAX; + + if (pWriter->pDIter == NULL || (pWriter->pDIter->dIter.iRow >= pWriter->pDIter->dIter.bData.nRow && + pWriter->pDIter->dIter.iDataBlk >= pWriter->pDIter->dIter.mDataBlk.nItem)) { + goto _write_row; + } else { + for (;;) { + while (pWriter->pDIter->dIter.iRow < pWriter->pDIter->dIter.bData.nRow) { + TSDBROW row = tsdbRowFromBlockData(&pWriter->pDIter->dIter.bData, pWriter->pDIter->dIter.iRow); + + int32_t c = tsdbKeyCmprFn(&inKey, &TSDBROW_KEY(&row)); + if (c < 0) { + goto _write_row; + } else if (c > 0) { + code = tsdbSnapWriteTableRowImpl(pWriter, &row); + TSDB_CHECK_CODE(code, lino, _exit); + + pWriter->pDIter->dIter.iRow++; + } else { + ASSERT(0); + } + } - SBlockIdx blkIdx = *pWriter->dReader.pBlockIdx; - code = tsdbWriteDataBlk(pWriter->dWriter.pWriter, &pWriter->dReader.mDataBlk, &blkIdx); - if (code) goto _exit; + for (;;) { + if (pWriter->pDIter->dIter.iDataBlk >= pWriter->pDIter->dIter.mDataBlk.nItem) goto _write_row; - if (taosArrayPush(pWriter->dWriter.aBlockIdx, &blkIdx) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; + // FIXME: Here can be slow, use array instead + SDataBlk dataBlk; + tMapDataGetItemByIdx(&pWriter->pDIter->dIter.mDataBlk, pWriter->pDIter->dIter.iDataBlk, &dataBlk, tGetDataBlk); + + int32_t c = tDataBlkCmprFn(&dataBlk, &(SDataBlk){.minKey = inKey, .maxKey = inKey}); + if (c > 0) { + goto _write_row; + } else if (c < 0) { + if (pWriter->bData.nRow > 0) { + code = tsdbWriteDataBlock(pWriter->pDataFWriter, &pWriter->bData, &pWriter->mDataBlk, pWriter->cmprAlg); + TSDB_CHECK_CODE(code, lino, _exit); + } + + tMapDataPutItem(&pWriter->pDIter->dIter.mDataBlk, &dataBlk, tPutDataBlk); + pWriter->pDIter->dIter.iDataBlk++; + } else { + code = tsdbReadDataBlockEx(pWriter->pDataFReader, &dataBlk, &pWriter->pDIter->dIter.bData); + TSDB_CHECK_CODE(code, lino, _exit); + + pWriter->pDIter->dIter.iRow = 0; + pWriter->pDIter->dIter.iDataBlk++; + break; + } + } } + } - code = tsdbSnapNextTableData(pWriter); - if (code) goto _exit; +_write_row: + if (pRow) { + code = tsdbSnapWriteTableRowImpl(pWriter, pRow); + TSDB_CHECK_CODE(code, lino, _exit); } _exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } return code; } -static int32_t tsdbSnapWriteTableDataStart(STsdbSnapWriter* pWriter, TABLEID* pId) { +static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { int32_t code = 0; + int32_t lino = 0; + + // write a NULL row to end current table data write + code = tsdbSnapWriteTableRow(pWriter, NULL); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pWriter->bData.nRow > 0) { + if (pWriter->bData.nRow < pWriter->minRow) { + ASSERT(TABLE_SAME_SCHEMA(pWriter->sData.suid, pWriter->sData.uid, pWriter->tbid.suid, pWriter->tbid.uid)); + for (int32_t iRow = 0; iRow < pWriter->bData.nRow; iRow++) { + code = + tBlockDataAppendRow(&pWriter->sData, &tsdbRowFromBlockData(&pWriter->bData, iRow), NULL, pWriter->tbid.uid); + TSDB_CHECK_CODE(code, lino, _exit); - code = tsdbSnapWriteCopyData(pWriter, pId); - if (code) goto _err; + if (pWriter->sData.nRow >= pWriter->maxRow) { + code = tsdbWriteSttBlock(pWriter->pDataFWriter, &pWriter->sData, pWriter->aSttBlk, pWriter->cmprAlg); + TSDB_CHECK_CODE(code, lino, _exit); + } + } - pWriter->id.suid = pId->suid; - pWriter->id.uid = pId->uid; + tBlockDataClear(&pWriter->bData); + } else { + code = tsdbWriteDataBlock(pWriter->pDataFWriter, &pWriter->bData, &pWriter->mDataBlk, pWriter->cmprAlg); + TSDB_CHECK_CODE(code, 
lino, _exit); + } + } - code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pId->suid, pId->uid, &pWriter->skmTable); - if (code) goto _err; + if (pWriter->mDataBlk.nItem) { + SBlockIdx* pBlockIdx = taosArrayReserve(pWriter->aBlockIdx, 1); + if (pBlockIdx == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } - tMapDataReset(&pWriter->dWriter.mDataBlk); - code = tBlockDataInit(&pWriter->dWriter.bData, pId, pWriter->skmTable.pTSchema, NULL, 0); - if (code) goto _err; + pBlockIdx->suid = pWriter->tbid.suid; + pBlockIdx->uid = pWriter->tbid.uid; - return code; + code = tsdbWriteDataBlk(pWriter->pDataFWriter, &pWriter->mDataBlk, pBlockIdx); + TSDB_CHECK_CODE(code, lino, _exit); + } -_err: - tsdbError("vgId:%d, %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code)); +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } return code; } -static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { +static int32_t tsdbSnapWriteFileDataStart(STsdbSnapWriter* pWriter, int32_t fid) { int32_t code = 0; + int32_t lino = 0; - if (pWriter->id.suid == 0 && pWriter->id.uid == 0) return code; + ASSERT(pWriter->pDataFWriter == NULL && pWriter->fid < fid); - int32_t c = 1; - if (pWriter->dReader.pBlockIdx) { - c = tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, &pWriter->id); - ASSERT(c >= 0); - } + STsdb* pTsdb = pWriter->pTsdb; + + pWriter->fid = fid; + pWriter->tbid = (TABLEID){0}; + SDFileSet* pSet = taosArraySearch(pWriter->fs.aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ); + + // open reader + pWriter->pDataFReader = NULL; + pWriter->iterList = NULL; + pWriter->pDIter = NULL; + pWriter->pSIter = NULL; + tRBTreeCreate(&pWriter->rbt, tsdbDataIterCmprFn); + if (pSet) { + code = tsdbDataFReaderOpen(&pWriter->pDataFReader, pTsdb, pSet); + TSDB_CHECK_CODE(code, lino, _exit); + + code = tsdbOpenDataFileDataIter(pWriter->pDataFReader, &pWriter->pDIter); + TSDB_CHECK_CODE(code, lino, _exit); + if (pWriter->pDIter) { + pWriter->pDIter->next = pWriter->iterList; + pWriter->iterList = pWriter->pDIter; + } - if (c == 0) { - SBlockData* pBData = &pWriter->dWriter.bData; + for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { + code = tsdbOpenSttFileDataIter(pWriter->pDataFReader, iStt, &pWriter->pSIter); + TSDB_CHECK_CODE(code, lino, _exit); - for (; pWriter->dReader.iRow < pWriter->dReader.bData.nRow; pWriter->dReader.iRow++) { - TSDBROW row = tsdbRowFromBlockData(&pWriter->dReader.bData, pWriter->dReader.iRow); + if (pWriter->pSIter) { + code = tsdbSttFileDataIterNext(pWriter->pSIter, NULL); + TSDB_CHECK_CODE(code, lino, _exit); - code = tBlockDataAppendRow(pBData, &row, NULL, pWriter->id.uid); - if (code) goto _err; + // add to tree + tRBTreePut(&pWriter->rbt, &pWriter->pSIter->rbtn); - if (pBData->nRow >= pWriter->maxRow) { - code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, pBData, &pWriter->dWriter.mDataBlk, pWriter->cmprAlg); - if (code) goto _err; + // add to list + pWriter->pSIter->next = pWriter->iterList; + pWriter->iterList = pWriter->pSIter; } } - code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, pBData, &pWriter->dWriter.mDataBlk, pWriter->cmprAlg); - if (code) goto _err; + pWriter->pSIter = NULL; + } + + // open writer + SDiskID diskId; + if (pSet) { + diskId = pSet->diskId; + } else { + tfsAllocDisk(pTsdb->pVnode->pTfs, 0 /*TODO*/, &diskId); + tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, diskId); + } + SDFileSet wSet = 
{.diskId = diskId, + .fid = fid, + .pHeadF = &(SHeadFile){.commitID = pWriter->commitID}, + .pDataF = (pSet) ? pSet->pDataF : &(SDataFile){.commitID = pWriter->commitID}, + .pSmaF = (pSet) ? pSet->pSmaF : &(SSmaFile){.commitID = pWriter->commitID}, + .nSttF = 1, + .aSttF = {&(SSttFile){.commitID = pWriter->commitID}}}; + code = tsdbDataFWriterOpen(&pWriter->pDataFWriter, pTsdb, &wSet); + TSDB_CHECK_CODE(code, lino, _exit); - for (; pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem; pWriter->dReader.iDataBlk++) { - SDataBlk dataBlk; - tMapDataGetItemByIdx(&pWriter->dReader.mDataBlk, pWriter->dReader.iDataBlk, &dataBlk, tGetDataBlk); + if (pWriter->aBlockIdx) { + taosArrayClear(pWriter->aBlockIdx); + } else if ((pWriter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } - code = tMapDataPutItem(&pWriter->dWriter.mDataBlk, &dataBlk, tPutDataBlk); - if (code) goto _err; - } + tMapDataReset(&pWriter->mDataBlk); - code = tsdbSnapNextTableData(pWriter); - if (code) goto _err; + if (pWriter->aSttBlk) { + taosArrayClear(pWriter->aSttBlk); + } else if ((pWriter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); } - if (pWriter->dWriter.mDataBlk.nItem) { - SBlockIdx blockIdx = {.suid = pWriter->id.suid, .uid = pWriter->id.uid}; - code = tsdbWriteDataBlk(pWriter->dWriter.pWriter, &pWriter->dWriter.mDataBlk, &blockIdx); + tBlockDataReset(&pWriter->bData); + tBlockDataReset(&pWriter->sData); - if (taosArrayPush(pWriter->dWriter.aBlockIdx, &blockIdx) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s, fid:%d", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code), + fid); + } else { + tsdbDebug("vgId:%d %s done, fid:%d", TD_VID(pTsdb->pVnode), __func__, fid); + } + return code; +} + +static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, SRowInfo* pRowInfo) { + int32_t code = 0; + int32_t lino = 0; + + // switch to new table if need + if (pRowInfo == NULL || pRowInfo->uid != pWriter->tbid.uid) { + if (pWriter->tbid.uid) { + code = tsdbSnapWriteTableDataEnd(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); } + + code = tsdbSnapWriteTableDataStart(pWriter, (TABLEID*)pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); } - pWriter->id.suid = 0; - pWriter->id.uid = 0; + if (pRowInfo == NULL) goto _exit; - return code; + code = tsdbSnapWriteTableRow(pWriter, &pRowInfo->row); + TSDB_CHECK_CODE(code, lino, _exit); -_err: +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } return code; } -static int32_t tsdbSnapWriteOpenFile(STsdbSnapWriter* pWriter, int32_t fid) { +static int32_t tsdbSnapWriteNextRow(STsdbSnapWriter* pWriter, SRowInfo** ppRowInfo) { int32_t code = 0; - STsdb* pTsdb = pWriter->pTsdb; - - ASSERT(pWriter->dWriter.pWriter == NULL); + int32_t lino = 0; - pWriter->fid = fid; - pWriter->id = (TABLEID){0}; - SDFileSet* pSet = taosArraySearch(pWriter->fs.aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ); + if (pWriter->pSIter) { + code = tsdbDataIterNext2(pWriter->pSIter, NULL); + TSDB_CHECK_CODE(code, lino, _exit); - // Reader - if (pSet) { - code = tsdbDataFReaderOpen(&pWriter->dReader.pReader, pWriter->pTsdb, pSet); - if (code) goto _err; + if (pWriter->pSIter->rowInfo.suid == 0 && pWriter->pSIter->rowInfo.uid == 0) { + pWriter->pSIter = NULL; + 
} else { + SRBTreeNode* pNode = tRBTreeMin(&pWriter->rbt); + if (pNode) { + int32_t c = tsdbDataIterCmprFn(&pWriter->pSIter->rbtn, pNode); + if (c > 0) { + tRBTreePut(&pWriter->rbt, &pWriter->pSIter->rbtn); + pWriter->pSIter = NULL; + } else if (c == 0) { + ASSERT(0); + } + } + } + } - code = tsdbReadBlockIdx(pWriter->dReader.pReader, pWriter->dReader.aBlockIdx); - if (code) goto _err; - } else { - ASSERT(pWriter->dReader.pReader == NULL); - taosArrayClear(pWriter->dReader.aBlockIdx); - } - pWriter->dReader.iBlockIdx = 0; // point to the next one - code = tsdbSnapNextTableData(pWriter); - if (code) goto _err; - - // Writer - SHeadFile fHead = {.commitID = pWriter->commitID}; - SDataFile fData = {.commitID = pWriter->commitID}; - SSmaFile fSma = {.commitID = pWriter->commitID}; - SSttFile fStt = {.commitID = pWriter->commitID}; - SDFileSet wSet = {.fid = pWriter->fid, .pHeadF = &fHead, .pDataF = &fData, .pSmaF = &fSma}; - if (pSet) { - wSet.diskId = pSet->diskId; - fData = *pSet->pDataF; - fSma = *pSet->pSmaF; - for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { - wSet.aSttF[iStt] = pSet->aSttF[iStt]; + if (pWriter->pSIter == NULL) { + SRBTreeNode* pNode = tRBTreeMin(&pWriter->rbt); + if (pNode) { + tRBTreeDrop(&pWriter->rbt, pNode); + pWriter->pSIter = TSDB_RBTN_TO_DATA_ITER(pNode); } - wSet.nSttF = pSet->nSttF + 1; // TODO: fix pSet->nSttF == pTsdb->maxFile - } else { - SDiskID did = {0}; - tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did); - tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did); - wSet.diskId = did; - wSet.nSttF = 1; - } - wSet.aSttF[wSet.nSttF - 1] = &fStt; - - code = tsdbDataFWriterOpen(&pWriter->dWriter.pWriter, pWriter->pTsdb, &wSet); - if (code) goto _err; - taosArrayClear(pWriter->dWriter.aBlockIdx); - tMapDataReset(&pWriter->dWriter.mDataBlk); - taosArrayClear(pWriter->dWriter.aSttBlk); - tBlockDataReset(&pWriter->dWriter.bData); - tBlockDataReset(&pWriter->dWriter.sData); + } - return code; + if (ppRowInfo) { + if (pWriter->pSIter) { + *ppRowInfo = &pWriter->pSIter->rowInfo; + } else { + *ppRowInfo = NULL; + } + } -_err: +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } return code; } -static int32_t tsdbSnapWriteCloseFile(STsdbSnapWriter* pWriter) { +static int32_t tsdbSnapWriteGetRow(STsdbSnapWriter* pWriter, SRowInfo** ppRowInfo) { int32_t code = 0; + int32_t lino = 0; - ASSERT(pWriter->dWriter.pWriter); - - code = tsdbSnapWriteTableDataEnd(pWriter); - if (code) goto _err; - - // copy remain table data - TABLEID id = {.suid = INT64_MAX, .uid = INT64_MAX}; - code = tsdbSnapWriteCopyData(pWriter, &id); - if (code) goto _err; - - code = - tsdbWriteSttBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.sData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg); - if (code) goto _err; - - // Indices - code = tsdbWriteBlockIdx(pWriter->dWriter.pWriter, pWriter->dWriter.aBlockIdx); - if (code) goto _err; - - code = tsdbWriteSttBlk(pWriter->dWriter.pWriter, pWriter->dWriter.aSttBlk); - if (code) goto _err; - - code = tsdbUpdateDFileSetHeader(pWriter->dWriter.pWriter); - if (code) goto _err; - - code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->dWriter.pWriter->wSet); - if (code) goto _err; - - code = tsdbDataFWriterClose(&pWriter->dWriter.pWriter, 1); - if (code) goto _err; - - if (pWriter->dReader.pReader) { - code = tsdbDataFReaderClose(&pWriter->dReader.pReader); - if (code) goto _err; + if (pWriter->pSIter) { + *ppRowInfo = &pWriter->pSIter->rowInfo; + goto _exit; } -_exit: - return 
code; + code = tsdbSnapWriteNextRow(pWriter, ppRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); -_err: +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } return code; } -static int32_t tsdbSnapWriteToDataFile(STsdbSnapWriter* pWriter, int32_t iRow, int8_t* done) { +static int32_t tsdbSnapWriteFileDataEnd(STsdbSnapWriter* pWriter) { int32_t code = 0; + int32_t lino = 0; - SBlockData* pBData = &pWriter->bData; - TABLEID id = {.suid = pBData->suid, .uid = pBData->uid ? pBData->uid : pBData->aUid[iRow]}; - TSDBROW row = tsdbRowFromBlockData(pBData, iRow); - TSDBKEY key = TSDBROW_KEY(&row); + ASSERT(pWriter->pDataFWriter); - *done = 0; - while (pWriter->dReader.iRow < pWriter->dReader.bData.nRow || - pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem) { - // Merge row by row - for (; pWriter->dReader.iRow < pWriter->dReader.bData.nRow; pWriter->dReader.iRow++) { - TSDBROW trow = tsdbRowFromBlockData(&pWriter->dReader.bData, pWriter->dReader.iRow); - TSDBKEY tKey = TSDBROW_KEY(&trow); + // consume remain data and end with a NULL table row + SRowInfo* pRowInfo; + code = tsdbSnapWriteGetRow(pWriter, &pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); + for (;;) { + code = tsdbSnapWriteTableData(pWriter, pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); - ASSERT(pWriter->dReader.bData.suid == id.suid && pWriter->dReader.bData.uid == id.uid); + if (pRowInfo == NULL) break; - int32_t c = tsdbKeyCmprFn(&key, &tKey); - if (c < 0) { - code = tBlockDataAppendRow(&pWriter->dWriter.bData, &row, NULL, id.uid); - if (code) goto _err; - } else if (c > 0) { - code = tBlockDataAppendRow(&pWriter->dWriter.bData, &trow, NULL, id.uid); - if (code) goto _err; - } else { - ASSERT(0); - } + code = tsdbSnapWriteNextRow(pWriter, &pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); + } - if (pWriter->dWriter.bData.nRow >= pWriter->maxRow) { - code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk, - pWriter->cmprAlg); - if (code) goto _err; - } + // do file-level updates + code = tsdbWriteSttBlk(pWriter->pDataFWriter, pWriter->aSttBlk); + TSDB_CHECK_CODE(code, lino, _exit); - if (c < 0) { - *done = 1; - goto _exit; - } - } + code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdx); + TSDB_CHECK_CODE(code, lino, _exit); - // Merge row by block - SDataBlk tDataBlk = {.minKey = key, .maxKey = key}; - for (; pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem; pWriter->dReader.iDataBlk++) { - SDataBlk dataBlk; - tMapDataGetItemByIdx(&pWriter->dReader.mDataBlk, pWriter->dReader.iDataBlk, &dataBlk, tGetDataBlk); + code = tsdbUpdateDFileSetHeader(pWriter->pDataFWriter); + TSDB_CHECK_CODE(code, lino, _exit); - int32_t c = tDataBlkCmprFn(&dataBlk, &tDataBlk); - if (c < 0) { - code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk, - pWriter->cmprAlg); - if (code) goto _err; - - code = tMapDataPutItem(&pWriter->dWriter.mDataBlk, &dataBlk, tPutDataBlk); - if (code) goto _err; - } else if (c > 0) { - code = tBlockDataAppendRow(&pWriter->dWriter.bData, &row, NULL, id.uid); - if (code) goto _err; - - if (pWriter->dWriter.bData.nRow >= pWriter->maxRow) { - code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk, - pWriter->cmprAlg); - if (code) goto _err; - } + code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->pDataFWriter->wSet); + TSDB_CHECK_CODE(code, lino, _exit); - *done = 1; 
- goto _exit; - } else { - code = tsdbReadDataBlockEx(pWriter->dReader.pReader, &dataBlk, &pWriter->dReader.bData); - if (code) goto _err; - pWriter->dReader.iRow = 0; + code = tsdbDataFWriterClose(&pWriter->pDataFWriter, 1); + TSDB_CHECK_CODE(code, lino, _exit); - pWriter->dReader.iDataBlk++; - break; - } - } + if (pWriter->pDataFReader) { + code = tsdbDataFReaderClose(&pWriter->pDataFReader); + TSDB_CHECK_CODE(code, lino, _exit); } -_exit: - return code; + // clear sources + while (pWriter->iterList) { + STsdbDataIter2* pIter = pWriter->iterList; + pWriter->iterList = pIter->next; + tsdbCloseDataIter2(pIter); + } -_err: - tsdbError("vgId:%d, %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code)); +_exit: + if (code) { + tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code)); + } else { + tsdbDebug("vgId:%d %s is done", TD_VID(pWriter->pTsdb->pVnode), __func__); + } return code; } -static int32_t tsdbSnapWriteToSttFile(STsdbSnapWriter* pWriter, int32_t iRow) { +static int32_t tsdbSnapWriteTimeSeriesData(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr) { int32_t code = 0; + int32_t lino = 0; - TABLEID id = {.suid = pWriter->bData.suid, - .uid = pWriter->bData.uid ? pWriter->bData.uid : pWriter->bData.aUid[iRow]}; - TSDBROW row = tsdbRowFromBlockData(&pWriter->bData, iRow); - SBlockData* pBData = &pWriter->dWriter.sData; + code = tDecmprBlockData(pHdr->data, pHdr->size, &pWriter->inData, pWriter->aBuf); + TSDB_CHECK_CODE(code, lino, _exit); - if (pBData->suid || pBData->uid) { - if (!TABLE_SAME_SCHEMA(pBData->suid, pBData->uid, id.suid, id.uid)) { - code = tsdbWriteSttBlock(pWriter->dWriter.pWriter, pBData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg); - if (code) goto _err; + ASSERT(pWriter->inData.nRow > 0); - pBData->suid = 0; - pBData->uid = 0; + // switch to new data file if need + int32_t fid = tsdbKeyFid(pWriter->inData.aTSKEY[0], pWriter->minutes, pWriter->precision); + if (pWriter->fid != fid) { + if (pWriter->pDataFWriter) { + code = tsdbSnapWriteFileDataEnd(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); } - } - if (pBData->suid == 0 && pBData->uid == 0) { - code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pWriter->id.suid, pWriter->id.uid, &pWriter->skmTable); - if (code) goto _err; - - TABLEID tid = {.suid = pWriter->id.suid, .uid = pWriter->id.suid ? 0 : pWriter->id.uid}; - code = tBlockDataInit(pBData, &tid, pWriter->skmTable.pTSchema, NULL, 0); - if (code) goto _err; + code = tsdbSnapWriteFileDataStart(pWriter, fid); + TSDB_CHECK_CODE(code, lino, _exit); } - code = tBlockDataAppendRow(pBData, &row, NULL, id.uid); - if (code) goto _err; + // loop write each row + SRowInfo* pRowInfo; + code = tsdbSnapWriteGetRow(pWriter, &pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); + for (int32_t iRow = 0; iRow < pWriter->inData.nRow; ++iRow) { + SRowInfo rInfo = {.suid = pWriter->inData.suid, + .uid = pWriter->inData.uid ? 
pWriter->inData.uid : pWriter->inData.aUid[iRow], + .row = tsdbRowFromBlockData(&pWriter->inData, iRow)}; - if (pBData->nRow >= pWriter->maxRow) { - code = tsdbWriteSttBlock(pWriter->dWriter.pWriter, pBData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg); - if (code) goto _err; + for (;;) { + if (pRowInfo == NULL) { + code = tsdbSnapWriteTableData(pWriter, &rInfo); + TSDB_CHECK_CODE(code, lino, _exit); + break; + } else { + int32_t c = tRowInfoCmprFn(&rInfo, pRowInfo); + if (c < 0) { + code = tsdbSnapWriteTableData(pWriter, &rInfo); + TSDB_CHECK_CODE(code, lino, _exit); + break; + } else if (c > 0) { + code = tsdbSnapWriteTableData(pWriter, pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); + + code = tsdbSnapWriteNextRow(pWriter, &pRowInfo); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + ASSERT(0); + } + } + } } _exit: - return code; - -_err: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbDebug("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64 " nRow:%d", TD_VID(pWriter->pTsdb->pVnode), __func__, + pWriter->inData.suid, pWriter->inData.uid, pWriter->inData.nRow); + } return code; } -static int32_t tsdbSnapWriteRowData(STsdbSnapWriter* pWriter, int32_t iRow) { +// SNAP_DATA_DEL +static int32_t tsdbSnapWriteDelTableDataStart(STsdbSnapWriter* pWriter, TABLEID* pId) { int32_t code = 0; + int32_t lino = 0; - SBlockData* pBlockData = &pWriter->bData; - TABLEID id = {.suid = pBlockData->suid, .uid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[iRow]}; - - // End last table data write if need - if (tTABLEIDCmprFn(&pWriter->id, &id) != 0) { - code = tsdbSnapWriteTableDataEnd(pWriter); - if (code) goto _err; - } - - // Start new table data write if need - if (pWriter->id.suid == 0 && pWriter->id.uid == 0) { - code = tsdbSnapWriteTableDataStart(pWriter, &id); - if (code) goto _err; - } - - // Merge with .data file data - int8_t done = 0; - if (pWriter->dReader.pBlockIdx && tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, &id) == 0) { - code = tsdbSnapWriteToDataFile(pWriter, iRow, &done); - if (code) goto _err; - } - - // Append to the .stt data block (todo: check if need to set/reload sst block) - if (!done) { - code = tsdbSnapWriteToSttFile(pWriter, iRow); - if (code) goto _err; + if (pId) { + pWriter->tbid = *pId; + } else { + pWriter->tbid = (TABLEID){.suid = INT64_MAX, .uid = INT64_MAX}; } -_exit: - return code; + taosArrayClear(pWriter->aDelData); -_err: - tsdbError("vgId:%d, %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code)); - return code; -} + if (pWriter->pTIter) { + while (pWriter->pTIter->tIter.iDelIdx < taosArrayGetSize(pWriter->pTIter->tIter.aDelIdx)) { + SDelIdx* pDelIdx = taosArrayGet(pWriter->pTIter->tIter.aDelIdx, pWriter->pTIter->tIter.iDelIdx); -static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - int32_t code = 0; - STsdb* pTsdb = pWriter->pTsdb; - SBlockData* pBlockData = &pWriter->bData; + int32_t c = tTABLEIDCmprFn(pDelIdx, &pWriter->tbid); + if (c < 0) { + code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->pTIter->tIter.aDelData); + TSDB_CHECK_CODE(code, lino, _exit); - // Decode data - SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; - code = tDecmprBlockData(pHdr->data, pHdr->size, pBlockData, pWriter->aBuf); - if (code) goto _err; + SDelIdx* pDelIdxNew = taosArrayReserve(pWriter->aDelIdx, 1); + if (pDelIdxNew == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } 
- ASSERT(pBlockData->nRow > 0); + pDelIdxNew->suid = pDelIdx->suid; + pDelIdxNew->uid = pDelIdx->uid; - // Loop to handle each row - for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) { - TSKEY ts = pBlockData->aTSKEY[iRow]; - int32_t fid = tsdbKeyFid(ts, pWriter->minutes, pWriter->precision); + code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->pTIter->tIter.aDelData, pDelIdxNew); + TSDB_CHECK_CODE(code, lino, _exit); - if (pWriter->dWriter.pWriter == NULL || pWriter->fid != fid) { - if (pWriter->dWriter.pWriter) { - // ASSERT(fid > pWriter->fid); + pWriter->pTIter->tIter.iDelIdx++; + } else if (c == 0) { + code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData); + TSDB_CHECK_CODE(code, lino, _exit); - code = tsdbSnapWriteCloseFile(pWriter); - if (code) goto _err; + pWriter->pTIter->tIter.iDelIdx++; + break; + } else { + break; } - - code = tsdbSnapWriteOpenFile(pWriter, fid); - if (code) goto _err; } - - code = tsdbSnapWriteRowData(pWriter, iRow); - if (code) goto _err; } - return code; - -_err: - tsdbError("vgId:%d, vnode snapshot tsdb write data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, - tstrerror(code)); +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, pId->suid, + pId->uid); + } return code; } -// SNAP_DATA_DEL -static int32_t tsdbSnapMoveWriteDelData(STsdbSnapWriter* pWriter, TABLEID* pId) { +static int32_t tsdbSnapWriteDelTableDataEnd(STsdbSnapWriter* pWriter) { int32_t code = 0; + int32_t lino = 0; - while (true) { - if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break; - - SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx); - - if (tTABLEIDCmprFn(pDelIdx, pId) >= 0) break; - - code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData); - if (code) goto _exit; - - SDelIdx delIdx = *pDelIdx; - code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx); - if (code) goto _exit; - - if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) { + if (taosArrayGetSize(pWriter->aDelData) > 0) { + SDelIdx* pDelIdx = taosArrayReserve(pWriter->aDelIdx, 1); + if (pDelIdx == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; - goto _exit; + TSDB_CHECK_CODE(code, lino, _exit); } - pWriter->iDelIdx++; + pDelIdx->suid = pWriter->tbid.suid; + pDelIdx->uid = pWriter->tbid.uid; + + code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, pDelIdx); + TSDB_CHECK_CODE(code, lino, _exit); } _exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbTrace("vgId:%d %s done", TD_VID(pWriter->pTsdb->pVnode), __func__); + } return code; } -static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { +static int32_t tsdbSnapWriteDelTableData(STsdbSnapWriter* pWriter, TABLEID* pId, uint8_t* pData, int64_t size) { int32_t code = 0; - STsdb* pTsdb = pWriter->pTsdb; - - // Open del file if not opened yet - if (pWriter->pDelFWriter == NULL) { - SDelFile* pDelFile = pWriter->fs.pDelFile; - - // reader - if (pDelFile) { - code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb); - if (code) goto _err; + int32_t lino = 0; - code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR); - if (code) goto _err; - } else { - taosArrayClear(pWriter->aDelIdxR); + if (pId == 
NULL || pId->uid != pWriter->tbid.uid) { + if (pWriter->tbid.uid) { + code = tsdbSnapWriteDelTableDataEnd(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); } - pWriter->iDelIdx = 0; - // writer - SDelFile delFile = {.commitID = pWriter->commitID}; - code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &delFile, pTsdb); - if (code) goto _err; - taosArrayClear(pWriter->aDelIdxW); + code = tsdbSnapWriteDelTableDataStart(pWriter, pId); + TSDB_CHECK_CODE(code, lino, _exit); } - SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; - TABLEID id = *(TABLEID*)pHdr->data; + if (pId == NULL) goto _exit; - ASSERT(pHdr->size + sizeof(SSnapDataHdr) == nData); + int64_t n = 0; + while (n < size) { + SDelData delData; + n += tGetDelData(pData + n, &delData); - // Move write data < id - code = tsdbSnapMoveWriteDelData(pWriter, &id); - if (code) goto _err; + if (taosArrayPush(pWriter->aDelData, &delData) < 0) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); + } + } + ASSERT(n == size); - // Merge incoming data with current - if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR) && - tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) == 0) { - SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx); +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } + return code; +} - code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData); - if (code) goto _err; +static int32_t tsdbSnapWriteDelDataStart(STsdbSnapWriter* pWriter) { + int32_t code = 0; + int32_t lino = 0; - pWriter->iDelIdx++; - } else { - taosArrayClear(pWriter->aDelData); - } + STsdb* pTsdb = pWriter->pTsdb; + SDelFile* pDelFile = pWriter->fs.pDelFile; - int64_t n = sizeof(SSnapDataHdr) + sizeof(TABLEID); - while (n < nData) { - SDelData delData; + pWriter->tbid = (TABLEID){0}; - n += tGetDelData(pData + n, &delData); + // reader + if (pDelFile) { + code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb); + TSDB_CHECK_CODE(code, lino, _exit); - if (taosArrayPush(pWriter->aDelData, &delData) == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; - } + code = tsdbOpenTombFileDataIter(pWriter->pDelFReader, &pWriter->pTIter); + TSDB_CHECK_CODE(code, lino, _exit); } - SDelIdx delIdx = {.suid = id.suid, .uid = id.uid}; - code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx); - if (code) goto _err; + // writer + code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &(SDelFile){.commitID = pWriter->commitID}, pTsdb); + TSDB_CHECK_CODE(code, lino, _exit); - if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) { + if ((pWriter->aDelIdx = taosArrayInit(0, sizeof(SDelIdx))) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; - goto _err; + TSDB_CHECK_CODE(code, lino, _exit); + } + if ((pWriter->aDelData = taosArrayInit(0, sizeof(SDelData))) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + TSDB_CHECK_CODE(code, lino, _exit); } - return code; - -_err: - tsdbError("vgId:%d, vnode snapshot tsdb write del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, - tstrerror(code)); +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbDebug("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__); + } return code; } -static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) { +static int32_t tsdbSnapWriteDelDataEnd(STsdbSnapWriter* pWriter) { int32_t code = 0; - STsdb* pTsdb = pWriter->pTsdb; + 
int32_t lino = 0; - if (pWriter->pDelFWriter == NULL) return code; + STsdb* pTsdb = pWriter->pTsdb; - TABLEID id = {.suid = INT64_MAX, .uid = INT64_MAX}; - code = tsdbSnapMoveWriteDelData(pWriter, &id); - if (code) goto _err; + // end remaining table with NULL data + code = tsdbSnapWriteDelTableData(pWriter, NULL, NULL, 0); + TSDB_CHECK_CODE(code, lino, _exit); - code = tsdbWriteDelIdx(pWriter->pDelFWriter, pWriter->aDelIdxW); - if (code) goto _err; + // update file-level info + code = tsdbWriteDelIdx(pWriter->pDelFWriter, pWriter->aDelIdx); + TSDB_CHECK_CODE(code, lino, _exit); code = tsdbUpdateDelFileHdr(pWriter->pDelFWriter); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, _exit); code = tsdbFSUpsertDelFile(&pWriter->fs, &pWriter->pDelFWriter->fDel); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, _exit); code = tsdbDelFWriterClose(&pWriter->pDelFWriter, 1); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, _exit); if (pWriter->pDelFReader) { code = tsdbDelFReaderClose(&pWriter->pDelFReader); - if (code) goto _err; + TSDB_CHECK_CODE(code, lino, _exit); + } + + if (pWriter->pTIter) { + tsdbCloseDataIter2(pWriter->pTIter); + pWriter->pTIter = NULL; } - tsdbInfo("vgId:%d, vnode snapshot tsdb write del for %s end", TD_VID(pTsdb->pVnode), pTsdb->path); +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbInfo("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__); + } return code; +} + +static int32_t tsdbSnapWriteDelData(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr) { + int32_t code = 0; + int32_t lino = 0; + + STsdb* pTsdb = pWriter->pTsdb; -_err: - tsdbError("vgId:%d, vnode snapshot tsdb write del end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, - tstrerror(code)); + // start to write del data if need + if (pWriter->pDelFWriter == NULL) { + code = tsdbSnapWriteDelDataStart(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); + } + + // do write del data + code = tsdbSnapWriteDelTableData(pWriter, (TABLEID*)pHdr->data, pHdr->data + sizeof(TABLEID), + pHdr->size - sizeof(TABLEID)); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) { + tsdbError("vgId:%d %s failed since %s", TD_VID(pTsdb->pVnode), __func__, tstrerror(code)); + } else { + tsdbTrace("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__); + } return code; } // APIs int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter) { - int32_t code = 0; - int32_t lino = 0; - STsdbSnapWriter* pWriter = NULL; + int32_t code = 0; + int32_t lino = 0; // alloc - pWriter = (STsdbSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter)); + STsdbSnapWriter* pWriter = (STsdbSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter)); if (pWriter == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); @@ -1286,11 +1835,6 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr pWriter->pTsdb = pTsdb; pWriter->sver = sver; pWriter->ever = ever; - - code = tsdbFSCopy(pTsdb, &pWriter->fs); - TSDB_CHECK_CODE(code, lino, _exit); - - // config pWriter->minutes = pTsdb->keepCfg.days; pWriter->precision = pTsdb->keepCfg.precision; pWriter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows; @@ -1298,102 +1842,70 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr pWriter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression; pWriter->commitID = pTsdb->pVnode->state.commitID; + code = tsdbFSCopy(pTsdb, &pWriter->fs); + 
TSDB_CHECK_CODE(code, lino, _exit); + // SNAP_DATA_TSDB - code = tBlockDataCreate(&pWriter->bData); + code = tBlockDataCreate(&pWriter->inData); TSDB_CHECK_CODE(code, lino, _exit); pWriter->fid = INT32_MIN; - pWriter->id = (TABLEID){0}; - // Reader - pWriter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); - if (pWriter->dReader.aBlockIdx == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - code = tBlockDataCreate(&pWriter->dReader.bData); - TSDB_CHECK_CODE(code, lino, _exit); - // Writer - pWriter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); - if (pWriter->dWriter.aBlockIdx == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - pWriter->dWriter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk)); - if (pWriter->dWriter.aSttBlk == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - code = tBlockDataCreate(&pWriter->dWriter.bData); + code = tBlockDataCreate(&pWriter->bData); TSDB_CHECK_CODE(code, lino, _exit); - code = tBlockDataCreate(&pWriter->dWriter.sData); + + code = tBlockDataCreate(&pWriter->sData); TSDB_CHECK_CODE(code, lino, _exit); // SNAP_DATA_DEL - pWriter->aDelIdxR = taosArrayInit(0, sizeof(SDelIdx)); - if (pWriter->aDelIdxR == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - pWriter->aDelData = taosArrayInit(0, sizeof(SDelData)); - if (pWriter->aDelData == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } - pWriter->aDelIdxW = taosArrayInit(0, sizeof(SDelIdx)); - if (pWriter->aDelIdxW == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - TSDB_CHECK_CODE(code, lino, _exit); - } _exit: if (code) { - tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); - *ppWriter = NULL; - + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); if (pWriter) { - if (pWriter->aDelIdxW) taosArrayDestroy(pWriter->aDelIdxW); - if (pWriter->aDelData) taosArrayDestroy(pWriter->aDelData); - if (pWriter->aDelIdxR) taosArrayDestroy(pWriter->aDelIdxR); - tBlockDataDestroy(&pWriter->dWriter.sData); - tBlockDataDestroy(&pWriter->dWriter.bData); - if (pWriter->dWriter.aSttBlk) taosArrayDestroy(pWriter->dWriter.aSttBlk); - if (pWriter->dWriter.aBlockIdx) taosArrayDestroy(pWriter->dWriter.aBlockIdx); - tBlockDataDestroy(&pWriter->dReader.bData); - if (pWriter->dReader.aBlockIdx) taosArrayDestroy(pWriter->dReader.aBlockIdx); + tBlockDataDestroy(&pWriter->sData); tBlockDataDestroy(&pWriter->bData); + tBlockDataDestroy(&pWriter->inData); tsdbFSDestroy(&pWriter->fs); - taosMemoryFree(pWriter); + pWriter = NULL; } } else { - tsdbInfo("vgId:%d, %s done", TD_VID(pTsdb->pVnode), __func__); - *ppWriter = pWriter; + tsdbInfo("vgId:%d %s done, sver:%" PRId64 " ever:%" PRId64, TD_VID(pTsdb->pVnode), __func__, sver, ever); } + *ppWriter = pWriter; return code; } int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter) { int32_t code = 0; - if (pWriter->dWriter.pWriter) { - code = tsdbSnapWriteCloseFile(pWriter); - if (code) goto _exit; + int32_t lino = 0; + + if (pWriter->pDataFWriter) { + code = tsdbSnapWriteFileDataEnd(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); } - code = tsdbSnapWriteDelEnd(pWriter); - if (code) goto _exit; + if (pWriter->pDelFWriter) { + code = tsdbSnapWriteDelDataEnd(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); + } code = tsdbFSPrepareCommit(pWriter->pTsdb, &pWriter->fs); - if (code) goto _exit; + 
TSDB_CHECK_CODE(code, lino, _exit); _exit: if (code) { - tsdbError("vgId:%d, %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code)); + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbDebug("vgId:%d %s done", TD_VID(pWriter->pTsdb->pVnode), __func__); } return code; } int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) { - int32_t code = 0; + int32_t code = 0; + int32_t lino = 0; + STsdbSnapWriter* pWriter = *ppWriter; STsdb* pTsdb = pWriter->pTsdb; @@ -1406,7 +1918,7 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) { code = tsdbFSCommit(pWriter->pTsdb); if (code) { taosThreadRwlockUnlock(&pTsdb->rwLock); - goto _err; + TSDB_CHECK_CODE(code, lino, _exit); } // unlock @@ -1414,72 +1926,60 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) { } // SNAP_DATA_DEL - taosArrayDestroy(pWriter->aDelIdxW); taosArrayDestroy(pWriter->aDelData); - taosArrayDestroy(pWriter->aDelIdxR); + taosArrayDestroy(pWriter->aDelIdx); // SNAP_DATA_TSDB - - // Writer - tBlockDataDestroy(&pWriter->dWriter.sData); - tBlockDataDestroy(&pWriter->dWriter.bData); - taosArrayDestroy(pWriter->dWriter.aSttBlk); - tMapDataClear(&pWriter->dWriter.mDataBlk); - taosArrayDestroy(pWriter->dWriter.aBlockIdx); - - // Reader - tBlockDataDestroy(&pWriter->dReader.bData); - tMapDataClear(&pWriter->dReader.mDataBlk); - taosArrayDestroy(pWriter->dReader.aBlockIdx); - + tBlockDataDestroy(&pWriter->sData); tBlockDataDestroy(&pWriter->bData); + taosArrayDestroy(pWriter->aSttBlk); + tMapDataClear(&pWriter->mDataBlk); + taosArrayDestroy(pWriter->aBlockIdx); tDestroyTSchema(pWriter->skmTable.pTSchema); + tBlockDataDestroy(&pWriter->inData); for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t*); iBuf++) { tFree(pWriter->aBuf[iBuf]); } - tsdbInfo("vgId:%d, %s done", TD_VID(pWriter->pTsdb->pVnode), __func__); + tsdbFSDestroy(&pWriter->fs); taosMemoryFree(pWriter); *ppWriter = NULL; - return code; -_err: - tsdbError("vgId:%d, vnode snapshot tsdb writer close for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), - pWriter->pTsdb->path, tstrerror(code)); - taosMemoryFree(pWriter); - *ppWriter = NULL; +_exit: + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, lino, tstrerror(code)); + } else { + tsdbInfo("vgId:%d %s done", TD_VID(pTsdb->pVnode), __func__); + } return code; } -int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { - int32_t code = 0; - SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; +int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr) { + int32_t code = 0; + int32_t lino = 0; - // ts data if (pHdr->type == SNAP_DATA_TSDB) { - code = tsdbSnapWriteData(pWriter, pData, nData); - if (code) goto _err; - + code = tsdbSnapWriteTimeSeriesData(pWriter, pHdr); + TSDB_CHECK_CODE(code, lino, _exit); goto _exit; - } else { - if (pWriter->dWriter.pWriter) { - code = tsdbSnapWriteCloseFile(pWriter); - if (code) goto _err; - } + } else if (pWriter->pDataFWriter) { + code = tsdbSnapWriteFileDataEnd(pWriter); + TSDB_CHECK_CODE(code, lino, _exit); } - // del data if (pHdr->type == SNAP_DATA_DEL) { - code = tsdbSnapWriteDel(pWriter, pData, nData); - if (code) goto _err; + code = tsdbSnapWriteDelData(pWriter, pHdr); + TSDB_CHECK_CODE(code, lino, _exit); + goto _exit; } _exit: - tsdbDebug("vgId:%d, tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), 
pWriter->pTsdb->path); - return code; - -_err: - tsdbError("vgId:%d, tsdb snapshot write for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path, - tstrerror(code)); + if (code) { + tsdbError("vgId:%d %s failed at line %d since %s, type:%d index:%" PRId64 " size:%" PRId64, + TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code), pHdr->type, pHdr->index, pHdr->size); + } else { + tsdbDebug("vgId:%d %s done, type:%d index:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, + pHdr->type, pHdr->index, pHdr->size); + } return code; } diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 2afa44374e775e9076fe6ecdd31477700b6a1bbf..aaa707dbb4348252ef92b2da5c78d53ccf850eb2 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -683,6 +683,18 @@ int32_t tsdbRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW * } tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal); + if ((!COL_VAL_IS_NONE(pColVal)) && (!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) { + uint8_t *pVal = pColVal->value.pData; + + pColVal->value.pData = NULL; + code = tRealloc(&pColVal->value.pData, pColVal->value.nData); + if (code) goto _exit; + + if (pColVal->value.nData) { + memcpy(pColVal->value.pData, pVal, pColVal->value.nData); + } + } + if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; @@ -720,13 +732,59 @@ int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal); if (key.version > pMerger->version) { +#if 0 + if (!COL_VAL_IS_NONE(pColVal)) { + if ((!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) { + SColVal *tColVal = taosArrayGet(pMerger->pArray, iCol); + code = tRealloc(&tColVal->value.pData, pColVal->value.nData); + if (code) return code; + + tColVal->value.nData = pColVal->value.nData; + if (pColVal->value.nData) { + memcpy(tColVal->value.pData, pColVal->value.pData, pColVal->value.nData); + } + tColVal->flag = 0; + } else { + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } +#endif if (!COL_VAL_IS_NONE(pColVal)) { - taosArraySet(pMerger->pArray, iCol, pColVal); + if (IS_VAR_DATA_TYPE(pColVal->type)) { + SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol); + if (!COL_VAL_IS_NULL(pColVal)) { + code = tRealloc(&pTColVal->value.pData, pColVal->value.nData); + if (code) return code; + + pTColVal->value.nData = pColVal->value.nData; + if (pTColVal->value.nData) { + memcpy(pTColVal->value.pData, pColVal->value.pData, pTColVal->value.nData); + } + pTColVal->flag = 0; + } else { + tFree(pTColVal->value.pData); + pTColVal->value.pData = NULL; + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else { + taosArraySet(pMerger->pArray, iCol, pColVal); + } } } else if (key.version < pMerger->version) { SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol); if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) { - taosArraySet(pMerger->pArray, iCol, pColVal); + if ((!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) { + code = tRealloc(&tColVal->value.pData, pColVal->value.nData); + if (code) return code; + + tColVal->value.nData = pColVal->value.nData; + if (pColVal->value.nData) { + memcpy(tColVal->value.pData, pColVal->value.pData, pColVal->value.nData); + } + tColVal->flag = 0; + } else { + taosArraySet(pMerger->pArray, iCol, pColVal); + } } } else { ASSERT(0 && "dup versions not allowed"); @@ 
-766,6 +824,18 @@ int32_t tsdbRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema // other for (int16_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) { tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal); + if ((!COL_VAL_IS_NONE(pColVal)) && (!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) { + uint8_t *pVal = pColVal->value.pData; + + pColVal->value.pData = NULL; + code = tRealloc(&pColVal->value.pData, pColVal->value.nData); + if (code) goto _exit; + + if (pColVal->value.nData) { + memcpy(pColVal->value.pData, pVal, pColVal->value.nData); + } + } + if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; @@ -776,7 +846,16 @@ _exit: return code; } -void tsdbRowMergerClear(SRowMerger *pMerger) { taosArrayDestroy(pMerger->pArray); } +void tsdbRowMergerClear(SRowMerger *pMerger) { + for (int32_t iCol = 1; iCol < pMerger->pTSchema->numOfCols; iCol++) { + SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol); + if (IS_VAR_DATA_TYPE(pTColVal->type)) { + tFree(pTColVal->value.pData); + } + } + + taosArrayDestroy(pMerger->pArray); +} int32_t tsdbRowMerge(SRowMerger *pMerger, TSDBROW *pRow) { int32_t code = 0; @@ -790,12 +869,47 @@ int32_t tsdbRowMerge(SRowMerger *pMerger, TSDBROW *pRow) { if (key.version > pMerger->version) { if (!COL_VAL_IS_NONE(pColVal)) { - taosArraySet(pMerger->pArray, iCol, pColVal); + if (IS_VAR_DATA_TYPE(pColVal->type)) { + SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol); + if (!COL_VAL_IS_NULL(pColVal)) { + code = tRealloc(&pTColVal->value.pData, pColVal->value.nData); + if (code) goto _exit; + + pTColVal->value.nData = pColVal->value.nData; + if (pTColVal->value.nData) { + memcpy(pTColVal->value.pData, pColVal->value.pData, pTColVal->value.nData); + } + pTColVal->flag = 0; + } else { + tFree(pTColVal->value.pData); + pTColVal->value.pData = NULL; + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else { + taosArraySet(pMerger->pArray, iCol, pColVal); + } } } else if (key.version < pMerger->version) { SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol); if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) { - taosArraySet(pMerger->pArray, iCol, pColVal); + if (IS_VAR_DATA_TYPE(pColVal->type)) { + if (!COL_VAL_IS_NULL(pColVal)) { + code = tRealloc(&tColVal->value.pData, pColVal->value.nData); + if (code) goto _exit; + + tColVal->value.nData = pColVal->value.nData; + if (tColVal->value.nData) { + memcpy(tColVal->value.pData, pColVal->value.pData, tColVal->value.nData); + } + tColVal->flag = 0; + } else { + tFree(tColVal->value.pData); + tColVal->value.pData = NULL; + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else { + taosArraySet(pMerger->pArray, iCol, pColVal); + } } } else { ASSERT(0); @@ -1019,6 +1133,7 @@ _exit: void tBlockDataReset(SBlockData *pBlockData) { pBlockData->suid = 0; pBlockData->uid = 0; + pBlockData->nRow = 0; } void tBlockDataClear(SBlockData *pBlockData) { diff --git a/source/dnode/vnode/src/vnd/vnodeBufPool.c b/source/dnode/vnode/src/vnd/vnodeBufPool.c index 9c1d54b57ebc45a5dfa551d987fd2ec312717559..b1575fb49642b4b5ee93265f8286c64e433f98fa 100644 --- a/source/dnode/vnode/src/vnd/vnodeBufPool.c +++ b/source/dnode/vnode/src/vnd/vnodeBufPool.c @@ -16,9 +16,7 @@ #include "vnd.h" /* ------------------------ STRUCTURES ------------------------ */ -#define VNODE_BUFPOOL_SEGMENTS 3 - -static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool) { +static int vnodeBufPoolCreate(SVnode *pVnode, int32_t id, int64_t size, SVBufPool 
**ppPool) { SVBufPool *pPool; pPool = taosMemoryMalloc(sizeof(SVBufPool) + size); @@ -26,6 +24,21 @@ static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool) terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } + memset(pPool, 0, sizeof(SVBufPool)); + + // query handle list + taosThreadMutexInit(&pPool->mutex, NULL); + pPool->nQuery = 0; + pPool->qList.pNext = &pPool->qList; + pPool->qList.ppNext = &pPool->qList.pNext; + + pPool->pVnode = pVnode; + pPool->id = id; + pPool->ptr = pPool->node.data; + pPool->pTail = &pPool->node; + pPool->node.prev = NULL; + pPool->node.pnext = &pPool->pTail; + pPool->node.size = size; if (VND_IS_RSMA(pVnode)) { pPool->lock = taosMemoryMalloc(sizeof(TdThreadSpinlock)); @@ -44,16 +57,6 @@ static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool) pPool->lock = NULL; } - pPool->next = NULL; - pPool->pVnode = pVnode; - pPool->nRef = 0; - pPool->size = 0; - pPool->ptr = pPool->node.data; - pPool->pTail = &pPool->node; - pPool->node.prev = NULL; - pPool->node.pnext = &pPool->pTail; - pPool->node.size = size; - *ppPool = pPool; return 0; } @@ -64,27 +67,25 @@ static int vnodeBufPoolDestroy(SVBufPool *pPool) { taosThreadSpinDestroy(pPool->lock); taosMemoryFree((void *)pPool->lock); } + taosThreadMutexDestroy(&pPool->mutex); taosMemoryFree(pPool); return 0; } int vnodeOpenBufPool(SVnode *pVnode) { - SVBufPool *pPool = NULL; - int64_t size = pVnode->config.szBuf / VNODE_BUFPOOL_SEGMENTS; + int64_t size = pVnode->config.szBuf / VNODE_BUFPOOL_SEGMENTS; - ASSERT(pVnode->pPool == NULL); - - for (int i = 0; i < VNODE_BUF_POOL_SEG; i++) { + for (int i = 0; i < VNODE_BUFPOOL_SEGMENTS; i++) { // create pool - if (vnodeBufPoolCreate(pVnode, size, &pPool)) { + if (vnodeBufPoolCreate(pVnode, i, size, &pVnode->aBufPool[i])) { vError("vgId:%d, failed to open vnode buffer pool since %s", TD_VID(pVnode), tstrerror(terrno)); vnodeCloseBufPool(pVnode); return -1; } - // add pool to vnode - pPool->next = pVnode->pPool; - pVnode->pPool = pPool; + // add to free list + pVnode->aBufPool[i]->freeNext = pVnode->freeList; + pVnode->freeList = pVnode->aBufPool[i]; } vDebug("vgId:%d, vnode buffer pool is opened, size:%" PRId64, TD_VID(pVnode), size); @@ -92,23 +93,19 @@ int vnodeOpenBufPool(SVnode *pVnode) { } int vnodeCloseBufPool(SVnode *pVnode) { - SVBufPool *pPool; - - for (pPool = pVnode->pPool; pPool; pPool = pVnode->pPool) { - pVnode->pPool = pPool->next; - vnodeBufPoolDestroy(pPool); + for (int32_t i = 0; i < VNODE_BUFPOOL_SEGMENTS; i++) { + if (pVnode->aBufPool[i]) { + vnodeBufPoolDestroy(pVnode->aBufPool[i]); + pVnode->aBufPool[i] = NULL; + } } - if (pVnode->inUse) { - vnodeBufPoolDestroy(pVnode->inUse); - pVnode->inUse = NULL; - } vDebug("vgId:%d, vnode buffer pool is closed", TD_VID(pVnode)); - return 0; } void vnodeBufPoolReset(SVBufPool *pPool) { + ASSERT(pPool->nQuery == 0); for (SVBufPoolNode *pNode = pPool->pTail; pNode->prev; pNode = pPool->pTail) { ASSERT(pNode->pnext == &pPool->pTail); pNode->prev->pnext = &pPool->pTail; @@ -123,7 +120,6 @@ void vnodeBufPoolReset(SVBufPool *pPool) { pPool->ptr = pPool->node.data; } - void *vnodeBufPoolMallocAligned(SVBufPool *pPool, int size) { SVBufPoolNode *pNode; void *p = NULL; @@ -216,35 +212,119 @@ void vnodeBufPoolRef(SVBufPool *pPool) { ASSERT(nRef > 0); } -void vnodeBufPoolUnRef(SVBufPool *pPool) { - if (pPool == NULL) { - return; +void vnodeBufPoolAddToFreeList(SVBufPool *pPool) { + SVnode *pVnode = pPool->pVnode; + + int64_t size = pVnode->config.szBuf / VNODE_BUFPOOL_SEGMENTS; + if 
(pPool->node.size != size) { + SVBufPool *pNewPool = NULL; + if (vnodeBufPoolCreate(pVnode, pPool->id, size, &pNewPool) < 0) { + vWarn("vgId:%d failed to change buffer pool of id %d size from %" PRId64 " to %" PRId64 " since %s", + TD_VID(pVnode), pPool->id, pPool->node.size, size, tstrerror(errno)); + } else { + vInfo("vgId:%d buffer pool of id %d size changed from %" PRId64 " to %" PRId64, TD_VID(pVnode), pPool->id, + pPool->node.size, size); + + vnodeBufPoolDestroy(pPool); + pPool = pNewPool; + pVnode->aBufPool[pPool->id] = pPool; + } } - int32_t nRef = atomic_sub_fetch_32(&pPool->nRef, 1); - if (nRef == 0) { - SVnode *pVnode = pPool->pVnode; - - vnodeBufPoolReset(pPool); - - taosThreadMutexLock(&pVnode->mutex); - - int64_t size = pVnode->config.szBuf / VNODE_BUFPOOL_SEGMENTS; - if (pPool->node.size != size) { - SVBufPool *pPoolT = NULL; - if (vnodeBufPoolCreate(pVnode, size, &pPoolT) < 0) { - vWarn("vgId:%d, try to change buf pools size from %" PRId64 " to %" PRId64 " since %s", TD_VID(pVnode), - pPool->node.size, size, tstrerror(errno)); - } else { - vnodeBufPoolDestroy(pPool); - pPool = pPoolT; - vDebug("vgId:%d, change buf pools size from %" PRId64 " to %" PRId64, TD_VID(pVnode), pPool->node.size, size); - } + + // add to free list + vDebug("vgId:%d buffer pool %p of id %d is added to free list", TD_VID(pVnode), pPool, pPool->id); + vnodeBufPoolReset(pPool); + pPool->freeNext = pVnode->freeList; + pVnode->freeList = pPool; + taosThreadCondSignal(&pVnode->poolNotEmpty); +} + +void vnodeBufPoolUnRef(SVBufPool *pPool, bool proactive) { + if (pPool == NULL) return; + + SVnode *pVnode = pPool->pVnode; + + if (proactive) taosThreadMutexLock(&pVnode->mutex); + + if (atomic_sub_fetch_32(&pPool->nRef, 1) > 0) goto _exit; + + // remove from recycle queue or on-recycle position + if (pVnode->onRecycle == pPool) { + pVnode->onRecycle = NULL; + } else { + if (pPool->recyclePrev) { + pPool->recyclePrev->recycleNext = pPool->recycleNext; + } else { + pVnode->recycleHead = pPool->recycleNext; + } + + if (pPool->recycleNext) { + pPool->recycleNext->recyclePrev = pPool->recyclePrev; + } else { + pVnode->recycleTail = pPool->recyclePrev; } + pPool->recyclePrev = pPool->recycleNext = NULL; + } + + vnodeBufPoolAddToFreeList(pPool); + +_exit: + if (proactive) taosThreadMutexUnlock(&pVnode->mutex); + return; +} + +int32_t vnodeBufPoolRegisterQuery(SVBufPool *pPool, SQueryNode *pQNode) { + int32_t code = 0; + + taosThreadMutexLock(&pPool->mutex); + + pQNode->pNext = pPool->qList.pNext; + pQNode->ppNext = &pPool->qList.pNext; + pPool->qList.pNext->ppNext = &pQNode->pNext; + pPool->qList.pNext = pQNode; + pPool->nQuery++; - pPool->next = pVnode->pPool; - pVnode->pPool = pPool; - taosThreadCondSignal(&pVnode->poolNotEmpty); + taosThreadMutexUnlock(&pPool->mutex); - taosThreadMutexUnlock(&pVnode->mutex); +_exit: + return code; +} + +void vnodeBufPoolDeregisterQuery(SVBufPool *pPool, SQueryNode *pQNode, bool proactive) { + int32_t code = 0; + + if (proactive) taosThreadMutexLock(&pPool->mutex); + + pQNode->pNext->ppNext = pQNode->ppNext; + *pQNode->ppNext = pQNode->pNext; + pPool->nQuery--; + + if (proactive) taosThreadMutexUnlock(&pPool->mutex); +} + +int32_t vnodeBufPoolRecycle(SVBufPool *pPool) { + int32_t code = 0; + + SVnode *pVnode = pPool->pVnode; + + vDebug("vgId:%d recycle buffer pool %p of id %d", TD_VID(pVnode), pPool, pPool->id); + + taosThreadMutexLock(&pPool->mutex); + + SQueryNode *pNode = pPool->qList.pNext; + while (pNode != &pPool->qList) { + SQueryNode *pTNode = pNode->pNext; + + int32_t rc = 
pNode->reseek(pNode->pQHandle); + if (rc == 0 || rc == TSDB_CODE_VND_QUERY_BUSY) { + pNode = pTNode; + } else { + code = rc; + goto _exit; + } } + +_exit: + taosThreadMutexUnlock(&pPool->mutex); + return code; } diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 69af536a4454909be7816ebda880e99e648656c3..c326c8bfacbd897fc15527edaef5cd8e77c40f5f 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -262,9 +262,10 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { tjsonGetNumberValue(info, "nodePort", pNode->nodePort, code); if (code < 0) return -1; tjsonGetStringValue(info, "nodeFqdn", pNode->nodeFqdn); - if (code < 0) return -1; tjsonGetNumberValue(info, "nodeId", pNode->nodeId, code); + if (code < 0) return -1; tjsonGetNumberValue(info, "clusterId", pNode->clusterId, code); + if (code < 0) return -1; vDebug("vgId:%d, decode config, replica:%d ep:%s:%u dnode:%d", pCfg->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId); } diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index e6a2a32ce0f5c6f97d84b31151d8d0b932b03fc3..3e18da3e74d3b9bfccdc091c6a3ffe56a9d34d83 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -21,44 +21,149 @@ static int vnodeEncodeInfo(const SVnodeInfo *pInfo, char **ppData); static int vnodeCommitImpl(SCommitInfo *pInfo); -int vnodeBegin(SVnode *pVnode) { - // alloc buffer pool - taosThreadMutexLock(&pVnode->mutex); +#define WAIT_TIME_MILI_SEC 10 // miliseconds + +static int32_t vnodeTryRecycleBufPool(SVnode *pVnode) { + int32_t code = 0; + + if (pVnode->onRecycle == NULL) { + if (pVnode->recycleHead == NULL) { + vDebug("vgId:%d no recyclable buffer pool", TD_VID(pVnode)); + goto _exit; + } else { + vDebug("vgId:%d buffer pool %p of id %d on recycle queue, try to recycle", TD_VID(pVnode), pVnode->recycleHead, + pVnode->recycleHead->id); + + pVnode->onRecycle = pVnode->recycleHead; + if (pVnode->recycleHead == pVnode->recycleTail) { + pVnode->recycleHead = pVnode->recycleTail = NULL; + } else { + pVnode->recycleHead = pVnode->recycleHead->recycleNext; + pVnode->recycleHead->recyclePrev = NULL; + } + pVnode->onRecycle->recycleNext = pVnode->onRecycle->recyclePrev = NULL; + } + } + + code = vnodeBufPoolRecycle(pVnode->onRecycle); + if (code) goto _exit; - while (pVnode->pPool == NULL) { - taosThreadCondWait(&pVnode->poolNotEmpty, &pVnode->mutex); +_exit: + if (code) { + vError("vgId:%d %s failed since %s", TD_VID(pVnode), __func__, tstrerror(code)); } + return code; +} +static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) { + int32_t code = 0; + int32_t lino = 0; - pVnode->inUse = pVnode->pPool; - pVnode->inUse->nRef = 1; - pVnode->pPool = pVnode->inUse->next; - pVnode->inUse->next = NULL; + taosThreadMutexLock(&pVnode->mutex); + int32_t nTry = 0; + for (;;) { + ++nTry; + + if (pVnode->freeList) { + vDebug("vgId:%d allocate free buffer pool on %d try, pPool:%p id:%d", TD_VID(pVnode), nTry, pVnode->freeList, + pVnode->freeList->id); + + pVnode->inUse = pVnode->freeList; + pVnode->inUse->nRef = 1; + pVnode->freeList = pVnode->inUse->freeNext; + pVnode->inUse->freeNext = NULL; + break; + } else { + vDebug("vgId:%d no free buffer pool on %d try, try to recycle...", TD_VID(pVnode), nTry); + + code = vnodeTryRecycleBufPool(pVnode); + TSDB_CHECK_CODE(code, lino, _exit); + + if (pVnode->freeList == NULL) { + vDebug("vgId:%d no free buffer pool on %d try, wait %d ms...", 
TD_VID(pVnode), nTry, WAIT_TIME_MILI_SEC); + + struct timeval tv; + struct timespec ts; + taosGetTimeOfDay(&tv); + ts.tv_nsec = tv.tv_usec * 1000 + WAIT_TIME_MILI_SEC * 1000000; + if (ts.tv_nsec > 999999999l) { + ts.tv_sec = tv.tv_sec + 1; + ts.tv_nsec -= 1000000000l; + } else { + ts.tv_sec = tv.tv_sec; + } + + int32_t rc = taosThreadCondTimedWait(&pVnode->poolNotEmpty, &pVnode->mutex, &ts); + if (rc && rc != ETIMEDOUT) { + code = TAOS_SYSTEM_ERROR(rc); + TSDB_CHECK_CODE(code, lino, _exit); + } + } + } + } + +_exit: taosThreadMutexUnlock(&pVnode->mutex); + if (code) { + vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); + } + return code; +} +int vnodeBegin(SVnode *pVnode) { + int32_t code = 0; + int32_t lino = 0; pVnode->state.commitID++; + + // alloc buffer pool + code = vnodeGetBufPoolToUse(pVnode); + TSDB_CHECK_CODE(code, lino, _exit); + // begin meta if (metaBegin(pVnode->pMeta, META_BEGIN_HEAP_BUFFERPOOL) < 0) { - vError("vgId:%d, failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno)); - return -1; + code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); } // begin tsdb if (tsdbBegin(pVnode->pTsdb) < 0) { - vError("vgId:%d, failed to begin tsdb since %s", TD_VID(pVnode), tstrerror(terrno)); - return -1; + code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); } // begin sma if (VND_IS_RSMA(pVnode) && smaBegin(pVnode->pSma) < 0) { - vError("vgId:%d, failed to begin sma since %s", TD_VID(pVnode), tstrerror(terrno)); - return -1; + code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); } - return 0; +_exit: + if (code) { + terrno = code; + vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); + } + return code; +} + +void vnodeUpdCommitSched(SVnode *pVnode) { + int64_t randNum = taosRand(); + pVnode->commitSched.commitMs = taosGetMonoTimestampMs(); + pVnode->commitSched.maxWaitMs = tsVndCommitMaxIntervalMs + (randNum % tsVndCommitMaxIntervalMs); } int vnodeShouldCommit(SVnode *pVnode) { + if (!pVnode->inUse || !osDataSpaceAvailable()) { + return false; + } + + SVCommitSched *pSched = &pVnode->commitSched; + int64_t nowMs = taosGetMonoTimestampMs(); + + return (((pVnode->inUse->size > pVnode->inUse->node.size) && (pSched->commitMs + SYNC_VND_COMMIT_MIN_MS < nowMs)) || + (pVnode->inUse->size > 0 && pSched->commitMs + pSched->maxWaitMs < nowMs)); +} + +int vnodeShouldCommitOld(SVnode *pVnode) { if (pVnode->inUse) { return osDataSpaceAvailable() && (pVnode->inUse->size > pVnode->inUse->node.size); } @@ -191,6 +296,12 @@ int32_t vnodePrepareCommit(SVnode *pVnode, SCommitInfo *pInfo) { tsem_wait(&pVnode->canCommit); + taosThreadMutexLock(&pVnode->mutex); + ASSERT(pVnode->onCommit == NULL); + pVnode->onCommit = pVnode->inUse; + pVnode->inUse = NULL; + taosThreadMutexUnlock(&pVnode->mutex); + pVnode->state.commitTerm = pVnode->state.applyTerm; pInfo->info.config = pVnode->config; @@ -220,9 +331,6 @@ int32_t vnodePrepareCommit(SVnode *pVnode, SCommitInfo *pInfo) { code = smaPrepareAsyncCommit(pVnode->pSma); if (code) goto _exit; - vnodeBufPoolUnRef(pVnode->inUse); - pVnode->inUse = NULL; - _exit: if (code) { vError("vgId:%d, %s failed at line %d since %s, commit id:%" PRId64, TD_VID(pVnode), __func__, lino, @@ -233,22 +341,52 @@ _exit: return code; } +static void vnodeReturnBufPool(SVnode *pVnode) { + taosThreadMutexLock(&pVnode->mutex); + SVBufPool *pPool = pVnode->onCommit; + int32_t nRef = atomic_sub_fetch_32(&pPool->nRef, 1); + + pVnode->onCommit = NULL; + if (nRef == 0) { + 
vnodeBufPoolAddToFreeList(pPool); + } else if (nRef > 0) { + vDebug("vgId:%d buffer pool %p of id %d is added to recycle queue", TD_VID(pVnode), pPool, pPool->id); + + if (pVnode->recycleTail == NULL) { + pPool->recyclePrev = pPool->recycleNext = NULL; + pVnode->recycleHead = pVnode->recycleTail = pPool; + } else { + pPool->recyclePrev = pVnode->recycleTail; + pPool->recycleNext = NULL; + pVnode->recycleTail->recycleNext = pPool; + pVnode->recycleTail = pPool; + } + } else { + ASSERT(0); + } + + taosThreadMutexUnlock(&pVnode->mutex); +} static int32_t vnodeCommitTask(void *arg) { int32_t code = 0; SCommitInfo *pInfo = (SCommitInfo *)arg; + SVnode *pVnode = pInfo->pVnode; // commit code = vnodeCommitImpl(pInfo); if (code) goto _exit; + vnodeReturnBufPool(pVnode); + _exit: // end commit - tsem_post(&pInfo->pVnode->canCommit); + tsem_post(&pVnode->canCommit); taosMemoryFree(pInfo); return code; } + int vnodeAsyncCommit(SVnode *pVnode) { int32_t code = 0; @@ -297,7 +435,9 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) { SVnode *pVnode = pInfo->pVnode; vInfo("vgId:%d, start to commit, commitId:%" PRId64 " version:%" PRId64 " term: %" PRId64, TD_VID(pVnode), - pVnode->state.commitID, pVnode->state.applied, pVnode->state.applyTerm); + pInfo->info.state.commitID, pInfo->info.state.committed, pInfo->info.state.commitTerm); + + vnodeUpdCommitSched(pVnode); // persist wal before starting if (walPersist(pVnode->pWal) < 0) { @@ -311,8 +451,7 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) { snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path); } - // walBeginSnapshot(pVnode->pWal, pVnode->state.applied); - syncBeginSnapshot(pVnode->sync, pVnode->state.applied); + syncBeginSnapshot(pVnode->sync, pInfo->info.state.committed); // commit each sub-system code = tsdbCommit(pVnode->pTsdb, pInfo); @@ -354,7 +493,6 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) { return -1; } - // walEndSnapshot(pVnode->pWal); syncEndSnapshot(pVnode->sync); _exit: diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index bec5d2977b8db7b4c11efd7c977264b2b5800372..61cb75b1da592a4b6d06b3fea21a2bb7b69ae22a 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -134,6 +134,21 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { return NULL; } + // save vnode info on dnode ep changed + bool updated = false; + SSyncCfg *pCfg = &info.config.syncCfg; + for (int32_t i = 0; i < pCfg->replicaNum; ++i) { + SNodeInfo *pNode = &pCfg->nodeInfo[i]; + if (tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort)) { + updated = true; + } + } + if (updated) { + vInfo("vgId:%d, save vnode info since dnode info changed", info.config.vgId); + (void)vnodeSaveInfo(dir, &info); + (void)vnodeCommitInfo(dir, &info); + } + // create handle pVnode = taosMemoryCalloc(1, sizeof(*pVnode) + strlen(path) + 1); if (pVnode == NULL) { @@ -160,6 +175,8 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { taosThreadMutexInit(&pVnode->mutex, NULL); taosThreadCondInit(&pVnode->poolNotEmpty, NULL); + vnodeUpdCommitSched(pVnode); + int8_t rollback = vnodeShouldRollback(pVnode); // open buffer pool @@ -238,7 +255,7 @@ _err: if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); if (pVnode->pSma) smaClose(pVnode->pSma); if (pVnode->pMeta) metaClose(pVnode->pMeta); - if (pVnode->pPool) vnodeCloseBufPool(pVnode); + if (pVnode->freeList) vnodeCloseBufPool(pVnode); tsem_destroy(&(pVnode->canCommit)); taosMemoryFree(pVnode); @@ -250,9 
+267,11 @@ void vnodePreClose(SVnode *pVnode) { vnodeSyncPreClose(pVnode); } +void vnodePostClose(SVnode *pVnode) { vnodeSyncPostClose(pVnode); } + void vnodeClose(SVnode *pVnode) { if (pVnode) { - vnodeSyncCommit(pVnode); + tsem_wait(&pVnode->canCommit); vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); walClose(pVnode->pWal); @@ -261,6 +280,8 @@ void vnodeClose(SVnode *pVnode) { smaClose(pVnode->pSma); metaClose(pVnode->pMeta); vnodeCloseBufPool(pVnode); + tsem_post(&pVnode->canCommit); + // destroy handle tsem_destroy(&(pVnode->canCommit)); tsem_destroy(&pVnode->syncSem); diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c index cc22668b299db87d0701c507f75f79258be7a178..43f903dc4867178919e3d3b519b899afcca6d835 100644 --- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c +++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c @@ -406,8 +406,10 @@ static int32_t vnodeSnapWriteInfo(SVSnapWriter *pWriter, uint8_t *pData, uint32_ snprintf(dir, TSDB_FILENAME_LEN, "%s", pWriter->pVnode->path); } - SVnode *pVnode = pWriter->pVnode; + SVnodeStats vndStats = pWriter->info.config.vndStats; + SVnode *pVnode = pWriter->pVnode; pWriter->info.config = pVnode->config; + pWriter->info.config.vndStats = vndStats; vDebug("vgId:%d, save config while write snapshot", pWriter->pVnode->config.vgId); if (vnodeSaveInfo(dir, &pWriter->info) < 0) { code = terrno; @@ -453,7 +455,7 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) { if (code) goto _err; } - code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData); + code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pHdr); if (code) goto _err; } break; case SNAP_DATA_TQ_HANDLE: { diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 2669ad600e239e26574ffc451381cabc4dc2035e..bed61dfd6cf63fde1dfec7b8cfe7378e00897a3c 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -192,18 +192,17 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp void *pReq; int32_t len; int32_t ret; - + /* if (!pVnode->inUse) { terrno = TSDB_CODE_VND_NO_AVAIL_BUFPOOL; vError("vgId:%d, not ready to write since %s", TD_VID(pVnode), terrstr()); return -1; } - + */ if (version <= pVnode->state.applied) { vError("vgId:%d, duplicate write request. 
version: %" PRId64 ", applied: %" PRId64 "", TD_VID(pVnode), version, pVnode->state.applied); terrno = TSDB_CODE_VND_DUP_REQUEST; - pRsp->info.handle = NULL; return -1; } @@ -225,6 +224,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp // skip header pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); len = pMsg->contLen - sizeof(SMsgHead); + bool needCommit = false; switch (pMsg->msgType) { /* META */ @@ -321,9 +321,8 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp vnodeProcessAlterConfigReq(pVnode, version, pReq, len, pRsp); break; case TDMT_VND_COMMIT: - vnodeSyncCommit(pVnode); - vnodeBegin(pVnode); - goto _exit; + needCommit = true; + break; case TDMT_VND_COMPACT: vnodeProcessCompactVnodeReq(pVnode, version, pReq, len, pRsp); goto _exit; @@ -343,7 +342,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp } // commit if need - if (vnodeShouldCommit(pVnode)) { + if (needCommit) { vInfo("vgId:%d, commit at version %" PRId64, TD_VID(pVnode), version); if (vnodeAsyncCommit(pVnode) < 0) { vError("vgId:%d, failed to vnode async commit since %s.", TD_VID(pVnode), tstrerror(terrno)); @@ -1006,6 +1005,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq code = terrno; goto _exit; } + pSubmitTbData->uid = pSubmitTbData->pCreateTbReq->uid; // update uid if table exist for using below } } diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 749c81224c44810a8961ab2e5577e5599ec06854..6f3788616a5f45e440d07f23b89e3e493d07fa4d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -101,6 +101,64 @@ static void vnodeHandleProposeError(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) } } +static int32_t inline vnodeProposeMsg(SVnode *pVnode, SRpcMsg *pMsg, bool isWeak) { + int64_t seq = 0; + + taosThreadMutexLock(&pVnode->lock); + int32_t code = syncPropose(pVnode->sync, pMsg, isWeak, &seq); + bool wait = (code == 0 && vnodeIsMsgBlock(pMsg->msgType)); + if (wait) { + ASSERT(!pVnode->blocked); + pVnode->blocked = true; + pVnode->blockSec = taosGetTimestampSec(); + pVnode->blockSeq = seq; +#if 0 + pVnode->blockInfo = pMsg->info; +#endif + } + taosThreadMutexUnlock(&pVnode->lock); + + if (code > 0) { + vnodeHandleWriteMsg(pVnode, pMsg); + } else if (code < 0) { + if (terrno != 0) code = terrno; + vnodeHandleProposeError(pVnode, pMsg, code); + } + + if (wait) vnodeWaitBlockMsg(pVnode, pMsg); + return code; +} + +void vnodeProposeCommitOnNeed(SVnode *pVnode) { + if (!vnodeShouldCommit(pVnode)) { + return; + } + + int32_t contLen = sizeof(SMsgHead); + SMsgHead *pHead = rpcMallocCont(contLen); + pHead->contLen = contLen; + pHead->vgId = pVnode->config.vgId; + + SRpcMsg rpcMsg = {0}; + rpcMsg.msgType = TDMT_VND_COMMIT; + rpcMsg.contLen = contLen; + rpcMsg.pCont = pHead; + rpcMsg.info.noResp = 1; + + bool isWeak = false; + if (vnodeProposeMsg(pVnode, &rpcMsg, isWeak) < 0) { + vTrace("vgId:%d, failed to propose vnode commit since %s", pVnode->config.vgId, terrstr()); + goto _out; + } + + vInfo("vgId:%d, proposed vnode commit", pVnode->config.vgId); + +_out: + vnodeUpdCommitSched(pVnode); + rpcFreeCont(rpcMsg.pCont); + rpcMsg.pCont = NULL; +} + #if BATCH_ENABLE static void inline vnodeProposeBatchMsg(SVnode *pVnode, SRpcMsg **pMsgArr, bool *pIsWeakArr, int32_t *arrSize) { @@ -178,6 +236,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) continue; } + 
vnodeProposeCommitOnNeed(pVnode); + code = vnodePreProcessWriteMsg(pVnode, pMsg); if (code != 0) { vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr()); @@ -205,34 +265,6 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) #else -static int32_t inline vnodeProposeMsg(SVnode *pVnode, SRpcMsg *pMsg, bool isWeak) { - int64_t seq = 0; - - taosThreadMutexLock(&pVnode->lock); - int32_t code = syncPropose(pVnode->sync, pMsg, isWeak, &seq); - bool wait = (code == 0 && vnodeIsMsgBlock(pMsg->msgType)); - if (wait) { - ASSERT(!pVnode->blocked); - pVnode->blocked = true; - pVnode->blockSec = taosGetTimestampSec(); - pVnode->blockSeq = seq; -#if 0 - pVnode->blockInfo = pMsg->info; -#endif - } - taosThreadMutexUnlock(&pVnode->lock); - - if (code > 0) { - vnodeHandleWriteMsg(pVnode, pMsg); - } else if (code < 0) { - if (terrno != 0) code = terrno; - vnodeHandleProposeError(pVnode, pMsg, code); - } - - if (wait) vnodeWaitBlockMsg(pVnode, pMsg); - return code; -} - void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { SVnode *pVnode = pInfo->ahandle; int32_t vgId = pVnode->config.vgId; @@ -256,6 +288,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) continue; } + vnodeProposeCommitOnNeed(pVnode); + code = vnodePreProcessWriteMsg(pVnode, pMsg); if (code != 0) { vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr()); @@ -614,6 +648,11 @@ void vnodeSyncPreClose(SVnode *pVnode) { taosThreadMutexUnlock(&pVnode->lock); } +void vnodeSyncPostClose(SVnode *pVnode) { + vInfo("vgId:%d, post close sync", pVnode->config.vgId); + syncPostStop(pVnode->sync); +} + void vnodeSyncClose(SVnode *pVnode) { vInfo("vgId:%d, close sync", pVnode->config.vgId); syncStop(pVnode->sync); diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 836ce87fbb7bb0620567be05030527c4ca7ac101..7ee7a24f97146ce91b1294d21a81e8cf163b2993 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -805,6 +805,7 @@ int32_t ctgMakeVgArray(SDBVgInfo* dbInfo); int32_t ctgAcquireVgMetaFromCache(SCatalog *pCtg, const char *dbFName, const char *tbName, SCtgDBCache **pDb, SCtgTbCache **pTb); int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCtgTbCache **pTb, STableMeta **pTableMeta, char* dbFName); void ctgReleaseVgMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache); +void ctgReleaseTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache); extern SCatalogMgmt gCtgMgmt; extern SCtgDebug gCTGDebug; diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index c7af0411bea055b5cf6afb13941735ce3c104eaa..f9a218835ea77d4a20aa1b7ac8086187c995c561 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -598,10 +598,16 @@ int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInf CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup)); + ctgRUnlockVgInfo(dbCache); + SCtgTbMetaCtx ctx = {0}; ctx.pName = (SName*)pTableName; ctx.flag = CTG_FLAG_UNKNOWN_STB; - CTG_ERR_JRET(ctgCopyTbMeta(pCtg, &ctx, &dbCache, &tbCache, pTableMeta, db)); + code = ctgCopyTbMeta(pCtg, &ctx, &dbCache, &tbCache, pTableMeta, db); + + ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache); + + CTG_RET(code); _return: diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c 
index b8590c9255047b3c53ff90b82ffb3862ac8e2bdc..325d6e0e46c61e398e3766b931fe6141abd710c4 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -483,7 +483,7 @@ int32_t ctgInitTask(SCtgJob* pJob, CTG_TASK_TYPE type, void* param, int32_t* tas _return: CTG_UNLOCK(CTG_WRITE, &pJob->taskLock); - + return code; } @@ -905,6 +905,14 @@ int32_t ctgCallUserCb(void* param) { return TSDB_CODE_SUCCESS; } +void ctgUpdateJobErrCode(SCtgJob* pJob, int32_t errCode) { + if (!NEED_CLIENT_REFRESH_VG_ERROR(errCode) || errCode == TSDB_CODE_SUCCESS) return; + + atomic_store_32(&pJob->jobResCode, errCode); + qDebug("QID:0x%" PRIx64 " ctg job errCode updated to %s", pJob->queryId, tstrerror(errCode)); + return; +} + int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) { SCtgJob* pJob = pTask->pJob; int32_t code = 0; @@ -924,6 +932,8 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) { if (taskDone < taosArrayGetSize(pJob->pTasks)) { qDebug("QID:0x%" PRIx64 " task done: %d, total: %d", pJob->queryId, taskDone, (int32_t)taosArrayGetSize(pJob->pTasks)); + + ctgUpdateJobErrCode(pJob, rspCode); return TSDB_CODE_SUCCESS; } @@ -931,7 +941,8 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) { _return: - pJob->jobResCode = code; + ctgUpdateJobErrCode(pJob, rspCode); + // pJob->jobResCode = code; // taosSsleep(2); // qDebug("QID:0x%" PRIx64 " ctg after sleep", pJob->queryId); @@ -988,6 +999,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq)); ctgReleaseVgInfoToCache(pCtg, dbCache); + dbCache = NULL; } else { SBuildUseDBInput input = {0}; @@ -1098,7 +1110,8 @@ _return: } if (code) { - ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname, tstrerror(code)); + ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname, + tstrerror(code)); } if (pTask->res || code) { ctgHandleTaskEnd(pTask, code); @@ -1156,6 +1169,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq)); ctgReleaseVgInfoToCache(pCtg, dbCache); + dbCache = NULL; } else { SBuildUseDBInput input = {0}; @@ -1286,7 +1300,8 @@ _return: TSWAP(pTask->res, ctx->pResList); taskDone = true; } - ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname, tstrerror(code)); + ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname, + tstrerror(code)); } if (pTask->res && taskDone) { diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index c266cc1df9fc0ab5bd61574f519920df327630c9..6e4077eae05aa8cead28111f13f9600bb4e3244d 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -2118,7 +2118,7 @@ int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) { _return: - if (dbCache) { + if (code == TSDB_CODE_SUCCESS && dbCache) { ctgWUnlockVgInfo(dbCache); } diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 6acf19218d893a9962853e1fe7b0c73c8dddf7bc..764553f1e9dc53495c1fbfdf432c9d6fc89a3a8a 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -64,6 +64,9 @@ extern "C" { #define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s" #define EXPLAIN_PARTITION_KETS_FORMAT 
"Partition Key: " #define EXPLAIN_INTERP_FORMAT "Interp" +#define EXPLAIN_EVENT_FORMAT "Event" +#define EXPLAIN_EVENT_START_FORMAT "Start Cond: " +#define EXPLAIN_EVENT_END_FORMAT "End Cond: " #define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms" #define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms" diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index c87f6953eb5fbb2515b2db54ab1e85854111eeb2..6eef1ded695accbcf204e004c4d00af3987d8d91 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -281,10 +281,10 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbFName, S len += sprintf( buf2 + VARSTR_HEADER_SIZE, "CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %dm " - "WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d " + "WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d " "WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d", dbFName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, pCfg->daysPerFile, - pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2, + pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2, pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups, 1 == pCfg->numOfStables); diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 38222112a19a447a4a7294a6082bf8bd9edd18ce..fdbbcad9685eaa73b31a86a3401a36f3494a3789 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -114,129 +114,7 @@ _return: int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNodeList **pChildren) { int32_t tlen = 0; - SNodeList *pPhysiChildren = NULL; - - switch (pNode->type) { - case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: { - STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode; - pPhysiChildren = pTagScanNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN: - case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: { - STableScanPhysiNode *pTblScanNode = (STableScanPhysiNode *)pNode; - pPhysiChildren = pTblScanNode->scan.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN: { - SSystemTableScanPhysiNode *pSTblScanNode = (SSystemTableScanPhysiNode *)pNode; - pPhysiChildren = pSTblScanNode->scan.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_PROJECT: { - SProjectPhysiNode *pPrjNode = (SProjectPhysiNode *)pNode; - pPhysiChildren = pPrjNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: { - SSortMergeJoinPhysiNode *pJoinNode = (SSortMergeJoinPhysiNode *)pNode; - pPhysiChildren = pJoinNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { - SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode; - pPhysiChildren = pAggNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: { - SExchangePhysiNode *pExchNode = (SExchangePhysiNode *)pNode; - pPhysiChildren = pExchNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_SORT: { - SSortPhysiNode *pSortNode = (SSortPhysiNode *)pNode; - pPhysiChildren = pSortNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL: { - SIntervalPhysiNode *pIntNode = (SIntervalPhysiNode *)pNode; - 
pPhysiChildren = pIntNode->window.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: { - SSessionWinodwPhysiNode *pSessNode = (SSessionWinodwPhysiNode *)pNode; - pPhysiChildren = pSessNode->window.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: { - SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode; - pPhysiChildren = pStateNode->window.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_PARTITION: { - SPartitionPhysiNode *partitionPhysiNode = (SPartitionPhysiNode *)pNode; - pPhysiChildren = partitionPhysiNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_MERGE: { - SMergePhysiNode *mergePhysiNode = (SMergePhysiNode *)pNode; - pPhysiChildren = mergePhysiNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC: { - SIndefRowsFuncPhysiNode *indefPhysiNode = (SIndefRowsFuncPhysiNode *)pNode; - pPhysiChildren = indefPhysiNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: { - SMergeAlignedIntervalPhysiNode *intPhysiNode = (SMergeAlignedIntervalPhysiNode *)pNode; - pPhysiChildren = intPhysiNode->window.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_FILL: { - SFillPhysiNode *fillPhysiNode = (SFillPhysiNode *)pNode; - pPhysiChildren = fillPhysiNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN: { - STableMergeScanPhysiNode *mergePhysiNode = (STableMergeScanPhysiNode *)pNode; - pPhysiChildren = mergePhysiNode->scan.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: { - SBlockDistScanPhysiNode *distPhysiNode = (SBlockDistScanPhysiNode *)pNode; - pPhysiChildren = distPhysiNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: { - SLastRowScanPhysiNode *lastRowPhysiNode = (SLastRowScanPhysiNode *)pNode; - pPhysiChildren = lastRowPhysiNode->scan.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: { - STableCountScanPhysiNode *tableCountPhysiNode = (STableCountScanPhysiNode *)pNode; - pPhysiChildren = tableCountPhysiNode->scan.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: { - SGroupSortPhysiNode *groupSortPhysiNode = (SGroupSortPhysiNode *)pNode; - pPhysiChildren = groupSortPhysiNode->node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: { - SMergeIntervalPhysiNode *mergeIntPhysiNode = (SMergeIntervalPhysiNode *)pNode; - pPhysiChildren = mergeIntPhysiNode->window.node.pChildren; - break; - } - case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: { - SInterpFuncPhysiNode *interpPhysiNode = (SInterpFuncPhysiNode *)pNode; - pPhysiChildren = interpPhysiNode->node.pChildren; - break; - } - default: - qError("not supported physical node type %d", pNode->type); - QRY_ERR_RET(TSDB_CODE_APP_ERROR); - } + SNodeList *pPhysiChildren = pNode->pChildren; if (pPhysiChildren) { *pChildren = nodesMakeList(); @@ -1583,6 +1461,36 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i } break; } + case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT: { + SEventWinodwPhysiNode *pEventNode = (SEventWinodwPhysiNode *)pNode; + EXPLAIN_ROW_NEW(level, EXPLAIN_EVENT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); + if (pResNode->pExecInfo) { + QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pEventNode->window.pFuncs->length); + 
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pEventNode->window.node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); + + if (verbose) { + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_EVENT_START_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pEventNode->pStartCond, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_EVENT_END_FORMAT); + QRY_ERR_RET(nodesNodeToSQL(pEventNode->pEndCond, tbuf + VARSTR_HEADER_SIZE, + TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + } + break; + } default: qError("not supported physical node type %d", pNode->type); return TSDB_CODE_APP_ERROR; diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 45cfcba8b5b4c3d52d168ce03434dcf5b0d98b7b..7d33f150ffc3b38faf93800fe18651c1afaecef8 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -474,9 +474,11 @@ typedef struct SStreamScanInfo { SNode* pTagIndexCond; // recover - int32_t blockRecoverContiCnt; - int32_t blockRecoverTotCnt; + int32_t blockRecoverContiCnt; + int32_t blockRecoverTotCnt; + SSDataBlock* pRecoverRes; + SSDataBlock* pCreateTbRes; } SStreamScanInfo; typedef struct { @@ -567,6 +569,8 @@ typedef struct SStreamIntervalOperatorInfo { SStreamState* pState; SWinKey delKey; uint64_t numOfDatapack; + SArray* pUpdated; + SHashObj* pUpdatedMap; } SStreamIntervalOperatorInfo; typedef struct SDataGroupInfo { @@ -613,6 +617,8 @@ typedef struct SStreamSessionAggOperatorInfo { SPhysiNode* pPhyNode; // create new child bool isFinal; bool ignoreExpiredData; + SArray* pUpdated; + SSHashObj* pStUpdated; } SStreamSessionAggOperatorInfo; typedef struct SStreamStateAggOperatorInfo { @@ -628,6 +634,8 @@ typedef struct SStreamStateAggOperatorInfo { void* pDelIterator; SArray* pChildren; // cache for children's result; bool ignoreExpiredData; + SArray* pUpdated; + SSHashObj* pSeUpdated; } SStreamStateAggOperatorInfo; typedef struct SStreamPartitionOperatorInfo { @@ -638,9 +646,11 @@ typedef struct SStreamPartitionOperatorInfo { SExprSupp tagCalSup; SHashObj* pPartitions; void* parIte; + void* pTbNameIte; SSDataBlock* pInputDataBlock; int32_t tsColIndex; SSDataBlock* pDelRes; + SSDataBlock* pCreateTbRes; } SStreamPartitionOperatorInfo; typedef struct SStreamFillSupporter { @@ -708,7 +718,8 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo); void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo); -void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator); +void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo); +bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator); void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput); @@ -732,6 +743,7 @@ void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle); void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, 
int32_t functionId, const char* name); void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset); +void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput); SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData, int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo, @@ -762,7 +774,7 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, bool isStream); +SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode, SExecTaskInfo* pTaskInfo); @@ -803,6 +815,8 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo); SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo); + +SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo); // clang-format on int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx, @@ -838,7 +852,6 @@ bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pS void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp, void* pTbName); uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId); -void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock); int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo); @@ -857,6 +870,15 @@ void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t ord int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo); int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos, int32_t order, int64_t* pData); +void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, int64_t groupId, + SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock); + +SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag); +SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs); + +void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, + SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo); +void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowEntryOffset) ; #ifdef __cplusplus } diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c new file mode 100644 index 0000000000000000000000000000000000000000..0e3c67c5d7c5ce5d998a252a2141de206a9dc4af --- /dev/null +++ b/source/libs/executor/src/eventwindowoperator.c @@ -0,0 +1,338 @@ +/* + * Copyright 
(c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "executorimpl.h" +#include "filter.h" +#include "function.h" +#include "functionMgt.h" +#include "tcommon.h" +#include "tcompare.h" +#include "tdatablock.h" +#include "ttime.h" + +typedef struct SEventWindowOperatorInfo { + SOptrBasicInfo binfo; + SAggSupporter aggSup; + SExprSupp scalarSup; + SGroupResInfo groupResInfo; + SWindowRowsSup winSup; + bool hasKey; + SStateKeys stateKey; + int32_t tsSlotId; // primary timestamp column slot id + STimeWindowAggSupp twAggSup; + + SFilterInfo* pStartCondInfo; + SFilterInfo* pEndCondInfo; + bool inWindow; + SResultRow* pRow; +} SEventWindowOperatorInfo; + +static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator); +static void destroyEWindowOperatorInfo(void* param); +static void eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* pInfo, SSDataBlock* pBlock); +static SSDataBlock* doEventWindowAgg(SOperatorInfo* pOperator); + +// todo : move to util +static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsList, int32_t rowIndex, + uint64_t groupId) { + pRowSup->startRowIndex = rowIndex; + pRowSup->numOfRows = 0; + pRowSup->win.skey = tsList[rowIndex]; + pRowSup->groupId = groupId; +} + +static void doKeepTuple(SWindowRowsSup* pRowSup, int64_t ts, uint64_t groupId) { + pRowSup->win.ekey = ts; + pRowSup->prevTs = ts; + pRowSup->numOfRows += 1; + pRowSup->groupId = groupId; +} + +static void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, bool includeEndpoint) { + int64_t* ts = (int64_t*)pColData->pData; + int32_t delta = includeEndpoint ? 
1 : 0; + + int64_t duration = pWin->ekey - pWin->skey + delta; + ts[2] = duration; // set the duration + ts[3] = pWin->skey; // window start key + ts[4] = pWin->ekey + delta; // window end key +} + +SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, + SExecTaskInfo* pTaskInfo) { + SEventWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SEventWindowOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + if (pInfo == NULL || pOperator == NULL) { + goto _error; + } + + SEventWinodwPhysiNode* pEventWindowNode = (SEventWinodwPhysiNode*)physiNode; + + int32_t tsSlotId = ((SColumnNode*)pEventWindowNode->window.pTspk)->slotId; + int32_t code = filterInitFromNode((SNode*)pEventWindowNode->pStartCond, &pInfo->pStartCondInfo, 0); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + code = filterInitFromNode((SNode*)pEventWindowNode->pEndCond, &pInfo->pEndCondInfo, 0); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + if (pEventWindowNode->window.pExprs != NULL) { + int32_t numOfScalarExpr = 0; + SExprInfo* pScalarExprInfo = createExprInfo(pEventWindowNode->window.pExprs, NULL, &numOfScalarExpr); + code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + } + + code = filterInitFromNode((SNode*)pEventWindowNode->window.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; + + int32_t num = 0; + SExprInfo* pExprInfo = createExprInfo(pEventWindowNode->window.pFuncs, NULL, &num); + initResultSizeInfo(&pOperator->resultInfo, 4096); + + code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, + pTaskInfo->streamInfo.pState); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + SSDataBlock* pResBlock = createDataBlockFromDescNode(pEventWindowNode->window.node.pOutputDataBlockDesc); + blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity); + + initBasicInfo(&pInfo->binfo, pResBlock); + initResultRowInfo(&pInfo->binfo.resultRowInfo); + + pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pEventWindowNode->window.watermark, + .calTrigger = pEventWindowNode->window.triggerType}; + + initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); + + pInfo->tsSlotId = tsSlotId; + + setOperatorInfo(pOperator, "EventWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, true, OP_NOT_OPENED, pInfo, + pTaskInfo); + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, eventWindowAggregate, NULL, destroyEWindowOperatorInfo, + optrDefaultBufFn, NULL); + + code = appendDownstream(pOperator, &downstream, 1); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } + + return pOperator; + +_error: + if (pInfo != NULL) { + destroyEWindowOperatorInfo(pInfo); + } + + taosMemoryFreeClear(pOperator); + pTaskInfo->code = code; + return NULL; +} + +void destroyEWindowOperatorInfo(void* param) { + SEventWindowOperatorInfo* pInfo = (SEventWindowOperatorInfo*)param; + if (pInfo == NULL) { + return; + } + + if (pInfo->pRow != NULL) { + taosMemoryFree(pInfo->pRow); + } + + if (pInfo->pStartCondInfo != NULL) { + filterFreeInfo(pInfo->pStartCondInfo); + pInfo->pStartCondInfo = NULL; + } + + if (pInfo->pEndCondInfo != NULL) { + filterFreeInfo(pInfo->pEndCondInfo); + pInfo->pEndCondInfo = NULL; + } + + cleanupBasicInfo(&pInfo->binfo); + 
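/* createEventwindowOperatorInfo() above follows the calloc-then-goto-_error idiom used by these
 * operator constructors: every failed initialization step jumps to a single cleanup label, and the
 * destructor is written to tolerate a partially initialized struct. The self-contained sketch below
 * shows the same idiom with invented toy types; it is illustrative only and not TDengine code. */
#include <stdio.h>
#include <stdlib.h>

typedef struct ToyOperator {
  int  *state;  /* stands in for the operator's working buffers */
  FILE *log;    /* stands in for any other owned resource       */
} ToyOperator;

static void toyOperatorDestroy(ToyOperator *op) {
  if (op == NULL) {
    return;  /* safe to call on the error path even before allocation succeeded */
  }
  free(op->state);
  if (op->log != NULL) {
    fclose(op->log);
  }
  free(op);
}

static ToyOperator *toyOperatorCreate(const char *logPath, size_t stateLen) {
  ToyOperator *op = calloc(1, sizeof(ToyOperator));
  if (op == NULL) goto _error;

  op->state = calloc(stateLen, sizeof(int));
  if (op->state == NULL) goto _error;

  op->log = fopen(logPath, "w");
  if (op->log == NULL) goto _error;

  return op;  /* fully initialized */

_error:
  /* one cleanup path covers every partially-initialized case */
  toyOperatorDestroy(op);
  return NULL;
}

int main(void) {
  ToyOperator *op = toyOperatorCreate("toy_operator.log", 16);
  if (op == NULL) {
    printf("init failed, resources already released\n");
    return 1;
  }
  op->state[0] = 42;
  printf("state[0] = %d\n", op->state[0]);
  toyOperatorDestroy(op);
  return 0;
}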
colDataDestroy(&pInfo->twAggSup.timeWindowData); + + cleanupAggSup(&pInfo->aggSup); + cleanupGroupResInfo(&pInfo->groupResInfo); + taosMemoryFreeClear(param); +} + +static SSDataBlock* eventWindowAggregate(SOperatorInfo* pOperator) { + SEventWindowOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + SExprSupp* pSup = &pOperator->exprSupp; + int32_t order = TSDB_ORDER_ASC; + + SSDataBlock* pRes = pInfo->binfo.pRes; + + blockDataCleanup(pRes); + + SOperatorInfo* downstream = pOperator->pDownstream[0]; + while (1) { + SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); + if (pBlock == NULL) { + break; + } + + setInputDataBlock(pSup, pBlock, order, MAIN_SCAN, true); + blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId); + + // there is an scalar expression that needs to be calculated right before apply the group aggregation. + if (pInfo->scalarSup.pExprInfo != NULL) { + pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, + pInfo->scalarSup.numOfExprs, NULL); + if (pTaskInfo->code != TSDB_CODE_SUCCESS) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + } + + eventWindowAggImpl(pOperator, pInfo, pBlock); + if (pRes->info.rows >= pOperator->resultInfo.threshold) { + return pRes; + } + } + + return pRes->info.rows == 0 ? NULL : pRes; +} + +static int32_t setSingleOutputTupleBufv1(SResultRowInfo* pResultRowInfo, STimeWindow* win, SResultRow** pResult, + SExprSupp* pExprSup, SAggSupporter* pAggSup) { + if (*pResult == NULL) { + SResultRow* p = taosMemoryCalloc(1, pAggSup->resultRowSize); + pResultRowInfo->cur = (SResultRowPosition){.pageId = p->pageId, .offset = p->offset}; + *pResult = p; + } + + (*pResult)->win = *win; + + clearResultRowInitFlag(pExprSup->pCtx, pExprSup->numOfExprs); + setResultRowInitCtx(*pResult, pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset); + return TSDB_CODE_SUCCESS; +} + +static void doEventWindowAggImpl(SEventWindowOperatorInfo* pInfo, SExprSupp* pSup, int32_t startIndex, int32_t endIndex, + const SSDataBlock* pBlock, int64_t* tsList, SExecTaskInfo* pTaskInfo) { + SWindowRowsSup* pRowSup = &pInfo->winSup; + + int32_t numOfOutput = pSup->numOfExprs; + int32_t numOfRows = endIndex - startIndex + 1; + + doKeepTuple(pRowSup, tsList[endIndex], pBlock->info.id.groupId); + + int32_t ret = + setSingleOutputTupleBufv1(&pInfo->binfo.resultRowInfo, &pRowSup->win, &pInfo->pRow, pSup, &pInfo->aggSup); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); + } + + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false); + applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startIndex, numOfRows, + pBlock->info.rows, numOfOutput); +} + +void eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* pInfo, SSDataBlock* pBlock) { + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SExprSupp* pSup = &pOperator->exprSupp; + + SSDataBlock* pRes = pInfo->binfo.pRes; + int64_t gid = pBlock->info.id.groupId; + + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->tsSlotId); + TSKEY* tsList = (TSKEY*)pColInfoData->pData; + + SColumnInfoData *ps = NULL, *pe = NULL; + + SWindowRowsSup* pRowSup = &pInfo->winSup; + pRowSup->numOfRows = 0; + + SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock}; + int32_t code = filterSetDataFromSlotId(pInfo->pStartCondInfo, 
&param1); + + int32_t status1 = 0; + bool keep1 = filterExecute(pInfo->pStartCondInfo, pBlock, &ps, NULL, param1.numOfCols, &status1); + + SFilterColumnParam param2 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock}; + code = filterSetDataFromSlotId(pInfo->pEndCondInfo, &param2); + + int32_t status2 = 0; + bool keep2 = filterExecute(pInfo->pEndCondInfo, pBlock, &pe, NULL, param2.numOfCols, &status2); + + int32_t rowIndex = 0; + int32_t startIndex = pInfo->inWindow ? 0 : -1; + + while (rowIndex < pBlock->info.rows) { + if (pInfo->inWindow) { // let's find the first end value + for (rowIndex = startIndex; rowIndex < pBlock->info.rows; ++rowIndex) { + if (((bool*)pe->pData)[rowIndex]) { + break; + } + } + + if (rowIndex < pBlock->info.rows) { + doEventWindowAggImpl(pInfo, pSup, startIndex, rowIndex, pBlock, tsList, pTaskInfo); + + doUpdateNumOfRows(pSup->pCtx, pInfo->pRow, pSup->numOfExprs, pSup->rowEntryInfoOffset); + + // check buffer size + if (pRes->info.rows + pInfo->pRow->numOfRows >= pRes->info.capacity) { + int32_t newSize = pRes->info.rows + pInfo->pRow->numOfRows; + blockDataEnsureCapacity(pRes, newSize); + } + + copyResultrowToDataBlock(pSup->pExprInfo, pSup->numOfExprs, pInfo->pRow, pSup->pCtx, pRes, + pSup->rowEntryInfoOffset, pTaskInfo); + + pRes->info.rows += pInfo->pRow->numOfRows; + + pInfo->inWindow = false; + rowIndex += 1; + } else { + doEventWindowAggImpl(pInfo, pSup, startIndex, pBlock->info.rows - 1, pBlock, tsList, pTaskInfo); + } + } else { // find the first start value that is fulfill for the start condition + for (; rowIndex < pBlock->info.rows; ++rowIndex) { + if (((bool*)ps->pData)[rowIndex]) { + doKeepNewWindowStartInfo(pRowSup, tsList, rowIndex, gid); + pInfo->inWindow = true; + startIndex = rowIndex; + break; + } + } + + if (pInfo->inWindow) { + continue; + } else { + break; + } + } + } + + colDataDestroy(ps); + taosMemoryFree(ps); + colDataDestroy(pe); + taosMemoryFree(pe); +} diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 9873c520060d1a645255ef05b1542c39143c2623..037b33dc9f35c0b36c5e8fb092bf15102821b089 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -584,7 +584,13 @@ int32_t doExtractResultBlocks(SExchangeInfo* pExchangeInfo, SSourceDataInfo* pDa int32_t index = 0; int32_t code = 0; while (index++ < pRetrieveRsp->numOfBlocks) { - SSDataBlock* pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false); + SSDataBlock* pb = NULL; + if (taosArrayGetSize(pExchangeInfo->pRecycledBlocks) > 0) { + pb = *(SSDataBlock**)taosArrayPop(pExchangeInfo->pRecycledBlocks); + blockDataCleanup(pb); + } else { + pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false); + } code = extractDataBlockFromFetchRsp(pb, pStart, NULL, &pStart); if (code != 0) { @@ -732,9 +738,7 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa } // reset the value for a new group data - pLimitInfo->numOfOutputRows = 0; - pLimitInfo->remainOffset = pLimitInfo->limit.offset; - + resetLimitInfoForNextGroup(pLimitInfo); // existing rows that belongs to previous group.
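/* The hunk above swaps two manual field resets for the resetLimitInfoForNextGroup() helper added in
 * executil.c. The bookkeeping being reset is per-group OFFSET/LIMIT accounting: skip the remaining
 * offset first, then cap the output at the limit, then start over for the next group. The toy,
 * self-contained sketch below illustrates that accounting only; ToyLimitInfo and the function names
 * are invented for illustration and are not the real TDengine API. */
#include <stdint.h>
#include <stdio.h>

typedef struct ToyLimitInfo {
  int64_t offset;          /* configured OFFSET                  */
  int64_t limit;           /* configured LIMIT, -1 means "none"  */
  int64_t remainOffset;    /* offset rows still to be skipped    */
  int64_t numOfOutputRows; /* rows already emitted in this group */
} ToyLimitInfo;

/* trims a batch of `rows`; returns how many rows survive offset/limit */
static int64_t toyApplyLimitOffset(ToyLimitInfo *p, int64_t rows) {
  if (p->remainOffset >= rows) {  /* the whole batch is swallowed by OFFSET */
    p->remainOffset -= rows;
    return 0;
  }
  rows -= p->remainOffset;        /* drop the leading offset rows */
  p->remainOffset = 0;

  if (p->limit >= 0 && p->numOfOutputRows + rows > p->limit) {
    rows = p->limit - p->numOfOutputRows;  /* keep only what the limit still allows */
  }
  p->numOfOutputRows += rows;
  return rows;
}

/* conceptually what a per-group reset does: restart the counters for the next group */
static void toyResetForNextGroup(ToyLimitInfo *p) {
  p->remainOffset = p->offset;
  p->numOfOutputRows = 0;
}

int main(void) {
  ToyLimitInfo info = {.offset = 3, .limit = 5, .remainOffset = 3, .numOfOutputRows = 0};
  printf("%lld\n", (long long)toyApplyLimitOffset(&info, 4));   /* 1: offset eats 3 of 4 rows   */
  printf("%lld\n", (long long)toyApplyLimitOffset(&info, 10));  /* 4: limit of 5 caps the output */
  toyResetForNextGroup(&info);                                  /* next group starts clean       */
  return 0;
}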
if (pBlock->info.rows > 0) { return PROJECT_RETRIEVE_DONE; @@ -760,7 +764,12 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows); blockDataKeepFirstNRows(pBlock, keepRows); if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) { - pOperator->status = OP_EXEC_DONE; + setOperatorCompleted(pOperator); + } else { + // current group limitation is reached, and future blocks of this group need to be discarded. + if (pBlock->info.rows == 0) { + return PROJECT_RETRIEVE_CONTINUE; + } } return PROJECT_RETRIEVE_DONE; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index a5468008aa8b6ce4fd9c52ca4a7f4965c1f0dbc8..1d16a3418d30acfd06a07952723750921337fc1c 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -954,7 +954,7 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* return -1; } } else { - qWarn("failed to get tableIds from by table name: %s, reason: %s", name, tstrerror(terrno)); +// qWarn("failed to get tableIds from by table name: %s, reason: %s", name, tstrerror(terrno)); terrno = 0; } } @@ -1424,6 +1424,18 @@ void createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode) { createExprFromOneNode(pExp, pTargetNode->pExpr, pTargetNode->slotId); } +SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs) { + *numOfExprs = LIST_LENGTH(pNodeList); + SExprInfo* pExprs = taosMemoryCalloc(*numOfExprs, sizeof(SExprInfo)); + + for (int32_t i = 0; i < (*numOfExprs); ++i) { + SExprInfo* pExp = &pExprs[i]; + createExprFromOneNode(pExp, nodesListGetNode(pNodeList, i), i + UD_TAG_COLUMN_INDEX); + } + + return pExprs; +} + SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs) { int32_t numOfFuncs = LIST_LENGTH(pNodeList); int32_t numOfGroupKeys = 0; @@ -1759,6 +1771,11 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimit pLimitInfo->remainGroupOffset = slimit.offset; } +void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo) { + pLimitInfo->numOfOutputRows = 0; + pLimitInfo->remainOffset = pLimitInfo->limit.offset; +} + uint64_t tableListGetSize(const STableListInfo* pTableList) { ASSERT(taosArrayGetSize(pTableList->pTableList) == taosHashGetSize(pTableList->map)); return taosArrayGetSize(pTableList->pTableList); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index a40e29ba090358eb260619bc2406ea8b75a42717..c599b479e61b4c30a46713e4549983a6fd448c19 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -24,12 +24,16 @@ static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT; int32_t exchangeObjRefPool = -1; -static void initRefPool() { exchangeObjRefPool = taosOpenRef(1024, doDestroyExchangeOperatorInfo); } static void cleanupRefPool() { int32_t ref = atomic_val_compare_exchange_32(&exchangeObjRefPool, exchangeObjRefPool, 0); taosCloseRef(ref); } +static void initRefPool() { + exchangeObjRefPool = taosOpenRef(1024, doDestroyExchangeOperatorInfo); + atexit(cleanupRefPool); +} + static int32_t doSetSMABlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) { ASSERT(pOperator != NULL); if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { @@ -448,7 +452,6 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, 
SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo; taosThreadOnce(&initPoolOnce, initRefPool); - atexit(cleanupRefPool); qDebug("start to create subplan task, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId); @@ -710,6 +713,15 @@ int32_t qAsyncKillTask(qTaskInfo_t qinfo, int32_t rspCode) { return TSDB_CODE_SUCCESS; } +bool qTaskIsExecuting(qTaskInfo_t qinfo) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo; + if (NULL == pTaskInfo) { + return false; + } + + return 0 != atomic_load_64(&pTaskInfo->owner); +} + static void printTaskExecCostInLog(SExecTaskInfo* pTaskInfo) { STaskCostInfo* pSummary = &pTaskInfo->cost; int64_t idleTime = pSummary->start - pSummary->created; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 1d1298bf5f83a8a95d1a698f920706b6e2b58f85..21ef5dfab3891e8c00d164790df0762e047750af 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -833,6 +833,20 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO } } +void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput) { + for (int32_t i = 0; i < numOfOutput; ++i) { + SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo; + if (pResInfo == NULL) { + continue; + } + + pResInfo->initialized = false; + pResInfo->numOfRes = 0; + pResInfo->isNullRes = 0; + pResInfo->complete = false; + } +} + void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo) { if (pFilterInfo == NULL || pBlock->info.rows == 0) { return; @@ -871,6 +885,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD return; } + int8_t* pIndicator = (int8_t*)p->pData; int32_t totalRows = pBlock->info.rows; if (status == FILTER_RESULT_ALL_QUALIFIED) { @@ -878,42 +893,134 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD } else if (status == FILTER_RESULT_NONE_QUALIFIED) { pBlock->info.rows = 0; } else { - SSDataBlock* px = createOneDataBlock(pBlock, true); + int32_t bmLen = BitmapLen(totalRows); + char* pBitmap = NULL; + int32_t maxRows = 0; size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pSrc = taosArrayGet(px->pDataBlock, i); SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i); // it is a reserved column for scalar function, and no data in this column yet. 
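/* The rewritten extractQualifiedTupleByFilterResult() in this hunk compacts the qualified rows of
 * each column in place, steered by the filter's indicator array, instead of copying the whole block
 * into a temporary one as the old code did. The self-contained sketch below shows that compaction
 * for a single fixed-width column; the toy names are illustrative, and it omits the null-bitmap
 * relocation and the separate var-length path that the real code performs. */
#include <stdint.h>
#include <stdio.h>

/* moves every row whose indicator is non-zero to the front, preserving order;
 * returns the number of surviving rows */
static int32_t toyCompactColumn(int64_t *col, const int8_t *indicator, int32_t totalRows) {
  int32_t numOfRows = 0;
  for (int32_t j = 0; j < totalRows; ++j) {
    if (indicator[j] == 0) {
      continue;               /* row filtered out */
    }
    col[numOfRows] = col[j];  /* safe in place: numOfRows <= j, so unread data is never overwritten */
    numOfRows += 1;
  }
  return numOfRows;
}

int main(void) {
  int64_t col[]  = {10, 20, 30, 40, 50};
  int8_t  keep[] = {1, 0, 1, 0, 1};
  int32_t n = toyCompactColumn(col, keep, 5);
  for (int32_t i = 0; i < n; ++i) {
    printf("%lld ", (long long)col[i]);  /* prints: 10 30 50 */
  }
  printf("\n");
  return 0;
}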
- if (pDst->pData == NULL || pSrc->pData == NULL) { + if (pDst->pData == NULL) { continue; } - colInfoDataCleanup(pDst, pBlock->info.rows); - int32_t numOfRows = 0; - for (int32_t j = 0; j < totalRows; ++j) { - if (((int8_t*)p->pData)[j] == 0) { - continue; + if (IS_VAR_DATA_TYPE(pDst->info.type)) { + int32_t j = 0; + pDst->varmeta.length = 0; + + while (j < totalRows) { + if (pIndicator[j] == 0) { + j += 1; + continue; + } + + if (colDataIsNull_var(pDst, j)) { + colDataSetNull_var(pDst, numOfRows); + } else { + char* p1 = colDataGetVarData(pDst, j); + colDataAppend(pDst, numOfRows, p1, false); + } + numOfRows += 1; + j += 1; + } + + if (maxRows < numOfRows) { + maxRows = numOfRows; + } + } else { + if (pBitmap == NULL) { + pBitmap = taosMemoryCalloc(1, bmLen); } - if (colDataIsNull_s(pSrc, j)) { - colDataAppendNULL(pDst, numOfRows); - } else { - colDataAppend(pDst, numOfRows, colDataGetData(pSrc, j), false); + memcpy(pBitmap, pDst->nullbitmap, bmLen); + memset(pDst->nullbitmap, 0, bmLen); + + int32_t j = 0; + + switch (pDst->info.type) { + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + while (j < totalRows) { + if (pIndicator[j] == 0) { + j += 1; + continue; + } + + if (colDataIsNull_f(pBitmap, j)) { + colDataSetNull_f(pDst->nullbitmap, numOfRows); + } else { + ((int64_t*)pDst->pData)[numOfRows] = ((int64_t*)pDst->pData)[j]; + } + numOfRows += 1; + j += 1; + } + break; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + while (j < totalRows) { + if (pIndicator[j] == 0) { + j += 1; + continue; + } + if (colDataIsNull_f(pBitmap, j)) { + colDataSetNull_f(pDst->nullbitmap, numOfRows); + } else { + ((int32_t*)pDst->pData)[numOfRows] = ((int32_t*)pDst->pData)[j]; + } + numOfRows += 1; + j += 1; + } + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + while (j < totalRows) { + if (pIndicator[j] == 0) { + j += 1; + continue; + } + if (colDataIsNull_f(pBitmap, j)) { + colDataSetNull_f(pDst->nullbitmap, numOfRows); + } else { + ((int16_t*)pDst->pData)[numOfRows] = ((int16_t*)pDst->pData)[j]; + } + numOfRows += 1; + j += 1; + } + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + while (j < totalRows) { + if (pIndicator[j] == 0) { + j += 1; + continue; + } + if (colDataIsNull_f(pBitmap, j)) { + colDataSetNull_f(pDst->nullbitmap, numOfRows); + } else { + ((int8_t*)pDst->pData)[numOfRows] = ((int8_t*)pDst->pData)[j]; + } + numOfRows += 1; + j += 1; + } + break; } - numOfRows += 1; } - // todo this value can be assigned directly - if (pBlock->info.rows == totalRows) { - pBlock->info.rows = numOfRows; - } else { - ASSERT(pBlock->info.rows == numOfRows); + if (maxRows < numOfRows) { + maxRows = numOfRows; } } - blockDataDestroy(px); // fix memory leak + pBlock->info.rows = maxRows; + if (pBitmap != NULL) { + taosMemoryFree(pBitmap); + } } } @@ -956,8 +1063,7 @@ static void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, u pAggInfo->groupId = groupId; } -static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, - const int32_t* rowEntryOffset) { +void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowEntryOffset) { bool returnNotNull = false; for (int32_t j = 0; j < numOfExprs; ++j) { SResultRowEntryInfo* pResInfo = getResultEntryInfo(pRow, j, rowEntryOffset); @@ -980,8 +1086,8 @@ static void doUpdateNumOfRows(SqlFunctionCtx* 
pCtx, SResultRow* pRow, int32_t nu } } -static void doCopyResultToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, - SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) { +void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx, + SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) { for (int32_t j = 0; j < numOfExprs; ++j) { int32_t slotId = pExprInfo[j].base.resSchema.slotId; @@ -1017,7 +1123,7 @@ static void doCopyResultToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SR // todo refactor. SResultRow has direct pointer in miainfo int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { - SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId); + SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId); if (page == NULL) { qError("failed to get buffer, code:%s, %s", tstrerror(terrno), GET_TASKID(pTaskInfo)); T_LONG_JMP(pTaskInfo->env, terrno); @@ -1047,7 +1153,7 @@ int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPos T_LONG_JMP(pTaskInfo->env, code); } - doCopyResultToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); + copyResultrowToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); releaseBufPage(pBuf, page); pBlock->info.rows += pRow->numOfRows; @@ -1099,7 +1205,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS } pGroupResInfo->index += 1; - doCopyResultToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); + copyResultrowToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo); releaseBufPage(pBuf, page); pBlock->info.rows += pRow->numOfRows; @@ -2128,8 +2234,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) { pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo); - } else { - ASSERT(0); } if (pOperator != NULL) { @@ -2166,9 +2270,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type) { SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode; - - bool isStream = (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type); - pOptr = createIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo, isStream); + pOptr = createIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) { pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) { @@ -2220,8 +2322,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createIndefinitOutputOperatorInfo(ops[0], pPhyNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC == type) { pOptr = createTimeSliceOperatorInfo(ops[0], pPhyNode, pTaskInfo); - } else { - ASSERT(0); + } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT == type) { + pOptr = createEventwindowOperatorInfo(ops[0], pPhyNode, pTaskInfo); } taosMemoryFree(ops); diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 
5676e19cdfa1173bd71f5d33ea05c015e05194ae..fb122b077fc21477b18c5db83b28d0caa294e605 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -593,8 +593,11 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf int32_t pageId = 0; pPage = getNewBufPage(pInfo->pBuf, &pageId); - taosArrayPush(p->pPageList, &pageId); + if (pPage == NULL) { + return pPage; + } + taosArrayPush(p->pPageList, &pageId); *(int32_t*)pPage = 0; } else { int32_t* curId = taosArrayGetLast(p->pPageList); @@ -612,6 +615,11 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf // add a new page for current group int32_t pageId = 0; pPage = getNewBufPage(pInfo->pBuf, &pageId); + if (pPage == NULL) { + qError("failed to get new buffer, code:%s", tstrerror(terrno)); + return NULL; + } + taosArrayPush(p->pPageList, &pageId); memset(pPage, 0, getBufPageSize(pInfo->pBuf)); } @@ -917,6 +925,7 @@ uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, S } static bool hasRemainPartion(SStreamPartitionOperatorInfo* pInfo) { return pInfo->parIte != NULL; } +static bool hasRemainTbName(SStreamPartitionOperatorInfo* pInfo) { return pInfo->pTbNameIte != NULL; } static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) { SStreamPartitionOperatorInfo* pInfo = pOperator->info; @@ -937,40 +946,13 @@ static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) { colDataAppend(pDestCol, pDest->info.rows, pSrcData, isNull); } pDest->info.rows++; - if (pInfo->tbnameCalSup.numOfExprs > 0 && i == 0) { - void* tbname = NULL; - if (streamStateGetParName(pOperator->pTaskInfo->streamInfo.pState, pParInfo->groupId, &tbname) == 0) { - memcpy(pDest->info.parTbName, tbname, TSDB_TABLE_NAME_LEN); - tdbFree(tbname); - } else { - SSDataBlock* pTmpBlock = blockCopyOneRow(pSrc, rowIndex); - SSDataBlock* pResBlock = createDataBlock(); - pResBlock->info.rowSize = TSDB_TABLE_NAME_LEN; - SColumnInfoData data = createColumnInfoData(TSDB_DATA_TYPE_VARCHAR, TSDB_TABLE_NAME_LEN, 0); - taosArrayPush(pResBlock->pDataBlock, &data); - blockDataEnsureCapacity(pResBlock, 1); - projectApplyFunctions(pInfo->tbnameCalSup.pExprInfo, pResBlock, pTmpBlock, pInfo->tbnameCalSup.pCtx, 1, NULL); - ASSERT(pResBlock->info.rows == 1); - ASSERT(taosArrayGetSize(pResBlock->pDataBlock) == 1); - SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, 0); - ASSERT(pCol->info.type == TSDB_DATA_TYPE_VARCHAR); - void* pData = colDataGetVarData(pCol, 0); - // TODO check tbname validity - if (pData != (void*)-1) { - memset(pDest->info.parTbName, 0, TSDB_TABLE_NAME_LEN); - int32_t len = TMIN(varDataLen(pData), TSDB_TABLE_NAME_LEN - 1); - memcpy(pDest->info.parTbName, varDataVal(pData), len); - /*pDest->info.parTbName[len + 1] = 0;*/ - } else { - pDest->info.parTbName[0] = 0; - } - if (pParInfo->groupId && pDest->info.parTbName[0]) { - streamStatePutParName(pOperator->pTaskInfo->streamInfo.pState, pParInfo->groupId, pDest->info.parTbName); - } - /*printf("\n\n set name %s\n\n", pDest->info.parTbName);*/ - blockDataDestroy(pTmpBlock); - blockDataDestroy(pResBlock); - } + } + pDest->info.parTbName[0] = 0; + if (pInfo->tbnameCalSup.numOfExprs > 0) { + void* tbname = NULL; + if (streamStateGetParName(pOperator->pTaskInfo->streamInfo.pState, pParInfo->groupId, &tbname) == 0) { + memcpy(pDest->info.parTbName, tbname, TSDB_TABLE_NAME_LEN); + tdbFree(tbname); } } taosArrayDestroy(pParInfo->rowIds); @@ -986,6 +968,60 @@ static SSDataBlock* 
buildStreamPartitionResult(SOperatorInfo* pOperator) { return pDest; } +void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, int64_t groupId, + SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock) { + void* pValue = NULL; + if (groupId != 0 && streamStateGetParName(pState, groupId, &pValue) != 0) { + SSDataBlock* pTmpBlock = blockCopyOneRow(pSrcBlock, rowId); + if (pTableSup->numOfExprs > 0) { + projectApplyFunctions(pTableSup->pExprInfo, pDestBlock, pTmpBlock, pTableSup->pCtx, pTableSup->numOfExprs, NULL); + SColumnInfoData* pTbCol = taosArrayGet(pDestBlock->pDataBlock, UD_TABLE_NAME_COLUMN_INDEX); + void* pData = colDataGetVarData(pTbCol, pDestBlock->info.rows - 1); + char* tbName = pSrcBlock->info.parTbName; + memset(tbName, 0, TSDB_TABLE_NAME_LEN); + int32_t len = TMIN(varDataLen(pData), TSDB_TABLE_NAME_LEN - 1); + memcpy(tbName, varDataVal(pData), len); + streamStatePutParName(pState, groupId, tbName); + pDestBlock->info.rows--; + } else { + void* pTbNameCol = taosArrayGet(pDestBlock->pDataBlock, UD_TABLE_NAME_COLUMN_INDEX); + colDataAppendNULL(pTbNameCol, pDestBlock->info.rows); + pSrcBlock->info.parTbName[0] = 0; + } + + if (pTagSup->numOfExprs > 0) { + projectApplyFunctions(pTagSup->pExprInfo, pDestBlock, pTmpBlock, pTagSup->pCtx, pTagSup->numOfExprs, NULL); + pDestBlock->info.rows--; + } + + void* pGpIdCol = taosArrayGet(pDestBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX); + colDataAppend(pGpIdCol, pDestBlock->info.rows, (const char*)&groupId, false); + + pDestBlock->info.rows++; + blockDataDestroy(pTmpBlock); + } + streamStateReleaseBuf(pState, NULL, pValue); +} + +static SSDataBlock* buildStreamCreateTableResult(SOperatorInfo* pOperator) { + SStreamPartitionOperatorInfo* pInfo = pOperator->info; + if ( (pInfo->tbnameCalSup.numOfExprs == 0 && pInfo->tagCalSup.numOfExprs == 0) || taosHashGetSize(pInfo->pPartitions) == 0) { + return NULL; + } + blockDataCleanup(pInfo->pCreateTbRes); + blockDataEnsureCapacity(pInfo->pCreateTbRes, taosHashGetSize(pInfo->pPartitions)); + SSDataBlock* pSrc = pInfo->pInputDataBlock; + + while (pInfo->pTbNameIte != NULL) { + SPartitionDataInfo* pParInfo = (SPartitionDataInfo*)pInfo->pTbNameIte; + int32_t rowId = *(int32_t*) taosArrayGet(pParInfo->rowIds, 0); + appendCreateTableRow(pOperator->pTaskInfo->streamInfo.pState, &pInfo->tbnameCalSup, &pInfo->tagCalSup, + pParInfo->groupId, pSrc, rowId, pInfo->pCreateTbRes); + pInfo->pTbNameIte = taosHashIterate(pInfo->pPartitions, pInfo->pTbNameIte); + } + return pInfo->pCreateTbRes->info.rows > 0 ? 
pInfo->pCreateTbRes : NULL; +} + static void doStreamHashPartitionImpl(SStreamPartitionOperatorInfo* pInfo, SSDataBlock* pBlock) { pInfo->pInputDataBlock = pBlock; for (int32_t i = 0; i < pBlock->info.rows; ++i) { @@ -1012,6 +1048,15 @@ static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamPartitionOperatorInfo* pInfo = pOperator->info; + SSDataBlock* pCtRes = NULL; + + if (hasRemainTbName(pInfo)) { + pCtRes = buildStreamCreateTableResult(pOperator); + if (pCtRes != NULL) { + return pCtRes; + } + } + if (hasRemainPartion(pInfo)) { return buildStreamPartitionResult(pOperator); } @@ -1039,6 +1084,7 @@ static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) { return pInfo->pDelRes; } break; default: + ASSERTS(pBlock->info.type == STREAM_CREATE_CHILD_TABLE, "invalid SSDataBlock type"); return pBlock; } @@ -1056,6 +1102,11 @@ static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) { pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; pInfo->parIte = taosHashIterate(pInfo->pPartitions, NULL); + pInfo->pTbNameIte = taosHashIterate(pInfo->pPartitions, NULL); + pCtRes = buildStreamCreateTableResult(pOperator); + if (pCtRes != NULL) { + return pCtRes; + } return buildStreamPartitionResult(pOperator); } @@ -1076,6 +1127,7 @@ static void destroyStreamPartitionOperatorInfo(void* param) { cleanupExprSupp(&pInfo->tagCalSup); blockDataDestroy(pInfo->pDelRes); taosHashCleanup(pInfo->pPartitions); + blockDataDestroy(pInfo->pCreateTbRes); taosMemoryFreeClear(param); } @@ -1091,6 +1143,46 @@ void initParDownStream(SOperatorInfo* downstream, SPartitionBySupporter* pParSup } } +SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag) { + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + pBlock->info.hasVarCol = false; + pBlock->info.id.groupId = 0; + pBlock->info.rows = 0; + pBlock->info.type = STREAM_CREATE_CHILD_TABLE; + pBlock->info.watermark = INT64_MIN; + + pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData)); + SColumnInfoData infoData = {0}; + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + if (tbName->numOfExprs > 0) { + infoData.info.bytes = tbName->pExprInfo->base.resSchema.bytes; + } else { + infoData.info.bytes = 1; + } + pBlock->info.rowSize += infoData.info.bytes; + // sub table name + taosArrayPush(pBlock->pDataBlock, &infoData); + + SColumnInfoData gpIdData = {0}; + gpIdData.info.type = TSDB_DATA_TYPE_UBIGINT; + gpIdData.info.bytes = 8; + pBlock->info.rowSize += gpIdData.info.bytes; + // group id + taosArrayPush(pBlock->pDataBlock, &gpIdData); + + for (int32_t i = 0; i < tag->numOfExprs; i++) { + SColumnInfoData tagCol = {0}; + tagCol.info.type = tag->pExprInfo[i].base.resSchema.type; + tagCol.info.bytes = tag->pExprInfo[i].base.resSchema.bytes; + tagCol.info.precision = tag->pExprInfo[i].base.resSchema.precision; + // tag info + taosArrayPush(pBlock->pDataBlock, &tagCol); + pBlock->info.rowSize += tagCol.info.bytes; + } + + return pBlock; +} + SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo) { SStreamPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamPartitionOperatorInfo)); @@ -1110,6 +1202,7 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr } } + pInfo->tbnameCalSup.numOfExprs = 0; if (pPartNode->pSubtable != NULL) { SExprInfo* pSubTableExpr = taosMemoryCalloc(1, sizeof(SExprInfo)); if (pSubTableExpr == NULL) { 
@@ -1124,9 +1217,10 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr } } + pInfo->tagCalSup.numOfExprs = 0; if (pPartNode->pTags != NULL) { int32_t numOfTags; - SExprInfo* pTagExpr = createExprInfo(pPartNode->pTags, NULL, &numOfTags); + SExprInfo* pTagExpr = createExpr(pPartNode->pTags, &numOfTags); if (pTagExpr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto _error; @@ -1137,6 +1231,12 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr } } + if (pInfo->tbnameCalSup.numOfExprs != 0 || pInfo->tagCalSup.numOfExprs != 0) { + pInfo->pCreateTbRes = buildCreateTableBlock(&pInfo->tbnameCalSup, &pInfo->tagCalSup); + } else { + pInfo->pCreateTbRes = NULL; + } + int32_t keyLen = 0; code = initGroupOptrInfo(&pInfo->partitionSup.pGroupColVals, &keyLen, &pInfo->partitionSup.keyBuf, pInfo->partitionSup.pGroupCols); @@ -1153,6 +1253,7 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr blockDataEnsureCapacity(pInfo->binfo.pRes, 4096); pInfo->parIte = NULL; + pInfo->pTbNameIte = NULL; pInfo->pInputDataBlock = NULL; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index 2a90f24346ceae26cbf6bfa27abfc1c5b06d2f99..6c2bcf086ddf1a66b007711537f847f686d180e2 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -175,8 +175,7 @@ static int32_t setInfoForNewGroup(SSDataBlock* pBlock, SLimitInfo* pLimitInfo, S // reset the value for a new group data // existing rows that belongs to previous group. - pLimitInfo->numOfOutputRows = 0; - pLimitInfo->remainOffset = pLimitInfo->limit.offset; + resetLimitInfoForNextGroup(pLimitInfo); } return PROJECT_RETRIEVE_DONE; @@ -200,10 +199,18 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) { int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows); blockDataKeepFirstNRows(pBlock, keepRows); + // TODO: optimize it later when partition by + limit + // all retrieved requirement has been fulfilled, let's finish this if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) || (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { setOperatorCompleted(pOperator); + } else { + // Even current group is done, there may be many vgroups remain existed, and we need to continue to retrieve data + // from next group. So let's continue this retrieve process + if (keepRows == 0) { + return PROJECT_RETRIEVE_CONTINUE; + } } } @@ -278,7 +285,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { // for stream interval if (pBlock->info.type == STREAM_RETRIEVE || pBlock->info.type == STREAM_DELETE_RESULT || - pBlock->info.type == STREAM_DELETE_DATA) { + pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { // printDataBlock1(pBlock, "project1"); return pBlock; } @@ -358,7 +365,6 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) { pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; } - // printDataBlock1(p, "project"); return (p->info.rows > 0) ? 
p : NULL; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6d9398958289a4d7d7e55be466d027b690f57fa1..37c33c44e28137bc2fec87f51a54e531bbb5a0dc 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -257,7 +257,7 @@ static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlo } // todo handle the slimit info -void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator) { +bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator) { SLimit* pLimit = &pLimitInfo->limit; const char* id = GET_TASKID(pTaskInfo); @@ -266,6 +266,7 @@ void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo pLimitInfo->remainOffset -= pBlock->info.rows; blockDataEmpty(pBlock); qDebug("current block ignore due to offset, current:%" PRId64 ", %s", pLimitInfo->remainOffset, id); + return false; } else { blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset); pLimitInfo->remainOffset = 0; @@ -274,13 +275,14 @@ void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo if (pLimit->limit != -1 && pLimit->limit <= (pLimitInfo->numOfOutputRows + pBlock->info.rows)) { // limit the output rows - int32_t overflowRows = pLimitInfo->numOfOutputRows + pBlock->info.rows - pLimit->limit; - int32_t keep = pBlock->info.rows - overflowRows; + int32_t keep = (int32_t)(pLimit->limit - pLimitInfo->numOfOutputRows); blockDataKeepFirstNRows(pBlock, keep); qDebug("output limit %" PRId64 " has reached, %s", pLimit->limit, id); - pOperator->status = OP_EXEC_DONE; + return true; } + + return false; } static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableScanInfo, SSDataBlock* pBlock, @@ -395,7 +397,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca } } - applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator); + bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator); + if (limitReached) { // set operator flag is done + setOperatorCompleted(pOperator); + } pCost->totalRows += pBlock->info.rows; pTableScanInfo->limitInfo.numOfOutputRows = pCost->totalRows; @@ -772,8 +777,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { // reset value for the next group data output pOperator->status = OP_OPENED; - pInfo->base.limitInfo.numOfOutputRows = 0; - pInfo->base.limitInfo.remainOffset = pInfo->base.limitInfo.limit.offset; + resetLimitInfoForNextGroup(&pInfo->base.limitInfo); int32_t num = 0; STableKeyInfo* pList = NULL; @@ -1361,54 +1365,16 @@ void calBlockTag(SStreamScanInfo* pInfo, SSDataBlock* pBlock) { } #endif -void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) { +static void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) { SExprSupp* pTbNameCalSup = &pInfo->tbnameCalSup; SStreamState* pState = pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState; - if (pTbNameCalSup == NULL || pTbNameCalSup->numOfExprs == 0) return; - if (pBlock == NULL || pBlock->info.rows == 0) return; - - void* tbname = NULL; - if (streamStateGetParName(pState, pBlock->info.id.groupId, &tbname) == 0) { - memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN); - tdbFree(tbname); - return; - } else { + blockDataCleanup(pInfo->pCreateTbRes); + if (pInfo->tbnameCalSup.numOfExprs == 0 && pInfo->tagCalSup.numOfExprs == 0) { 
pBlock->info.parTbName[0] = 0; - } - tdbFree(tbname); - - SSDataBlock* pSrcBlock = blockCopyOneRow(pBlock, 0); - ASSERT(pSrcBlock->info.rows == 1); - - SSDataBlock* pResBlock = createDataBlock(); - pResBlock->info.rowSize = VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN; - SColumnInfoData data = createColumnInfoData(TSDB_DATA_TYPE_VARCHAR, TSDB_TABLE_NAME_LEN, 0); - taosArrayPush(pResBlock->pDataBlock, &data); - blockDataEnsureCapacity(pResBlock, 1); - - projectApplyFunctions(pTbNameCalSup->pExprInfo, pResBlock, pSrcBlock, pTbNameCalSup->pCtx, 1, NULL); - ASSERT(pResBlock->info.rows == 1); - ASSERT(taosArrayGetSize(pResBlock->pDataBlock) == 1); - SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, 0); - ASSERT(pCol->info.type == TSDB_DATA_TYPE_VARCHAR); - - void* pData = colDataGetData(pCol, 0); - // TODO check tbname validation - if (pData != (void*)-1 && pData != NULL) { - memset(pBlock->info.parTbName, 0, TSDB_TABLE_NAME_LEN); - int32_t len = TMIN(varDataLen(pData), TSDB_TABLE_NAME_LEN - 1); - memcpy(pBlock->info.parTbName, varDataVal(pData), len); - /*pBlock->info.parTbName[len + 1] = 0;*/ } else { - pBlock->info.parTbName[0] = 0; - } - - if (pBlock->info.id.groupId && pBlock->info.parTbName[0]) { - streamStatePutParName(pState, pBlock->info.id.groupId, pBlock->info.parTbName); + appendCreateTableRow(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, &pInfo->tbnameCalSup, &pInfo->tagCalSup, + pBlock->info.id.groupId, pBlock, 0, pInfo->pCreateTbRes); } - - blockDataDestroy(pSrcBlock); - blockDataDestroy(pResBlock); } void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, @@ -1710,47 +1676,30 @@ static void setBlockGroupIdByUid(SStreamScanInfo* pInfo, SSDataBlock* pBlock) { } } +static void doCheckUpdate(SStreamScanInfo* pInfo, TSKEY endKey) { + if (pInfo->pUpdateInfo) { + checkUpdateData(pInfo, true, pInfo->pRes, true); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, endKey); + if (pInfo->pUpdateDataRes->info.rows > 0) { + pInfo->updateResIndex = 0; + if (pInfo->pUpdateDataRes->info.type == STREAM_CLEAR) { + pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; + } else if (pInfo->pUpdateDataRes->info.type == STREAM_INVERT) { + pInfo->scanMode = STREAM_SCAN_FROM_RES; + // return pInfo->pUpdateDataRes; + } else if (pInfo->pUpdateDataRes->info.type == STREAM_DELETE_DATA) { + pInfo->scanMode = STREAM_SCAN_FROM_DELETE_DATA; + } + } + } +} + static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SStreamScanInfo* pInfo = pOperator->info; qDebug("stream scan called"); -#if 0 - SStreamState* pState = pTaskInfo->streamInfo.pState; - if (pState) { - printf(">>>>>>>> stream write backend\n"); - SWinKey key = { - .ts = 1, - .groupId = 2, - }; - char tmp[100] = "abcdefg1"; - if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) { - ASSERT(0); - } - - key.ts = 2; - char tmp2[100] = "abcdefg2"; - if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) { - ASSERT(0); - } - - key.groupId = 5; - key.ts = 1; - char tmp3[100] = "abcdefg3"; - if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) { - ASSERT(0); - } - - char* val2 = NULL; - int32_t sz; - if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) { - ASSERT(0); - } - printf("stream read %s %d\n", val2, sz); - streamFreeVal(val2); - } -#endif if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1 || 
pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE2) { @@ -1771,6 +1720,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { /*resetTableScanInfo(pTSInfo, pWin);*/ tsdbReaderClose(pTSInfo->base.dataReader); pTSInfo->base.dataReader = NULL; + pInfo->pTableScanOp->status = OP_OPENED; pTSInfo->scanTimes = 0; pTSInfo->currentGroupId = -1; @@ -1784,17 +1734,32 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { pInfo->blockRecoverContiCnt = 0; return NULL; } - SSDataBlock* pBlock = doTableScan(pInfo->pTableScanOp); - if (pBlock != NULL) { + + switch (pInfo->scanMode) { + case STREAM_SCAN_FROM_RES: { + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + printDataBlock(pInfo->pRecoverRes, "scan recover"); + return pInfo->pRecoverRes; + } break; + default: + break; + } + + pInfo->pRecoverRes = doTableScan(pInfo->pTableScanOp); + if (pInfo->pRecoverRes != NULL) { pInfo->blockRecoverContiCnt++; - calBlockTbName(pInfo, pBlock); + calBlockTbName(pInfo, pInfo->pRecoverRes); if (pInfo->pUpdateInfo) { - TSKEY maxTs = updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex); + TSKEY maxTs = updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); } - qDebug("stream recover scan get block, rows %d", pBlock->info.rows); - printDataBlock(pBlock, "scan recover"); - return pBlock; + if (pInfo->pCreateTbRes->info.rows > 0) { + pInfo->scanMode = STREAM_SCAN_FROM_RES; + return pInfo->pCreateTbRes; + } + qDebug("stream recover scan get block, rows %d", pInfo->pRecoverRes->info.rows); + printDataBlock(pInfo->pRecoverRes, "scan recover"); + return pInfo->pRecoverRes; } pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE; STableScanInfo* pTSInfo = pInfo->pTableScanOp->info; @@ -1896,8 +1861,11 @@ FETCH_NEXT_BLOCK: qDebug("scan mode %d", pInfo->scanMode); switch (pInfo->scanMode) { case STREAM_SCAN_FROM_RES: { - blockDataDestroy(pInfo->pUpdateRes); pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + doCheckUpdate(pInfo, pInfo->pRes->info.window.ekey); + doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); + pInfo->pRes->info.dataLoad = 1; + blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); return pInfo->pRes; } break; case STREAM_SCAN_FROM_DELETE_DATA: { @@ -1988,22 +1956,12 @@ FETCH_NEXT_BLOCK: continue; } - if (pInfo->pUpdateInfo) { - checkUpdateData(pInfo, true, pInfo->pRes, true); - pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlockInfo->window.ekey); - if (pInfo->pUpdateDataRes->info.rows > 0) { - pInfo->updateResIndex = 0; - if (pInfo->pUpdateDataRes->info.type == STREAM_CLEAR) { - pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES; - } else if (pInfo->pUpdateDataRes->info.type == STREAM_INVERT) { - pInfo->scanMode = STREAM_SCAN_FROM_RES; - return pInfo->pUpdateDataRes; - } else if (pInfo->pUpdateDataRes->info.type == STREAM_DELETE_DATA) { - pInfo->scanMode = STREAM_SCAN_FROM_DELETE_DATA; - } - } + if (pInfo->pCreateTbRes->info.rows > 0) { + pInfo->scanMode = STREAM_SCAN_FROM_RES; + return pInfo->pCreateTbRes; } + doCheckUpdate(pInfo, pBlockInfo->window.ekey); doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL); pInfo->pRes->info.dataLoad = 1; blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); @@ -2017,7 +1975,6 @@ FETCH_NEXT_BLOCK: } else { continue; } - /*blockDataCleanup(pInfo->pRes);*/ } // record the scan action. 
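/* In the stream-scan changes around this point, a pending create-table block (pCreateTbRes) is
 * returned ahead of the data block, and scanMode is flipped to STREAM_SCAN_FROM_RES so that the
 * already-prepared data block is handed out on the next call. That control flow is a small
 * resumable state machine; the self-contained sketch below reproduces only the idea, using toy
 * names rather than the real operator types. */
#include <stddef.h>
#include <stdio.h>

typedef enum { SCAN_FROM_READER, SCAN_FROM_SAVED_RES } ToyScanMode;

typedef struct ToyScanner {
  ToyScanMode mode;
  const char *pendingSideBlock;  /* e.g. create-table metadata that must go out first */
  const char *savedDataBlock;    /* data block parked until the next call             */
  int         cursor;            /* toy replacement for the underlying reader         */
} ToyScanner;

static const char *toyReadNext(ToyScanner *s) {  /* stands in for the downstream scan */
  static const char *blocks[] = {"data block #1", "data block #2", NULL};
  return blocks[s->cursor] ? blocks[s->cursor++] : NULL;
}

static const char *toyNextBlock(ToyScanner *s) {
  if (s->mode == SCAN_FROM_SAVED_RES) {  /* resume: hand out the parked data block */
    s->mode = SCAN_FROM_READER;
    return s->savedDataBlock;
  }

  const char *pData = toyReadNext(s);
  if (pData != NULL && s->pendingSideBlock != NULL) {
    const char *side = s->pendingSideBlock;  /* side block is emitted first */
    s->pendingSideBlock = NULL;
    s->savedDataBlock = pData;               /* park the data block for the next call */
    s->mode = SCAN_FROM_SAVED_RES;
    return side;
  }
  return pData;  /* normal path, or NULL at end of data */
}

int main(void) {
  ToyScanner s = {SCAN_FROM_READER, "create-table block", NULL, 0};
  const char *b;
  while ((b = toyNextBlock(&s)) != NULL) {
    printf("%s\n", b);  /* create-table block, then data block #1, then data block #2 */
  }
  return 0;
}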
@@ -2223,6 +2180,7 @@ static void destroyStreamScanOperatorInfo(void* param) { } cleanupExprSupp(&pStreamScan->tbnameCalSup); + cleanupExprSupp(&pStreamScan->tagCalSup); updateInfoDestroy(pStreamScan->pUpdateInfo); blockDataDestroy(pStreamScan->pRes); @@ -2230,6 +2188,7 @@ static void destroyStreamScanOperatorInfo(void* param) { blockDataDestroy(pStreamScan->pPullDataRes); blockDataDestroy(pStreamScan->pDeleteDataRes); blockDataDestroy(pStreamScan->pUpdateDataRes); + blockDataDestroy(pStreamScan->pCreateTbRes); taosArrayDestroy(pStreamScan->pBlockLists); taosMemoryFree(pStreamScan); } @@ -2285,7 +2244,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys if (pTableScanNode->pTags != NULL) { int32_t numOfTags; - SExprInfo* pTagExpr = createExprInfo(pTableScanNode->pTags, NULL, &numOfTags); + SExprInfo* pTagExpr = createExpr(pTableScanNode->pTags, &numOfTags); if (pTagExpr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto _error; @@ -2343,6 +2302,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pInfo->readHandle = *pHandle; pInfo->tableUid = pScanPhyNode->uid; pTaskInfo->streamInfo.snapshotVer = pHandle->version; + pInfo->pCreateTbRes = buildCreateTableBlock(&pInfo->tbnameCalSup, &pInfo->tagCalSup); + blockDataEnsureCapacity(pInfo->pCreateTbRes, 8); // set the extract column id to streamHandle tqReaderSetColIdList(pInfo->tqReader, pColIds); @@ -2728,9 +2689,12 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) { taosArrayDestroy(pInfo->queryConds); pInfo->queryConds = NULL; + resetLimitInfoForNextGroup(&pInfo->limitInfo); return TSDB_CODE_SUCCESS; } +// all data produced by this function only belongs to one group +// slimit/soffset does not need to be concerned here, since this function only deal with data within one group. SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* pResBlock, int32_t capacity, SOperatorInfo* pOperator) { STableMergeScanInfo* pInfo = pOperator->info; @@ -2750,10 +2714,12 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* } } - qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pResBlock->info.rows); applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo, pOperator); pInfo->limitInfo.numOfOutputRows += pResBlock->info.rows; + qDebug("%s get sorted row block, rows:%d, limit:%"PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows, + pInfo->limitInfo.numOfOutputRows); + return (pResBlock->info.rows > 0) ? 
pResBlock : NULL; } @@ -2792,11 +2758,13 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { pOperator->resultInfo.totalRows += pBlock->info.rows; return pBlock; } else { + // Data of this group are all dumped, let's try the next group stopGroupTableMergeScan(pOperator); if (pInfo->tableEndIndex >= tableListSize - 1) { setOperatorCompleted(pOperator); break; } + pInfo->tableStartIndex = pInfo->tableEndIndex + 1; pInfo->groupId = tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->tableStartIndex)->groupId; startGroupTableMergeScan(pOperator); @@ -3265,7 +3233,9 @@ static void buildVnodeGroupedNtbTableCount(STableCountScanOperatorInfo* pInfo, S uint64_t groupId = calcGroupId(fullStbName, strlen(fullStbName)); pRes->info.id.groupId = groupId; int64_t ntbNum = metaGetNtbNum(pInfo->readHandle.meta); - fillTableCountScanDataBlock(pSupp, dbName, "", ntbNum, pRes); + if (ntbNum != 0) { + fillTableCountScanDataBlock(pSupp, dbName, "", ntbNum, pRes); + } } static void buildVnodeGroupedStbTableCount(STableCountScanOperatorInfo* pInfo, STableCountScanSupp* pSupp, diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index f5dc6cc623ca800cb459923cf69b495e70447360..97b4fd9dc43635a9afdc67bd4a61513c722b84b9 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -680,11 +680,13 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData break; } + bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator); + if (limitReached) { + resetLimitInfoForNextGroup(&pInfo->limitInfo); + } + if (p->info.rows > 0) { - applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator); - if (p->info.rows > 0) { - break; - } + break; } } @@ -698,7 +700,6 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info); } - pInfo->limitInfo.numOfOutputRows += p->info.rows; pDataBlock->info.rows = p->info.rows; pDataBlock->info.id.groupId = pInfo->groupId; pDataBlock->info.dataLoad = 1; diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index ac32b54f56d77795629760e4ebbc15d1bdab2d45..2b78f265fb4d6f2a1e41350f2f649582f9b84213 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -66,7 +66,7 @@ typedef struct SSysTableScanInfo { int64_t numOfBlocks; // extract basic running information. 
SLoadRemoteDataInfo loadInfo; - int32_t tbnameSlotId; + int32_t tbnameSlotId; } SSysTableScanInfo; typedef struct { @@ -81,10 +81,10 @@ typedef struct MergeIndex { } MergeIndex; typedef struct SBlockDistInfo { - SSDataBlock* pResBlock; - STsdbReader* pHandle; - SReadHandle readHandle; - uint64_t uid; // table uid + SSDataBlock* pResBlock; + STsdbReader* pHandle; + SReadHandle readHandle; + uint64_t uid; // table uid } SBlockDistInfo; static int32_t sysChkFilter__Comm(SNode* pNode); @@ -129,17 +129,21 @@ static char* SYSTABLE_IDX_COLUMN[] = {"table_name", "db_name", "create_time" static char* SYSTABLE_SPECIAL_COL[] = {"db_name", "vgroup_id"}; -static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity); -static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName); -static void destroySysScanOperator(void* param); -static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code); -static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo); +static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity); +static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName); +static void destroySysScanOperator(void* param); +static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code); +static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo); static __optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse); static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable, SMetaReader* smrChildTable, const char* dbname, const char* tableName, int32_t* pNumOfRows, const SSDataBlock* dataBlock); +static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, const char* dbname, int32_t* pNumOfRows, + const SSDataBlock* dataBlock, char* tName, SSchemaWrapper* schemaRow, + char* tableType); + static void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfRows, SSDataBlock* dataBlock, SFilterInfo* pFilterInfo); @@ -200,11 +204,11 @@ int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) { if (func == NULL) return -1; SMetaFltParam param = {.suid = 0, - .cid = 0, - .type = TSDB_DATA_TYPE_VARCHAR, - .val = pVal->datum.p, - .reverse = reverse, - .filterFunc = func}; + .cid = 0, + .type = TSDB_DATA_TYPE_VARCHAR, + .val = pVal->datum.p, + .reverse = reverse, + .filterFunc = func}; return -1; } @@ -219,11 +223,11 @@ int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) { if (func == NULL) return -1; SMetaFltParam param = {.suid = 0, - .cid = 0, - .type = TSDB_DATA_TYPE_BIGINT, - .val = &pVal->datum.i, - .reverse = reverse, - .filterFunc = func}; + .cid = 0, + .type = TSDB_DATA_TYPE_BIGINT, + .val = &pVal->datum.i, + .reverse = reverse, + .filterFunc = func}; int32_t ret = metaFilterCreateTime(pMeta, ¶m, result); return ret; @@ -351,9 +355,9 @@ static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt); static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableScanInfo* pInfo, const char* name, SExecTaskInfo* pTaskInfo); void extractTbnameSlotId(SSysTableScanInfo* pInfo, const SScanPhysiNode* pScanNode); -static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, - const char* name, SSDataBlock* pBlock); -__optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) { +static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, const char* name, + 
SSDataBlock* pBlock); +__optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) { if (ctype == OP_TYPE_LOWER_EQUAL || ctype == OP_TYPE_LOWER_THAN) { *reverse = true; } @@ -413,6 +417,176 @@ static bool sysTableIsCondOnOneTable(SNode* pCond, char* condTable) { return false; } +static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) { + qDebug("sysTableScanUserCols get cols start"); + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SSysTableScanInfo* pInfo = pOperator->info; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + blockDataCleanup(pInfo->pRes); + int32_t numOfRows = 0; + + SSDataBlock* dataBlock = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_COLS); + blockDataEnsureCapacity(dataBlock, pOperator->resultInfo.capacity); + + const char* db = NULL; + int32_t vgId = 0; + vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId); + + SName sn = {0}; + char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB); + + tNameGetDbName(&sn, varDataVal(dbname)); + varDataSetLen(dbname, strlen(varDataVal(dbname))); + + // optimize when sql like where table_name='tablename' and xxx. + if (pInfo->req.filterTb[0]) { + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(tableName, pInfo->req.filterTb); + + SMetaReader smrTable = {0}; + metaReaderInit(&smrTable, pInfo->readHandle.meta, 0); + int32_t code = metaGetTableEntryByName(&smrTable, pInfo->req.filterTb); + if (code != TSDB_CODE_SUCCESS) { + // terrno has been set by metaGetTableEntryByName, therefore, return directly + metaReaderClear(&smrTable); + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows = 0; + return NULL; + } + + if (smrTable.me.type == TSDB_SUPER_TABLE) { + metaReaderClear(&smrTable); + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows = 0; + return NULL; + } + + if (smrTable.me.type == TSDB_CHILD_TABLE) { + int64_t suid = smrTable.me.ctbEntry.suid; + metaReaderClear(&smrTable); + metaReaderInit(&smrTable, pInfo->readHandle.meta, 0); + code = metaGetTableEntryByUid(&smrTable, suid); + if (code != TSDB_CODE_SUCCESS) { + // terrno has been set by metaGetTableEntryByName, therefore, return directly + metaReaderClear(&smrTable); + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows = 0; + return NULL; + } + } + + char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + SSchemaWrapper* schemaRow = NULL; + if (smrTable.me.type == TSDB_SUPER_TABLE) { + schemaRow = &smrTable.me.stbEntry.schemaRow; + STR_TO_VARSTR(typeName, "CHILD_TABLE"); + } else if (smrTable.me.type == TSDB_NORMAL_TABLE) { + schemaRow = &smrTable.me.ntbEntry.schemaRow; + STR_TO_VARSTR(typeName, "NORMAL_TABLE"); + } + + sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, dataBlock, tableName, schemaRow, typeName); + metaReaderClear(&smrTable); + + if (numOfRows > 0) { + relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); + numOfRows = 0; + } + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; + setOperatorCompleted(pOperator); + return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; + } + + int32_t ret = 0; + if (pInfo->pCur == NULL) { + pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); + } + + SHashObj* stableSchema = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + taosHashSetFreeFp(stableSchema, tDeleteSSchemaWrapperForHash); + while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_TABLE_MAX)) == 0) { + char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + + SSchemaWrapper* schemaRow = NULL; + + if (pInfo->pCur->mr.me.type == TSDB_SUPER_TABLE) { + qDebug("sysTableScanUserCols cursor get super table"); + void* schema = taosHashGet(stableSchema, &pInfo->pCur->mr.me.uid, sizeof(int64_t)); + if (schema == NULL) { + SSchemaWrapper* schemaWrapper = tCloneSSchemaWrapper(&pInfo->pCur->mr.me.stbEntry.schemaRow); + taosHashPut(stableSchema, &pInfo->pCur->mr.me.uid, sizeof(int64_t), &schemaWrapper, POINTER_BYTES); + } + continue; + } else if (pInfo->pCur->mr.me.type == TSDB_CHILD_TABLE) { + qDebug("sysTableScanUserCols cursor get child table"); + STR_TO_VARSTR(typeName, "CHILD_TABLE"); + STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name); + + int64_t suid = pInfo->pCur->mr.me.ctbEntry.suid; + void* schema = taosHashGet(stableSchema, &pInfo->pCur->mr.me.ctbEntry.suid, sizeof(int64_t)); + if (schema != NULL) { + schemaRow = *(SSchemaWrapper**)schema; + } else { + tDecoderClear(&pInfo->pCur->mr.coder); + int code = metaGetTableEntryByUid(&pInfo->pCur->mr, suid); + if (code != TSDB_CODE_SUCCESS) { + // terrno has been set by metaGetTableEntryByName, therefore, return directly + qError("sysTableScanUserCols get meta by suid:%" PRId64 " error, code:%d", suid, code); + blockDataDestroy(dataBlock); + pInfo->loadInfo.totalRows = 0; + taosHashCleanup(stableSchema); + return NULL; + } + schemaRow = &pInfo->pCur->mr.me.stbEntry.schemaRow; + } + } else if (pInfo->pCur->mr.me.type == TSDB_NORMAL_TABLE) { + qDebug("sysTableScanUserCols cursor get normal table"); + schemaRow = &pInfo->pCur->mr.me.ntbEntry.schemaRow; + STR_TO_VARSTR(typeName, "NORMAL_TABLE"); + STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name); + } else { + qDebug("sysTableScanUserCols cursor get invalid table"); + continue; + } + + sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, dataBlock, tableName, schemaRow, typeName); + + if (numOfRows >= pOperator->resultInfo.capacity) { + relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); + numOfRows = 0; + + if (pInfo->pRes->info.rows > 0) { + break; + } + } + } + + taosHashCleanup(stableSchema); + + if (numOfRows > 0) { + relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); + numOfRows = 0; + } + + blockDataDestroy(dataBlock); + if (ret != 0) { + metaCloseTbCursor(pInfo->pCur); + pInfo->pCur = NULL; + setOperatorCompleted(pOperator); + } + + pInfo->loadInfo.totalRows += pInfo->pRes->info.rows; + qDebug("sysTableScanUserCols get cols success, rows:%" PRIu64, pInfo->loadInfo.totalRows); + + return (pInfo->pRes->info.rows == 0) ? 
NULL : pInfo->pRes; +} + static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SSysTableScanInfo* pInfo = pOperator->info; @@ -491,7 +665,8 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta); } - while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { + bool blockFull = false; + while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) { if (pInfo->pCur->mr.me.type != TSDB_CHILD_TABLE) { continue; } @@ -512,17 +687,25 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { T_LONG_JMP(pTaskInfo->env, terrno); } - sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows, dataBlock); + if ((smrSuperTable.me.stbEntry.schemaTag.nCols + numOfRows) > pOperator->resultInfo.capacity) { + metaTbCursorPrev(pInfo->pCur); + blockFull = true; + } else { + sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows, + dataBlock); + } metaReaderClear(&smrSuperTable); - if (numOfRows >= pOperator->resultInfo.capacity) { + if (blockFull || numOfRows >= pOperator->resultInfo.capacity) { relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo); numOfRows = 0; if (pInfo->pRes->info.rows > 0) { break; } + + blockFull = false; } } @@ -728,6 +911,66 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, return TSDB_CODE_SUCCESS; } +static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, const char* dbname, int32_t* pNumOfRows, + const SSDataBlock* dataBlock, char* tName, SSchemaWrapper* schemaRow, + char* tableType) { + if (schemaRow == NULL) { + qError("sysTableUserColsFillOneTableCols schemaRow is NULL"); + return TSDB_CODE_SUCCESS; + } + int32_t numOfRows = *pNumOfRows; + + int32_t numOfCols = schemaRow->nCols; + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pColInfoData = NULL; + + // table name + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 0); + colDataAppend(pColInfoData, numOfRows, tName, false); + + // database name + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 1); + colDataAppend(pColInfoData, numOfRows, dbname, false); + + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 2); + colDataAppend(pColInfoData, numOfRows, tableType, false); + + // col name + char colName[TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(colName, schemaRow->pSchema[i].name); + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 3); + colDataAppend(pColInfoData, numOfRows, colName, false); + + // col type + int8_t colType = schemaRow->pSchema[i].type; + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); + char colTypeStr[VARSTR_HEADER_SIZE + 32]; + int colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name); + if (colType == TSDB_DATA_TYPE_VARCHAR) { + colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + (int32_t)(schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE)); + } else if (colType == TSDB_DATA_TYPE_NCHAR) { + colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + (int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); + } + varDataSetLen(colTypeStr, colTypeLen); + colDataAppend(pColInfoData, numOfRows, (char*)colTypeStr, false); + + pColInfoData = taosArrayGet(dataBlock->pDataBlock, 5); + colDataAppend(pColInfoData, numOfRows, (const 
char*)&schemaRow->pSchema[i].bytes, false); + + for (int32_t j = 6; j <= 8; ++j) { + pColInfoData = taosArrayGet(dataBlock->pDataBlock, j); + colDataAppendNULL(pColInfoData, numOfRows); + } + ++numOfRows; + } + + *pNumOfRows = numOfRows; + + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName) { size_t size = 0; const SSysTableMeta* pMeta = NULL; @@ -1029,7 +1272,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; int32_t ret = 0; - while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) { + while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) { STR_TO_VARSTR(n, pInfo->pCur->mr.me.name); // table name @@ -1315,12 +1558,19 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { if (pInfo->showRewrite) { getDBNameFromCondition(pInfo->pCondition, dbName); sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) { + getDBNameFromCondition(pInfo->pCondition, dbName); + if (dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb); } + SSDataBlock* pBlock = NULL; if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0) { pBlock = sysTableScanUserTables(pOperator); } else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) { pBlock = sysTableScanUserTags(pOperator); + } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->readHandle.mnd == NULL) { + pBlock = sysTableScanUserCols(pOperator); } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->showRewrite && IS_SYS_DBNAME(dbName)) { pBlock = sysTableScanUserSTables(pOperator); @@ -1331,12 +1581,12 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { return sysTableScanFillTbName(pOperator, pInfo, name, pBlock); } -static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, - const char* name, SSDataBlock* pBlock) { +static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, const char* name, + SSDataBlock* pBlock) { if (pBlock != NULL) { if (pInfo->tbnameSlotId != -1) { SColumnInfoData* pColumnInfoData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, pInfo->tbnameSlotId); - char varTbName[TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE] = {0}; + char varTbName[TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE] = {0}; memcpy(varDataVal(varTbName), name, strlen(name)); varDataSetLen(varTbName, strlen(name)); for (int i = 0; i < pBlock->info.rows; ++i) { @@ -1391,7 +1641,7 @@ static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableSca tsem_wait(&pInfo->ready); if (pTaskInfo->code) { - qDebug("%s load meta data from mnode failed, totalRows:%" PRIu64 ", code:%s", GET_TASKID(pTaskInfo), + qError("%s load meta data from mnode failed, totalRows:%" PRIu64 ", code:%s", GET_TASKID(pTaskInfo), pInfo->loadInfo.totalRows, tstrerror(pTaskInfo->code)); return NULL; } @@ -1427,6 +1677,7 @@ static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableSca SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo) { + int32_t code = TDB_CODE_SUCCESS; SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo)); SOperatorInfo* 
pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
@@ -1437,7 +1688,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
   SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc;

   int32_t num = 0;
-  int32_t code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
+  code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }
@@ -1474,10 +1725,11 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
   setOperatorInfo(pOperator, "SysTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, false, OP_NOT_OPENED,
                   pInfo, pTaskInfo);
   pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
-  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, optrDefaultBufFn, NULL);
+  pOperator->fpSet =
+      createOperatorFpSet(optrDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, optrDefaultBufFn, NULL);
   return pOperator;

-  _error:
+_error:
   if (pInfo != NULL) {
     destroySysScanOperator(pInfo);
   }
@@ -1513,7 +1765,8 @@ void destroySysScanOperator(void* param) {
     const char* name = tNameGetTableName(&pInfo->name);
     if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
-        strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
+        strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0 ||
+        strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
       metaCloseTbCursor(pInfo->pCur);
       pInfo->pCur = NULL;
     }
@@ -1918,16 +2171,23 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
   colDataAppend(pColInfo, 0, p, false);
   taosMemoryFree(p);

+  // make the valgrind happy that all memory buffer has been initialized already.
+  if (slotId != 0) {
+    SColumnInfoData* p1 = taosArrayGet(pBlock->pDataBlock, 0);
+    int64_t v = 0;
+    colDataAppendInt64(p1, 0, &v);
+  }
+
   pBlock->info.rows = 1;
   pOperator->status = OP_EXEC_DONE;
   return pBlock;
 }

 static void destroyBlockDistScanOperatorInfo(void* param) {
-  SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
-  blockDataDestroy(pDistInfo->pResBlock);
-  tsdbReaderClose(pDistInfo->pHandle);
-  taosMemoryFreeClear(param);
+  SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
+  blockDataDestroy(pDistInfo->pResBlock);
+  tsdbReaderClose(pDistInfo->pHandle);
+  taosMemoryFreeClear(param);
 }

 static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pCond) {
@@ -1999,8 +2259,8 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
   setOperatorInfo(pOperator, "DataBlockDistScanOperator", QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN, false,
                   OP_NOT_OPENED, pInfo, pTaskInfo);

-  pOperator->fpSet =
-      createOperatorFpSet(optrDummyOpenFn, doBlockInfoScan, NULL, destroyBlockDistScanOperatorInfo, optrDefaultBufFn, NULL);
+  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doBlockInfoScan, NULL, destroyBlockDistScanOperatorInfo,
+                                         optrDefaultBufFn, NULL);
   return pOperator;

 _error:
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index d78e9c4edf06329ccdb6fe1817a67c318ee3041d..eccdcb85bf048969d6e04d98c9dc4a4bc24dacb8 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -1740,7 +1740,7 @@ void initStreamFunciton(SqlFunctionCtx* pCtx, int32_t numOfExpr) {
 }

 SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode* pPhyNode,
-                                          SExecTaskInfo* pTaskInfo, bool isStream) {
+                                          SExecTaskInfo* pTaskInfo) {
   SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo));
   SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
@@ -1973,6 +1973,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
   return (pBInfo->pRes->info.rows == 0) ? NULL : pBInfo->pRes;
 }

+// todo make this as an non-blocking operator
 SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode,
                                              SExecTaskInfo* pTaskInfo) {
   SStateWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStateWindowOperatorInfo));
@@ -2493,12 +2494,8 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
 static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
   SStreamIntervalOperatorInfo* pInfo = pOperator->info;
   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
-  SOperatorInfo* downstream = pOperator->pDownstream[0];
-  TSKEY maxTs = INT64_MIN;
-  TSKEY minTs = INT64_MAX;
-
-  SExprSupp* pSup = &pOperator->exprSupp;
+  SOperatorInfo* downstream = pOperator->pDownstream[0];
+  SExprSupp* pSup = &pOperator->exprSupp;

   qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ?
"interval final" : "interval semi"); @@ -2554,9 +2551,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } } - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + if (!pInfo->pUpdated) { + pInfo->pUpdated = taosArrayInit(4, POINTER_BYTES); + } + if (!pInfo->pUpdatedMap) { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + } + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -2574,35 +2576,39 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT || pBlock->info.type == STREAM_CLEAR) { SArray* delWins = taosArrayInit(8, sizeof(SWinKey)); - doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pUpdatedMap); + doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pInfo->pUpdatedMap); if (IS_FINAL_OP(pInfo)) { int32_t childIndex = getChildIndex(pBlock); SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); SStreamIntervalOperatorInfo* pChildInfo = pChildOp->info; SExprSupp* pChildSup = &pChildOp->exprSupp; doDeleteWindows(pChildOp, &pChildInfo->interval, pBlock, NULL, NULL); - rebuildIntervalWindow(pOperator, delWins, pUpdatedMap); + rebuildIntervalWindow(pOperator, delWins, pInfo->pUpdatedMap); addRetriveWindow(delWins, pInfo); taosArrayAddAll(pInfo->pDelWins, delWins); taosArrayDestroy(delWins); continue; } - removeResults(delWins, pUpdatedMap); + removeResults(delWins, pInfo->pUpdatedMap); taosArrayAddAll(pInfo->pDelWins, delWins); taosArrayDestroy(delWins); break; } else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) { - getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap); + getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap); continue; } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { - doDeleteWindows(pOperator, &pInfo->interval, pBlock, NULL, pUpdatedMap); - if (taosArrayGetSize(pUpdated) > 0) { + doDeleteWindows(pOperator, &pInfo->interval, pBlock, NULL, pInfo->pUpdatedMap); + if (taosArrayGetSize(pInfo->pUpdated) > 0) { break; } continue; } else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) { processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval); continue; + } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + return pBlock; + } else { + ASSERTS(pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type"); } if (pInfo->scalarSupp.pExprInfo != NULL) { @@ -2610,7 +2616,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); } setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamIntervalAggImpl(pOperator, pBlock, pBlock->info.id.groupId, pUpdatedMap); + doStreamIntervalAggImpl(pOperator, pBlock, pBlock->info.id.groupId, pInfo->pUpdatedMap); if (IS_FINAL_OP(pInfo)) { int32_t chIndex = getChildIndex(pBlock); int32_t size = taosArrayGetSize(pInfo->pChildren); @@ -2630,29 +2636,29 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) { setInputDataBlock(&pChildOp->exprSupp, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); 
doStreamIntervalAggImpl(pChildOp, pBlock, pBlock->info.id.groupId, NULL); } - maxTs = TMAX(maxTs, pBlock->info.window.ekey); - maxTs = TMAX(maxTs, pBlock->info.watermark); - minTs = TMIN(minTs, pBlock->info.window.skey); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.watermark); + pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, pBlock->info.window.skey); } - removeDeleteResults(pUpdatedMap, pInfo->pDelWins); - pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); - pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, minTs); + removeDeleteResults(pInfo->pUpdatedMap, pInfo->pDelWins); if (IS_FINAL_OP(pInfo)) { closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, - pInfo->pPullDataMap, pUpdatedMap, pInfo->pDelWins, pOperator); + pInfo->pPullDataMap, pInfo->pUpdatedMap, pInfo->pDelWins, pOperator); closeChildIntervalWindow(pOperator, pInfo->pChildren, pInfo->twAggSup.maxTs); } pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs; void* pIte = NULL; - while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) { - taosArrayPush(pUpdated, pIte); + while ((pIte = taosHashIterate(pInfo->pUpdatedMap, pIte)) != NULL) { + taosArrayPush(pInfo->pUpdated, pIte); } - taosHashCleanup(pUpdatedMap); - taosArraySort(pUpdated, resultrowComparAsc); + taosHashCleanup(pInfo->pUpdatedMap); + pInfo->pUpdatedMap = NULL; + taosArraySort(pInfo->pUpdated, resultrowComparAsc); - initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->pUpdated = NULL; blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes); @@ -2790,6 +2796,8 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, pInfo->delKey.ts = INT64_MAX; pInfo->delKey.groupId = 0; pInfo->numOfDatapack = 0; + pInfo->pUpdated = NULL; + pInfo->pUpdatedMap = NULL; pOperator->operatorType = pPhyNode->type; pOperator->blocking = true; @@ -3419,7 +3427,6 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { SExprSupp* pSup = &pOperator->exprSupp; SStreamSessionAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; - TSKEY maxTs = INT64_MIN; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -3439,10 +3446,14 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { return NULL; } - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - SSHashObj* pStUpdated = tSimpleHashInit(64, hashFn); SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(16, sizeof(SSessionKey)); // SResKeyPos + if (!pInfo->pUpdated) { + pInfo->pUpdated = taosArrayInit(16, sizeof(SSessionKey)); + } + if (!pInfo->pStUpdated) { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pStUpdated = tSimpleHashInit(64, hashFn); + } while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -3455,21 +3466,25 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { SArray* pWins = taosArrayInit(16, sizeof(SSessionKey)); // gap must be 0 doDeleteTimeWindows(pAggSup, pBlock, pWins); - removeSessionResults(pStUpdated, pWins); + removeSessionResults(pInfo->pStUpdated, pWins); if (IS_FINAL_OP(pInfo)) { 
int32_t childIndex = getChildIndex(pBlock); SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex); SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info; // gap must be 0 doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, NULL); - rebuildSessionWindow(pOperator, pWins, pStUpdated); + rebuildSessionWindow(pOperator, pWins, pInfo->pStUpdated); } copyDeleteWindowInfo(pWins, pInfo->pStDeleted); taosArrayDestroy(pWins); continue; } else if (pBlock->info.type == STREAM_GET_ALL) { - getAllSessionWindow(pAggSup->pResultRows, pStUpdated); + getAllSessionWindow(pAggSup->pResultRows, pInfo->pStUpdated); continue; + } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + return pBlock; + } else { + ASSERTS(pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type"); } if (pInfo->scalarSupp.pExprInfo != NULL) { @@ -3478,7 +3493,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted, IS_FINAL_OP(pInfo)); + doStreamSessionAggImpl(pOperator, pBlock, pInfo->pStUpdated, pInfo->pStDeleted, IS_FINAL_OP(pInfo)); if (IS_FINAL_OP(pInfo)) { int32_t chIndex = getChildIndex(pBlock); int32_t size = taosArrayGetSize(pInfo->pChildren); @@ -3495,20 +3510,20 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { setInputDataBlock(&pChildOp->exprSupp, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); doStreamSessionAggImpl(pChildOp, pBlock, NULL, NULL, true); } - maxTs = TMAX(maxTs, pBlock->info.window.ekey); - maxTs = TMAX(maxTs, pBlock->info.watermark); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.watermark); } - - pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); // restore the value pOperator->status = OP_RES_TO_RETURN; - closeSessionWindow(pAggSup->pResultRows, &pInfo->twAggSup, pStUpdated); + closeSessionWindow(pAggSup->pResultRows, &pInfo->twAggSup, pInfo->pStUpdated); closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs); - copyUpdateResult(pStUpdated, pUpdated); - removeSessionResults(pInfo->pStDeleted, pUpdated); - tSimpleHashCleanup(pStUpdated); - initGroupResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + copyUpdateResult(pInfo->pStUpdated, pInfo->pUpdated); + removeSessionResults(pInfo->pStDeleted, pInfo->pUpdated); + tSimpleHashCleanup(pInfo->pStUpdated); + pInfo->pStUpdated = NULL; + initGroupResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->pUpdated = NULL; blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); #if 0 @@ -3593,6 +3608,8 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh pInfo->isFinal = false; pInfo->pPhyNode = pPhyNode; pInfo->ignoreExpiredData = pSessionNode->window.igExpired; + pInfo->pUpdated = NULL; + pInfo->pStUpdated = NULL; setOperatorInfo(pOperator, "StreamSessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -3653,10 +3670,14 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { } } - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - SSHashObj* pStUpdated = tSimpleHashInit(64, hashFn); SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(16, 
sizeof(SSessionKey)); + if (!pInfo->pUpdated) { + pInfo->pUpdated = taosArrayInit(16, sizeof(SSessionKey)); + } + if (!pInfo->pStUpdated) { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pStUpdated = tSimpleHashInit(64, hashFn); + } while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -3671,13 +3692,17 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { // gap must be 0 SArray* pWins = taosArrayInit(16, sizeof(SSessionKey)); doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, pWins); - removeSessionResults(pStUpdated, pWins); + removeSessionResults(pInfo->pStUpdated, pWins); copyDeleteWindowInfo(pWins, pInfo->pStDeleted); taosArrayDestroy(pWins); break; } else if (pBlock->info.type == STREAM_GET_ALL) { - getAllSessionWindow(pInfo->streamAggSup.pResultRows, pStUpdated); + getAllSessionWindow(pInfo->streamAggSup.pResultRows, pInfo->pStUpdated); continue; + } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + return pBlock; + } else { + ASSERTS(pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type"); } if (pInfo->scalarSupp.pExprInfo != NULL) { @@ -3686,18 +3711,20 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, NULL, false); + doStreamSessionAggImpl(pOperator, pBlock, pInfo->pStUpdated, NULL, false); maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); pBInfo->pRes->info.watermark = pInfo->twAggSup.maxTs; - copyUpdateResult(pStUpdated, pUpdated); - removeSessionResults(pInfo->pStDeleted, pUpdated); - tSimpleHashCleanup(pStUpdated); + copyUpdateResult(pInfo->pStUpdated, pInfo->pUpdated); + removeSessionResults(pInfo->pStDeleted, pInfo->pUpdated); + tSimpleHashCleanup(pInfo->pStUpdated); + pInfo->pStUpdated = NULL; - initGroupResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + initGroupResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->pUpdated = NULL; blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); #if 0 @@ -3957,7 +3984,6 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { SExprSupp* pSup = &pOperator->exprSupp; SStreamStateAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; - int64_t maxTs = INT64_MIN; if (pOperator->status == OP_RES_TO_RETURN) { doBuildDeleteDataBlock(pOperator, pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator); if (pInfo->pDelRes->info.rows > 0) { @@ -3975,10 +4001,14 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { return NULL; } - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - SSHashObj* pSeUpdated = tSimpleHashInit(64, hashFn); SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(16, sizeof(SSessionKey)); + if (!pInfo->pUpdated) { + pInfo->pUpdated = taosArrayInit(16, sizeof(SSessionKey)); + } + if (!pInfo->pSeUpdated) { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pSeUpdated = tSimpleHashInit(64, hashFn); + } while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -3990,13 +4020,17 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { pBlock->info.type == 
STREAM_CLEAR) { SArray* pWins = taosArrayInit(16, sizeof(SSessionKey)); doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, pWins); - removeSessionResults(pSeUpdated, pWins); + removeSessionResults(pInfo->pSeUpdated, pWins); copyDeleteWindowInfo(pWins, pInfo->pSeDeleted); taosArrayDestroy(pWins); continue; } else if (pBlock->info.type == STREAM_GET_ALL) { - getAllSessionWindow(pInfo->streamAggSup.pResultRows, pSeUpdated); + getAllSessionWindow(pInfo->streamAggSup.pResultRows, pInfo->pSeUpdated); continue; + } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + return pBlock; + } else { + ASSERTS(pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type"); } if (pInfo->scalarSupp.pExprInfo != NULL) { @@ -4005,19 +4039,20 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { } // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); - doStreamStateAggImpl(pOperator, pBlock, pSeUpdated, pInfo->pSeDeleted); - maxTs = TMAX(maxTs, pBlock->info.window.ekey); + doStreamStateAggImpl(pOperator, pBlock, pInfo->pSeUpdated, pInfo->pSeDeleted); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } - pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); // restore the value pOperator->status = OP_RES_TO_RETURN; - closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pSeUpdated); - copyUpdateResult(pSeUpdated, pUpdated); - removeSessionResults(pInfo->pSeDeleted, pUpdated); - tSimpleHashCleanup(pSeUpdated); + closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pInfo->pSeUpdated); + copyUpdateResult(pInfo->pSeUpdated, pInfo->pUpdated); + removeSessionResults(pInfo->pSeDeleted, pInfo->pUpdated); + tSimpleHashCleanup(pInfo->pSeUpdated); + pInfo->pSeUpdated = NULL; - initGroupResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + initGroupResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->pUpdated = NULL; blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); #if 0 @@ -4098,6 +4133,8 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT); pInfo->pChildren = NULL; pInfo->ignoreExpiredData = pStateNode->window.igExpired; + pInfo->pUpdated = NULL; + pInfo->pSeUpdated = NULL; setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -4711,8 +4748,6 @@ _error: static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SStreamIntervalOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - int64_t maxTs = INT64_MIN; - int64_t minTs = INT64_MAX; SExprSupp* pSup = &pOperator->exprSupp; if (pOperator->status == OP_EXEC_DONE) { @@ -4740,9 +4775,14 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + if (!pInfo->pUpdated) { + pInfo->pUpdated = taosArrayInit(4, POINTER_BYTES); + } + if (!pInfo->pUpdatedMap) { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK); + } + while (1) { SSDataBlock* 
pBlock = downstream->fpSet.getNextFn(downstream); @@ -4756,11 +4796,15 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT || pBlock->info.type == STREAM_CLEAR) { - doDeleteWindows(pOperator, &pInfo->interval, pBlock, pInfo->pDelWins, pUpdatedMap); + doDeleteWindows(pOperator, &pInfo->interval, pBlock, pInfo->pDelWins, pInfo->pUpdatedMap); continue; } else if (pBlock->info.type == STREAM_GET_ALL) { - getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap); + getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap); continue; + } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { + return pBlock; + } else { + ASSERTS(pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type"); } if (pBlock->info.type == STREAM_NORMAL && pBlock->info.version != 0) { @@ -4781,27 +4825,27 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type); } - maxTs = TMAX(maxTs, pBlock->info.window.ekey); - minTs = TMIN(minTs, pBlock->info.window.skey); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, pBlock->info.window.skey); - doStreamIntervalAggImpl(pOperator, pBlock, pBlock->info.id.groupId, pUpdatedMap); + doStreamIntervalAggImpl(pOperator, pBlock, pBlock->info.id.groupId, pInfo->pUpdatedMap); } - pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); - pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, minTs); pOperator->status = OP_RES_TO_RETURN; - removeDeleteResults(pUpdatedMap, pInfo->pDelWins); - closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap, + removeDeleteResults(pInfo->pUpdatedMap, pInfo->pDelWins); + closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pInfo->pUpdatedMap, pInfo->pDelWins, pOperator); void* pIte = NULL; - while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) { - taosArrayPush(pUpdated, pIte); + while ((pIte = taosHashIterate(pInfo->pUpdatedMap, pIte)) != NULL) { + taosArrayPush(pInfo->pUpdated, pIte); } - taosArraySort(pUpdated, resultrowComparAsc); + taosArraySort(pInfo->pUpdated, resultrowComparAsc); - initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->pUpdated = NULL; blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); - taosHashCleanup(pUpdatedMap); + taosHashCleanup(pInfo->pUpdatedMap); + pInfo->pUpdatedMap = NULL; doBuildDeleteResult(pInfo, pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); if (pInfo->pDelRes->info.rows > 0) { @@ -4902,6 +4946,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys pInfo->delKey.ts = INT64_MAX; pInfo->delKey.groupId = 0; pInfo->numOfDatapack = 0; + pInfo->pUpdated = NULL; + pInfo->pUpdatedMap = NULL; setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -4922,4 +4968,3 @@ _error: pTaskInfo->code = code; return NULL; } - diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 06ef36664a574ea1ff44c3300f5570c37963bed6..661e9f97b74e33fd4f372a7eba51ccb42df385d2 100644 --- 
a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -108,7 +108,7 @@ static int32_t sortComparCleanup(SMsortComparParam* cmpParam) { return TSDB_CODE_SUCCESS; } -void tsortClearOrderdSource(SArray *pOrderedSource) { +void tsortClearOrderdSource(SArray* pOrderedSource) { for (size_t i = 0; i < taosArrayGetSize(pOrderedSource); i++) { SSortSource** pSource = taosArrayGet(pOrderedSource, i); if (NULL == *pSource) { @@ -121,6 +121,12 @@ void tsortClearOrderdSource(SArray *pOrderedSource) { if ((*pSource)->param && !(*pSource)->onlyRef) { taosMemoryFree((*pSource)->param); } + + if (!(*pSource)->onlyRef && (*pSource)->src.pBlock) { + blockDataDestroy((*pSource)->src.pBlock); + (*pSource)->src.pBlock = NULL; + } + taosMemoryFreeClear(*pSource); } @@ -245,7 +251,8 @@ static int32_t sortComparInit(SMsortComparParam* pParam, SArray* pSources, int32 if (pHandle->pBuf == NULL) { if (!osTempSpaceAvailable()) { code = TSDB_CODE_NO_AVAIL_DISK; - qError("Sort compare init failed since %s, %s", terrstr(code), pHandle->idStr); + terrno = code; + qError("Sort compare init failed since %s, %s", tstrerror(code), pHandle->idStr); return code; } @@ -253,6 +260,7 @@ static int32_t sortComparInit(SMsortComparParam* pParam, SArray* pSources, int32 "sortComparInit", tsTempDir); dBufSetPrintInfo(pHandle->pBuf); if (code != TSDB_CODE_SUCCESS) { + terrno = code; return code; } } @@ -276,6 +284,7 @@ static int32_t sortComparInit(SMsortComparParam* pParam, SArray* pSources, int32 code = blockDataFromBuf(pSource->src.pBlock, pPage); if (code != TSDB_CODE_SUCCESS) { + terrno = code; return code; } @@ -629,9 +638,9 @@ static int32_t createInitialSources(SSortHandle* pHandle) { if (pHandle->type == SORT_SINGLESOURCE_SORT) { SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0); - SSortSource* source = *pSource; + SSortSource* source = *pSource; *pSource = NULL; - + tsortClearOrderdSource(pHandle->pOrderedSource); while (1) { @@ -659,6 +668,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) { if (source->param && !source->onlyRef) { taosMemoryFree(source->param); } + if (!source->onlyRef && source->src.pBlock) { + blockDataDestroy(source->src.pBlock); + source->src.pBlock = NULL; + } taosMemoryFree(source); return code; } @@ -672,6 +685,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) { if (source->param && !source->onlyRef) { taosMemoryFree(source->param); } + if (!source->onlyRef && source->src.pBlock) { + blockDataDestroy(source->src.pBlock); + source->src.pBlock = NULL; + } taosMemoryFree(source); return code; } @@ -849,8 +866,8 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) { SSortExecInfo info = {0}; if (pHandle == NULL) { - info.sortMethod = SORT_QSORT_T; // by default - info.sortBuffer = 2 * 1048576; // 2mb by default + info.sortMethod = SORT_QSORT_T; // by default + info.sortBuffer = 2 * 1048576; // 2mb by default } else { info.sortBuffer = pHandle->pageSize * pHandle->numOfPages; info.sortMethod = pHandle->inMemSort ? 
SORT_QSORT_T : SORT_SPILLED_MERGE_SORT_T; diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index cbda8dc472c2a8e49880b8770b21eccbb0a8eecc..dc884a058193e0abbefa04cff086788733f59a87 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -44,9 +44,10 @@ typedef struct SMinmaxResInfo { bool nullTupleSaved; int16_t type; } SMinmaxResInfo; -int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc); -STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock); +int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems); + +int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos); diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h index 873dc46a0890bd6659e86feb294539dddc3a031a..80159460f5c58327930cdbeb08fcdd14263a1ebd 100644 --- a/source/libs/function/inc/tpercentile.h +++ b/source/libs/function/inc/tpercentile.h @@ -73,10 +73,10 @@ void tMemBucketDestroy(tMemBucket *pBucket); int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size); -double getPercentile(tMemBucket *pMemBucket, double percent); +int32_t getPercentile(tMemBucket *pMemBucket, double percent, double *result); #endif // TDENGINE_TPERCENTILE_H #ifdef __cplusplus } -#endif \ No newline at end of file +#endif diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 06406d158f95d4d0ebbefd7cb6345a59ca9131ad..1a0437c26a6657c17557e4772fd91403a647bf43 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1016,7 +1016,10 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err intervals[0] = -INFINITY; intervals[numOfBins - 1] = INFINITY; // in case of desc bin orders, -inf/inf should be swapped - ASSERT(numOfBins >= 4); + if (numOfBins < 4) { + return false; + } + if (intervals[1] > intervals[numOfBins - 2]) { TSWAP(intervals[0], intervals[numOfBins - 1]); } @@ -1361,7 +1364,7 @@ static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } else if (IS_FLOAT_TYPE(colType)) { resType = TSDB_DATA_TYPE_DOUBLE; } else { - ASSERT(0); + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 51945d6a0a492509357f1d8198f279c17e1a9e9e..0c491addd58a48823514ef21f6e51969827a4270 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -506,7 +506,6 @@ static int32_t getNumOfElems(SqlFunctionCtx* pCtx) { SColumnInfoData* pInputCol = pInput->pData[0]; if (pInput->colDataSMAIsSet && pInput->totalRows == pInput->numOfRows && !IS_VAR_DATA_TYPE(pInputCol->info.type)) { numOfElem = pInput->numOfRows - pInput->pColumnDataAgg[0]->numOfNull; - ASSERT(numOfElem >= 0); } else { if (pInputCol->hasNull) { for (int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) { @@ -596,7 +595,6 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) { if (pInput->colDataSMAIsSet) { numOfElem = pInput->numOfRows - pAgg->numOfNull; - ASSERT(numOfElem >= 0); if (IS_SIGNED_NUMERIC_TYPE(type)) { pSumRes->isum += pAgg->sum; @@ -661,7 +659,6 
@@ int32_t sumInvertFunction(SqlFunctionCtx* pCtx) { if (pInput->colDataSMAIsSet) { numOfElem = pInput->numOfRows - pAgg->numOfNull; - ASSERT(numOfElem >= 0); if (IS_SIGNED_NUMERIC_TYPE(type)) { pSumRes->isum -= pAgg->sum; @@ -757,24 +754,33 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { } int32_t minFunction(SqlFunctionCtx* pCtx) { - int32_t numOfElems = doMinMaxHelper(pCtx, 1); + int32_t numOfElems = 0; + int32_t code = doMinMaxHelper(pCtx, 1, &numOfElems); + if (code != TSDB_CODE_SUCCESS) { + return code; + } SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1); return TSDB_CODE_SUCCESS; } int32_t maxFunction(SqlFunctionCtx* pCtx) { - int32_t numOfElems = doMinMaxHelper(pCtx, 0); + int32_t numOfElems = 0; + int32_t code = doMinMaxHelper(pCtx, 0, &numOfElems); + if (code != TSDB_CODE_SUCCESS) { + return code; + } SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1); return TSDB_CODE_SUCCESS; } -static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex); -static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, +static int32_t setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex); +static int32_t setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex); int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { - SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); + int32_t code = TSDB_CODE_SUCCESS; + SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); SMinmaxResInfo* pRes = GET_ROWCELL_INTERBUF(pEntryInfo); int32_t slotId = pCtx->pExpr->base.resSchema.slotId; @@ -791,17 +797,17 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { } if (pEntryInfo->numOfRes > 0) { - setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow); + code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow); } else { - setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); + code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); } - return pEntryInfo->numOfRes; + return code; } -void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex) { +int32_t setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex) { if (pCtx->subsidiaries.num <= 0) { - return; + return TSDB_CODE_SUCCESS; } for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) { @@ -811,17 +817,23 @@ void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); colDataAppendNULL(pDstCol, rowIndex); } + + return TSDB_CODE_SUCCESS; } -void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex) { +int32_t setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex) { if (pCtx->subsidiaries.num <= 0) { - return; + return TSDB_CODE_SUCCESS; } if ((pCtx->saveHandle.pBuf != NULL && pTuplePos->pageId != -1) || (pCtx->saveHandle.pState && pTuplePos->streamTupleKey.ts > 0)) { int32_t numOfCols = pCtx->subsidiaries.num; const char* p = loadTupleData(pCtx, pTuplePos); + if (p == NULL) { + terrno = TSDB_CODE_NO_AVAIL_DISK; + return terrno; + } bool* nullList = (bool*)p; char* pStart = (char*)(nullList + numOfCols * sizeof(bool)); @@ -832,7 +844,6 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple int32_t dstSlotId = 
pc->pExpr->base.resSchema.slotId; SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); - ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes); if (nullList[j]) { colDataAppendNULL(pDstCol, rowIndex); } else { @@ -845,13 +856,8 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple tdbFree((void*)p); } } -} -void releaseSource(STuplePos* pPos) { - if (pPos->pageId == -1) { - return; - } - // Todo(liuyao) relase row + return TSDB_CODE_SUCCESS; } // This function append the selectivity to subsidiaries function context directly, without fetching data @@ -876,7 +882,6 @@ void appendSelectivityValue(SqlFunctionCtx* pCtx, int32_t rowIndex, int32_t pos) int32_t dstSlotId = pc->pExpr->base.resSchema.slotId; SColumnInfoData* pDstCol = taosArrayGet(pCtx->pDstBlock->pDataBlock, dstSlotId); - ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes); if (colDataIsNull_s(pSrcCol, rowIndex) == true) { colDataAppendNULL(pDstCol, pos); @@ -887,7 +892,6 @@ void appendSelectivityValue(SqlFunctionCtx* pCtx, int32_t rowIndex, int32_t pos) } void replaceTupleData(STuplePos* pDestPos, STuplePos* pSourcePos) { - releaseSource(pDestPos); *pDestPos = *pSourcePos; } @@ -1145,7 +1149,9 @@ static void stddevTransferInfo(SStddevRes* pInput, SStddevRes* pOutput) { int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SStddevRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -1570,7 +1576,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; - return 0; + return TSDB_CODE_SUCCESS; } else { pInfo->pMemBucket = tMemBucketCreate(pCol->info.bytes, type, pInfo->minval, pInfo->maxval); } @@ -1633,7 +1639,11 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { char* data = colDataGetData(pCol, i); numOfElems += 1; - tMemBucketPut(pInfo->pMemBucket, data, 1); + int32_t code = tMemBucketPut(pInfo->pMemBucket, data, 1); + if (code != TSDB_CODE_SUCCESS) { + tMemBucketDestroy(pInfo->pMemBucket); + return code; + } } SET_VAL(pResInfo, numOfElems, 1); @@ -1644,7 +1654,9 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SVariant* pVal = &pCtx->param[1].param; + int32_t code = 0; double v = 0; + GET_TYPED_DATA(v, double, pVal->nType, &pVal->i); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); @@ -1652,10 +1664,14 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { tMemBucket* pMemBucket = ppInfo->pMemBucket; if (pMemBucket != NULL && pMemBucket->total > 0) { // check for null - SET_DOUBLE_VAL(&ppInfo->result, getPercentile(pMemBucket, v)); + code = getPercentile(pMemBucket, v, &ppInfo->result); } tMemBucketDestroy(pMemBucket); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + return functionFinalize(pCtx, pBlock); } @@ -1771,7 +1787,10 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) { double v = 0; GET_TYPED_DATA(v, double, type, data); - tHistogramAdd(&pInfo->pHisto, v); + int32_t code = tHistogramAdd(&pInfo->pHisto, v); + if (code != 0) { + return TSDB_CODE_FAILED; + } } qDebug("%s after add %d elements into histogram, total:%" PRId64 ", numOfEntry:%d, pHisto:%p, elems: %p", @@ -1841,7 +1860,9 @@ int32_t 
apercentileFunctionMerge(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); @@ -2009,25 +2030,26 @@ static void prepareBuf(SqlFunctionCtx* pCtx) { pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool); pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen); } - - ASSERT(pCtx->subsidiaries.buf != NULL); - ASSERT(pCtx->subsidiaries.rowLen > 0); } -static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, +static int32_t firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) { + int32_t code = TSDB_CODE_SUCCESS; + if (pCtx->subsidiaries.num <= 0) { - return; + return TSDB_CODE_SUCCESS; } if (!pInfo->hasResult) { - pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock); + code = saveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); } else { - updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); + code = updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos); } + + return code; } -static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t currentTs, int32_t type, char* pData) { +static int32_t doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t currentTs, int32_t type, char* pData) { SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SFirstLastRes* pInfo = GET_ROWCELL_INTERBUF(pResInfo); @@ -2037,9 +2059,13 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur memcpy(pInfo->buf, pData, pInfo->bytes); pInfo->ts = currentTs; - firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + int32_t code = firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pInfo->hasResult = true; + return TSDB_CODE_SUCCESS; } // This ordinary first function does not care if current scan is ascending order or descending order scan @@ -2060,10 +2086,12 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) { } // All null data column, return directly. 
- if (pInput->colDataSMAIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) { - ASSERT(pInputCol->hasNull == true); + if (pInput->colDataSMAIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows) && pInputCol->hasNull == true) { // save selectivity value for column consisted of all null values - firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + int32_t code = firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } return TSDB_CODE_SUCCESS; } @@ -2136,7 +2164,10 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) { char* data = colDataGetData(pInputCol, i); TSKEY cts = pts[i]; if (pResInfo->numOfRes == 0 || pInfo->ts > cts) { - doSaveCurrentVal(pCtx, i, cts, pInputCol->info.type, data); + int32_t code = doSaveCurrentVal(pCtx, i, cts, pInputCol->info.type, data); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pResInfo->numOfRes = 1; } } @@ -2144,7 +2175,10 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) { if (numOfElems == 0) { // save selectivity value for column consisted of all null values - firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + int32_t code = firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } SET_VAL(pResInfo, numOfElems, 1); return TSDB_CODE_SUCCESS; @@ -2168,10 +2202,12 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { } // All null data column, return directly. - if (pInput->colDataSMAIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) { - ASSERT(pInputCol->hasNull == true); + if (pInput->colDataSMAIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows) && pInputCol->hasNull == true) { // save selectivity value for column consisted of all null values - firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + int32_t code = firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } return TSDB_CODE_SUCCESS; } @@ -2261,7 +2297,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { char* data = colDataGetData(pInputCol, chosen); - doSaveCurrentVal(pCtx, i, cts, type, data); + int32_t code = doSaveCurrentVal(pCtx, i, cts, type, data); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pResInfo->numOfRes = 1; } } @@ -2269,7 +2308,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { for (int32_t i = pInput->startRowIndex + round * 4; i < pInput->startRowIndex + pInput->numOfRows; ++i) { if (pResInfo->numOfRes == 0 || pInfo->ts < pts[i]) { char* data = colDataGetData(pInputCol, i); - doSaveCurrentVal(pCtx, i, pts[i], type, data); + int32_t code = doSaveCurrentVal(pCtx, i, pts[i], type, data); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pResInfo->numOfRes = 1; } } @@ -2283,7 +2325,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { if (pResInfo->numOfRes == 0 || pInfo->ts < pts[i]) { char* data = colDataGetData(pInputCol, i); - doSaveCurrentVal(pCtx, i, pts[i], type, data); + int32_t code = doSaveCurrentVal(pCtx, i, pts[i], type, data); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pResInfo->numOfRes = 1; } } @@ -2294,7 +2339,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { // save selectivity value for column consisted of all null values if (numOfElems == 0) { - firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + int32_t code 
= firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } // SET_VAL(pResInfo, numOfElems, 1); @@ -2322,18 +2370,25 @@ static int32_t firstLastTransferInfoImpl(SFirstLastRes* pInput, SFirstLastRes* p return TSDB_CODE_SUCCESS; } -static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst, +static int32_t firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst, int32_t rowIndex) { if (TSDB_CODE_SUCCESS == firstLastTransferInfoImpl(pInput, pOutput, isFirst)) { - firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pOutput); + int32_t code = firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pOutput); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pOutput->hasResult = true; } + + return TSDB_CODE_SUCCESS; } static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuery) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SFirstLastRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -2343,7 +2398,10 @@ static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuer for (int32_t i = start; i < start + pInput->numOfRows; ++i) { char* data = colDataGetData(pCol, i); SFirstLastRes* pInputInfo = (SFirstLastRes*)varDataVal(data); - firstLastTransferInfo(pCtx, pInputInfo, pInfo, isFirstQuery, i); + int32_t code = firstLastTransferInfo(pCtx, pInputInfo, pInfo, isFirstQuery, i); + if (code != TSDB_CODE_SUCCESS) { + return code; + } if (!numOfElems) { numOfElems = pInputInfo->hasResult ? 
1 : 0; } @@ -2358,6 +2416,7 @@ int32_t firstFunctionMerge(SqlFunctionCtx* pCtx) { return firstLastFunctionMerge int32_t lastFunctionMerge(SqlFunctionCtx* pCtx) { return firstLastFunctionMergeImpl(pCtx, false); } int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); @@ -2368,12 +2427,14 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull || pResInfo->isNullRes); // handle selectivity - setSelectivityValue(pCtx, pBlock, &pRes->pos, pBlock->info.rows); + code = setSelectivityValue(pCtx, pBlock, &pRes->pos, pBlock->info.rows); - return pResInfo->numOfRes; + return code; } int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; + SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pEntryInfo); @@ -2389,10 +2450,10 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); colDataAppend(pCol, pBlock->info.rows, res, false); - setSelectivityValue(pCtx, pBlock, &pRes->pos, pBlock->info.rows); + code = setSelectivityValue(pCtx, pBlock, &pRes->pos, pBlock->info.rows); taosMemoryFree(res); - return 1; + return code; } int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { @@ -2412,7 +2473,7 @@ int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) { return TSDB_CODE_SUCCESS; } -static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, int64_t cts, SFirstLastRes* pInfo) { +static int32_t doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, int64_t cts, SFirstLastRes* pInfo) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pInputCol = pInput->pData[0]; @@ -2429,9 +2490,14 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i } pInfo->ts = cts; - firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + int32_t code = firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pInfo->hasResult = true; + + return TSDB_CODE_SUCCESS; } int32_t lastRowFunction(SqlFunctionCtx* pCtx) { @@ -2493,7 +2559,10 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { - doSaveLastrow(pCtx, data, i, cts, pInfo); + int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pResInfo->numOfRes = 1; } } @@ -2528,7 +2597,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) { return true; } -static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int64_t ts) { +static int32_t doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int64_t ts) { switch (type) { case TSDB_DATA_TYPE_BOOL: pDiffInfo->prev.i64 = *(bool*)pv ? 
1 : 0; @@ -2553,12 +2622,14 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv, int pDiffInfo->prev.d64 = *(double*)pv; break; default: - ASSERT(0); + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; } pDiffInfo->prevTs = ts; + + return TSDB_CODE_SUCCESS; } -static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, +static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order, int64_t ts) { int32_t factor = (order == TSDB_ORDER_ASC) ? 1 : -1; pDiffInfo->prevTs = ts; @@ -2567,7 +2638,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo int32_t v = *(int32_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2580,7 +2651,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo int8_t v = *(int8_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2591,7 +2662,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo int16_t v = *(int16_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2603,7 +2674,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo int64_t v = *(int64_t*)pv; int64_t delta = factor * (v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendInt64(pOutput, pos, &delta); } @@ -2614,7 +2685,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo float v = *(float*)pv; double delta = factor * (v - pDiffInfo->prev.d64); // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { // check for overflow - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendDouble(pOutput, pos, &delta); } @@ -2625,7 +2696,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo double v = *(double*)pv; double delta = factor * (v - pDiffInfo->prev.d64); // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { // check for overflow - colDataSetNull_f(pOutput->nullbitmap, pos); + colDataSetNull_f_s(pOutput, pos); } else { colDataAppendDouble(pOutput, pos, &delta); } @@ -2633,8 +2704,10 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo break; } default: - ASSERT(0); + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; } + + return TSDB_CODE_SUCCESS; } int32_t diffFunction(SqlFunctionCtx* pCtx) { @@ -2658,7 +2731,7 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { if (colDataIsNull_f(pInputCol->nullbitmap, i)) { if (pDiffInfo->includeNull) { - colDataSetNull_f(pOutput->nullbitmap, pos); + 
colDataSetNull_f_s(pOutput, pos); numOfElems += 1; } @@ -2671,7 +2744,10 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { if (tsList[i] == pDiffInfo->prevTs) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]); + int32_t code = doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } // handle selectivity if (pCtx->subsidiaries.num > 0) { appendSelectivityValue(pCtx, i, pos); @@ -2679,7 +2755,10 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { numOfElems++; } else { - doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); + int32_t code = doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } pDiffInfo->hasPrev = true; @@ -2690,8 +2769,7 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { if (colDataIsNull_f(pInputCol->nullbitmap, i)) { if (pDiffInfo->includeNull) { - colDataSetNull_f(pOutput->nullbitmap, pos); - + colDataSetNull_f_s(pOutput, pos); numOfElems += 1; } continue; @@ -2704,7 +2782,10 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { if (tsList[i] == pDiffInfo->prevTs) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } - doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]); + int32_t code = doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order, tsList[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } // handle selectivity if (pCtx->subsidiaries.num > 0) { appendSelectivityValue(pCtx, i, pos); @@ -2712,7 +2793,10 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) { numOfElems++; } else { - doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); + int32_t code = doSetPrevVal(pDiffInfo, pInputCol->info.type, pv, tsList[i]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } pDiffInfo->hasPrev = true; @@ -2754,8 +2838,8 @@ static STopBotRes* getTopBotOutputInfo(SqlFunctionCtx* pCtx) { return pRes; } -static void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSDataBlock* pSrcBlock, uint16_t type, - uint64_t uid, SResultRowEntryInfo* pEntryInfo, bool isTopQuery); +static int32_t doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSDataBlock* pSrcBlock, uint16_t type, + uint64_t uid, SResultRowEntryInfo* pEntryInfo, bool isTopQuery); static void addResult(SqlFunctionCtx* pCtx, STopBotResItem* pSourceItem, int16_t type, bool isTopQuery); @@ -2777,11 +2861,17 @@ int32_t topFunction(SqlFunctionCtx* pCtx) { numOfElems++; char* data = colDataGetData(pCol, i); - doAddIntoResult(pCtx, data, i, pCtx->pSrcBlock, pRes->type, pInput->uid, pResInfo, true); + int32_t code = doAddIntoResult(pCtx, data, i, pCtx->pSrcBlock, pRes->type, pInput->uid, pResInfo, true); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pRes->nullTupleSaved = true; } return TSDB_CODE_SUCCESS; @@ -2805,11 +2895,17 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) { numOfElems++; char* data = colDataGetData(pCol, i); - doAddIntoResult(pCtx, data, i, pCtx->pSrcBlock, pRes->type, pInput->uid, pResInfo, false); + int32_t code = doAddIntoResult(pCtx, data, i, pCtx->pSrcBlock, pRes->type, 
pInput->uid, pResInfo, false); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) { - pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pRes->nullTupleSaved = true; } @@ -2849,7 +2945,7 @@ static int32_t topBotResComparFn(const void* p1, const void* p2, const void* par return (val1->v.d > val2->v.d) ? 1 : -1; } -void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSDataBlock* pSrcBlock, uint16_t type, +int32_t doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSDataBlock* pSrcBlock, uint16_t type, uint64_t uid, SResultRowEntryInfo* pEntryInfo, bool isTopQuery) { STopBotRes* pRes = getTopBotOutputInfo(pCtx); @@ -2857,7 +2953,6 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData taosVariantCreateFromBinary(&val, pData, tDataTypes[type].bytes, type); STopBotResItem* pItems = pRes->pItems; - assert(pItems != NULL); // not full yet if (pEntryInfo->numOfRes < pRes->maxSize) { @@ -2867,7 +2962,10 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple if (pCtx->subsidiaries.num > 0) { - pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock); + int32_t code = saveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } #ifdef BUF_PAGE_DEBUG qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId, @@ -2893,7 +2991,10 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData // save the data of this tuple by over writing the old data if (pCtx->subsidiaries.num > 0) { - updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + int32_t code = updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } #ifdef BUF_PAGE_DEBUG qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset); @@ -2902,6 +3003,8 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData topBotResComparFn, NULL, !isTopQuery); } } + + return TSDB_CODE_SUCCESS; } /* @@ -2941,20 +3044,30 @@ void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsid return buf; } -static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STupleKey key) { +static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STupleKey key, + STuplePos* pPos) { STuplePos p = {0}; if (pHandle->pBuf != NULL) { SFilePage* pPage = NULL; if (pHandle->currentPage == -1) { pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage); + if (pPage == NULL) { + return terrno; + } pPage->num = sizeof(SFilePage); } else { pPage = getBufPage(pHandle->pBuf, pHandle->currentPage); + if (pPage == NULL) { + return terrno; + } if (pPage->num + length > getBufPageSize(pHandle->pBuf)) { // current page is all used, let's prepare a new buffer page releaseBufPage(pHandle->pBuf, pPage); pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage); + if (pPage == NULL) { + return terrno; + } pPage->num = sizeof(SFilePage); } } @@ -2967,36 +3080,40 @@ static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf 
releaseBufPage(pHandle->pBuf, pPage); } else { // other tuple save policy - if (streamStateFuncPut(pHandle->pState, &key, pBuf, length) < 0) { - ASSERT(0); + if (streamStateFuncPut(pHandle->pState, &key, pBuf, length) >= 0) { + p.streamTupleKey = key; } - p.streamTupleKey = key; } - return p; + *pPos = p; + return TSDB_CODE_SUCCESS; } -STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) { +int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) { prepareBuf(pCtx); STupleKey key; if (pCtx->saveHandle.pBuf == NULL) { SColumnInfoData* pColInfo = taosArrayGet(pSrcBlock->pDataBlock, 0); - ASSERT(pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP); - int64_t skey = *(int64_t*)colDataGetData(pColInfo, rowIndex); + if (pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + int64_t skey = *(int64_t*)colDataGetData(pColInfo, rowIndex); - key.groupId = pSrcBlock->info.id.groupId; - key.ts = skey; - key.exprIdx = pCtx->exprIdx; + key.groupId = pSrcBlock->info.id.groupId; + key.ts = skey; + key.exprIdx = pCtx->exprIdx; + } } char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); - return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, key); + return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, key, pPos); } static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) { if (pHandle->pBuf != NULL) { SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId); + if (pPage == NULL) { + return terrno; + } memcpy(pPage->data + pPos->offset, pBuf, length); setBufPageDirty(pPage, true); releaseBufPage(pHandle->pBuf, pPage); @@ -3011,13 +3128,15 @@ int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBloc prepareBuf(pCtx); char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf); - doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos); - return TSDB_CODE_SUCCESS; + return doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos); } static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) { if (pHandle->pBuf != NULL) { SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId); + if (pPage == NULL) { + return NULL; + } char* p = pPage->data + pPos->offset; releaseBufPage(pHandle->pBuf, pPage); return p; @@ -3034,6 +3153,8 @@ const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) { } int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; + SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); STopBotRes* pRes = getTopBotOutputInfo(pCtx); @@ -3046,8 +3167,8 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t currentRow = pBlock->info.rows; if (pEntryInfo->numOfRes <= 0) { colDataAppendNULL(pCol, currentRow); - setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); - return pEntryInfo->numOfRes; + code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); + return code; } for (int32_t i = 0; i < pEntryInfo->numOfRes; ++i) { STopBotResItem* pItem = &pRes->pItems[i]; @@ -3056,18 +3177,17 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { qDebug("page_finalize i:%d,item:%p,pageId:%d, offset:%d\n", i, pItem, pItem->tuplePos.pageId, pItem->tuplePos.offset); #endif - setSelectivityValue(pCtx, pBlock, &pRes->pItems[i].tuplePos, 
currentRow); + code = setSelectivityValue(pCtx, pBlock, &pRes->pItems[i].tuplePos, currentRow); currentRow += 1; } - return pEntryInfo->numOfRes; + return code; } void addResult(SqlFunctionCtx* pCtx, STopBotResItem* pSourceItem, int16_t type, bool isTopQuery) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); STopBotRes* pRes = getTopBotOutputInfo(pCtx); STopBotResItem* pItems = pRes->pItems; - assert(pItems != NULL); // not full yet if (pEntryInfo->numOfRes < pRes->maxSize) { @@ -3225,7 +3345,9 @@ static void spreadTransferInfo(SSpreadInfo* pInput, SSpreadInfo* pOutput) { int32_t spreadFunctionMerge(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -3404,7 +3526,9 @@ static void elapsedTransferInfo(SElapsedInfo* pInput, SElapsedInfo* pOutput) { int32_t elapsedFunctionMerge(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SElapsedInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -3574,7 +3698,9 @@ static bool getHistogramBinDesc(SHistoFuncInfo* pInfo, char* binDescStr, int8_t intervals[0] = -INFINITY; intervals[numOfBins - 1] = INFINITY; // in case of desc bin orders, -inf/inf should be swapped - ASSERT(numOfBins >= 4); + if (numOfBins < 4) { + return false; + } if (intervals[1] > intervals[numOfBins - 2]) { TSWAP(intervals[0], intervals[numOfBins - 1]); } @@ -3717,7 +3843,9 @@ static void histogramTransferInfo(SHistoFuncInfo* pInput, SHistoFuncInfo* pOutpu int32_t histogramFunctionMerge(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SHistoFuncInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -4099,7 +4227,7 @@ static bool checkStateOp(int8_t op, SColumnInfoData* pCol, int32_t index, SVaria break; } default: { - ASSERT(0); + return false; } } return false; @@ -4431,12 +4559,15 @@ static void sampleAssignResult(SSampleInfo* pInfo, char* data, int32_t index) { assignVal(pInfo->data + index * pInfo->colBytes, data, pInfo->colBytes, pInfo->colType); } -static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* data, int32_t index) { +static int32_t doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* data, int32_t index) { pInfo->totalPoints++; if (pInfo->numSampled < pInfo->samples) { sampleAssignResult(pInfo, data, pInfo->numSampled); if (pCtx->subsidiaries.num > 0) { - pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } pInfo->numSampled++; } else { @@ -4444,10 +4575,15 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da if (j < pInfo->samples) { sampleAssignResult(pInfo, data, j); if (pCtx->subsidiaries.num > 0) { - updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]); + int32_t code = updateTupleData(pCtx, index, 
pCtx->pSrcBlock, &pInfo->tuplePos[j]); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } } + + return TSDB_CODE_SUCCESS; } int32_t sampleFunction(SqlFunctionCtx* pCtx) { @@ -4463,11 +4599,17 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { } char* data = colDataGetData(pInputCol, i); - doReservoirSample(pCtx, pInfo, data, i); + int32_t code = doReservoirSample(pCtx, pInfo, data, i); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) { - pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pInfo->nullTupleSaved = true; } @@ -4476,6 +4618,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) { } int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx); SSampleInfo* pInfo = getSampleOutputInfo(pCtx); @@ -4487,15 +4630,15 @@ int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t currentRow = pBlock->info.rows; if (pInfo->numSampled == 0) { colDataAppendNULL(pCol, currentRow); - setSelectivityValue(pCtx, pBlock, &pInfo->nullTuplePos, currentRow); - return pInfo->numSampled; + code = setSelectivityValue(pCtx, pBlock, &pInfo->nullTuplePos, currentRow); + return code; } for (int32_t i = 0; i < pInfo->numSampled; ++i) { colDataAppend(pCol, currentRow + i, pInfo->data + i * pInfo->colBytes, false); - setSelectivityValue(pCtx, pBlock, &pInfo->tuplePos[i], currentRow + i); + code = setSelectivityValue(pCtx, pBlock, &pInfo->tuplePos[i], currentRow + i); } - return pInfo->numSampled; + return code; } bool getTailFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { @@ -4761,7 +4904,7 @@ bool modeFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) { return true; } -static void doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, char* data) { +static int32_t doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, char* data) { int32_t hashKeyBytes = IS_STR_DATA_TYPE(pInfo->colType) ? 
varDataTLen(data) : pInfo->colBytes; SModeItem** pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes); if (pHashItem == NULL) { @@ -4771,7 +4914,10 @@ static void doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, pItem->count += 1; if (pCtx->subsidiaries.num > 0) { - pItem->tuplePos = saveTupleData(pCtx, rowIndex, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, rowIndex, pCtx->pSrcBlock, &pItem->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } taosHashPut(pInfo->pHash, data, hashKeyBytes, &pItem, sizeof(SModeItem*)); @@ -4779,9 +4925,14 @@ static void doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, } else { (*pHashItem)->count += 1; if (pCtx->subsidiaries.num > 0) { - updateTupleData(pCtx, rowIndex, pCtx->pSrcBlock, &((*pHashItem)->tuplePos)); + int32_t code = updateTupleData(pCtx, rowIndex, pCtx->pSrcBlock, &((*pHashItem)->tuplePos)); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } + + return TSDB_CODE_SUCCESS; } int32_t modeFunction(SqlFunctionCtx* pCtx) { @@ -4802,7 +4953,10 @@ int32_t modeFunction(SqlFunctionCtx* pCtx) { numOfElems++; char* data = colDataGetData(pInputCol, i); - doModeAdd(pInfo, i, pCtx, data); + int32_t code = doModeAdd(pInfo, i, pCtx, data); + if (code != TSDB_CODE_SUCCESS) { + return code; + } if (sizeof(SModeInfo) + pInfo->numOfPoints * (sizeof(SModeItem) + pInfo->colBytes) >= MODE_MAX_RESULT_SIZE) { taosHashCleanup(pInfo->pHash); @@ -4811,7 +4965,10 @@ int32_t modeFunction(SqlFunctionCtx* pCtx) { } if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) { - pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pInfo->nullTupleSaved = true; } @@ -4821,6 +4978,7 @@ int32_t modeFunction(SqlFunctionCtx* pCtx) { } int32_t modeFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SModeInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t slotId = pCtx->pExpr->base.resSchema.slotId; @@ -4840,15 +4998,15 @@ int32_t modeFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { if (maxCount != 0) { SModeItem* pResItem = (SModeItem*)(pInfo->pItems + resIndex * (sizeof(SModeItem) + pInfo->colBytes)); colDataAppend(pCol, currentRow, pResItem->data, false); - setSelectivityValue(pCtx, pBlock, &pResItem->tuplePos, currentRow); + code = setSelectivityValue(pCtx, pBlock, &pResItem->tuplePos, currentRow); } else { colDataAppendNULL(pCol, currentRow); - setSelectivityValue(pCtx, pBlock, &pInfo->nullTuplePos, currentRow); + code = setSelectivityValue(pCtx, pBlock, &pInfo->nullTuplePos, currentRow); } taosHashCleanup(pInfo->pHash); - return pResInfo->numOfRes; + return code; } bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { @@ -4900,11 +5058,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } int32_t i = pInput->startRowIndex; - if (pCtx->start.key != INT64_MIN) { - // ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) || - // (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC)); - - ASSERT(last->key == INT64_MIN); + if (pCtx->start.key != INT64_MIN && last->key == INT64_MIN) { for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) { if (colDataIsNull_f(pInputCol->nullbitmap, i)) { continue; @@ -5124,7 +5278,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) { } default: - 
ASSERT(0); + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; } // the last interpolated time window value @@ -5151,7 +5305,6 @@ _twa_over: * is required, we simply copy to the resut ot interResBuffer. */ // void twa_function_copy(SQLFunctionCtx *pCtx) { -// assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); // SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx); // // memcpy(GET_ROWCELL_INTERBUF(pResInfo), pCtx->pInput, (size_t)pCtx->inputBytes); @@ -5700,7 +5853,10 @@ int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx) { TSKEY cts = getRowPTs(pInput->pPTS, i); if (pResInfo->numOfRes == 0 || pInfo->ts < cts) { - doSaveLastrow(pCtx, data, i, cts, pInfo); + int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pResInfo->numOfRes = 1; } } diff --git a/source/libs/function/src/detail/tavgfunction.c b/source/libs/function/src/detail/tavgfunction.c index 8e010181d17ea12fe08491498217d21c48e562bb..3a70a65ec4f8d5888f264b3fd9f97a82932554dc 100644 --- a/source/libs/function/src/detail/tavgfunction.c +++ b/source/libs/function/src/detail/tavgfunction.c @@ -366,7 +366,6 @@ bool avgFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) { static int32_t calculateAvgBySMAInfo(SAvgRes* pRes, int32_t numOfRows, int32_t type, const SColumnDataAgg* pAgg) { int32_t numOfElem = numOfRows - pAgg->numOfNull; - ASSERT(numOfElem >= 0); pRes->count += numOfElem; if (IS_SIGNED_NUMERIC_TYPE(type)) { @@ -672,7 +671,7 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) { break; } default: - ASSERT(0); + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; } } else { numOfElem = doAddNumericVector(pCol, type, pInput, pAvgRes); @@ -706,7 +705,9 @@ static void avgTransferInfo(SAvgRes* pInput, SAvgRes* pOutput) { int32_t avgFunctionMerge(SqlFunctionCtx* pCtx) { SInputColumnInfoData* pInput = &pCtx->input; SColumnInfoData* pCol = pInput->pData[0]; - ASSERT(pCol->info.type == TSDB_DATA_TYPE_BINARY); + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } SAvgRes* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index 3660ec272f31055cfca56ecbdd49b027f5de5a5b..847c7386550c13cc93e2e9e118fa0b8045804702 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -700,7 +700,7 @@ static void doExtractVal(SColumnInfoData* pCol, int32_t i, int32_t end, SqlFunct } } -int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { +int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) { int32_t numOfElems = 0; SInputColumnInfoData* pInput = &pCtx->input; @@ -721,7 +721,6 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { // data in current data block are qualified to the query if (pInput->colDataSMAIsSet) { numOfElems = pInput->numOfRows - pAgg->numOfNull; - ASSERT(pInput->numOfRows == pInput->totalRows && numOfElems >= 0); if (numOfElems == 0) { goto _over; @@ -746,7 +745,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); if (index >= 0) { - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } } else { @@ -760,7 +762,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if 
(pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); if (index >= 0) { - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } } @@ -774,7 +779,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); if (index >= 0) { - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } } @@ -788,7 +796,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); if (index >= 0) { - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } } @@ -804,14 +815,17 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { if (pCtx->subsidiaries.num > 0) { index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); if (index >= 0) { - pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } } } } pBuf->assign = true; - return numOfElems; + return TSDB_CODE_SUCCESS; } int32_t start = pInput->startRowIndex; @@ -825,14 +839,16 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { memcpy(&pBuf->v, pCol->pData + (pCol->info.bytes * i), pCol->info.bytes); if (pCtx->subsidiaries.num > 0) { - pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } pBuf->assign = true; numOfElems = 1; } if (i >= end) { - ASSERT(numOfElems == 0); goto _over; } @@ -889,9 +905,13 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) { _over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) { - pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock); + int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } pBuf->nullTupleSaved = true; } - return numOfElems; + *nElems = numOfElems; + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/function/src/tfunctionInt.c b/source/libs/function/src/tfunctionInt.c index 3afa0e5cdd402d43fa4a415468c4eda76a971e36..edfd8660106fa8ce1141bb979a78cd4d9a771bbe 100644 --- a/source/libs/function/src/tfunctionInt.c +++ b/source/libs/function/src/tfunctionInt.c @@ -40,8 +40,6 @@ int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock } } - assert(maxRows >= 0); - blockDataEnsureCapacity(pResBlock, maxRows); for (int32_t i = 0; i < num; ++i) { SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i); @@ -63,7 +61,6 @@ int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock } bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry) { - assert(pEntry != NULL); return pEntry->complete; } diff --git a/source/libs/function/src/thistogram.c 
b/source/libs/function/src/thistogram.c index b0f23f78df72afa23015a947632e024082cacd46..e7d631f638da769fe0d9eabb03762bb983410a56 100644 --- a/source/libs/function/src/thistogram.c +++ b/source/libs/function/src/thistogram.c @@ -73,7 +73,10 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { #if defined(USE_ARRAYLIST) int32_t idx = histoBinarySearch((*pHisto)->elems, (*pHisto)->numOfEntries, val); - assert(idx >= 0 && idx <= (*pHisto)->maxEntries && (*pHisto)->elems != NULL); + if (ASSERTS(idx >= 0 && idx <= (*pHisto)->maxEntries && (*pHisto)->elems != NULL, "tHistogramAdd Error, idx:%d, maxEntries:%d, elems:%p", + idx, (*pHisto)->maxEntries, (*pHisto)->elems)) { + return -1; + } if ((*pHisto)->elems[idx].val == val && idx >= 0) { (*pHisto)->elems[idx].num += 1; @@ -84,15 +87,27 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { } else { /* insert a new slot */ if ((*pHisto)->numOfElems >= 1 && idx < (*pHisto)->numOfEntries) { if (idx > 0) { - assert((*pHisto)->elems[idx - 1].val <= val); + if (ASSERTS((*pHisto)->elems[idx - 1].val <= val, "tHistogramAdd Error, elems[%d].val:%lf, val:%lf", + idx - 1, (*pHisto)->elems[idx - 1].val, val)) { + return -1; + } } else { - assert((*pHisto)->elems[idx].val > val); + if (ASSERTS((*pHisto)->elems[idx].val > val, "tHistogramAdd Error, elems[%d].val:%lf, val:%lf", + idx, (*pHisto)->elems[idx].val, val)) { + return -1; + } } } else if ((*pHisto)->numOfElems > 0) { - assert((*pHisto)->elems[(*pHisto)->numOfEntries].val <= val); + if (ASSERTS((*pHisto)->elems[(*pHisto)->numOfEntries].val <= val, "tHistogramAdd Error, elems[%d].val:%lf, val:%lf", + (*pHisto)->numOfEntries, (*pHisto)->elems[idx].val, val)) { + return -1; + } } - histogramCreateBin(*pHisto, idx, val); + int32_t code = histogramCreateBin(*pHisto, idx, val); + if (code != 0) { + return code; + } } #else tSkipListKey key = tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &val, tDataTypes[TSDB_DATA_TYPE_DOUBLE].nSize); @@ -151,7 +166,6 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { if ((*pHisto)->numOfEntries >= (*pHisto)->maxEntries + 1) { // set the right value for loser-tree - assert((*pHisto)->pLoserTree != NULL); if (!(*pHisto)->ordered) { SSkipListPrint((*pHisto)->pList, 1); @@ -203,7 +217,10 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { tSkipListNode* pNext = pNode->pForward[0]; SHistBin* pNextEntry = (SHistBin*)pNext->pData; - assert(pNextEntry->val - pEntry->val == pEntry->delta); + if (ASSERTS(pNextEntry->val - pEntry->val == pEntry->delta, "tHistogramAdd Error, pNextEntry->val:%lf, pEntry->val:%lf, pEntry->delta:%lf", + pNextEntry->val, pEntry->val, pEntry->delta)) { + return -1; + } double newVal = (pEntry->val * pEntry->num + pNextEntry->val * pNextEntry->num) / (pEntry->num + pNextEntry->num); pEntry->val = newVal; @@ -253,7 +270,9 @@ int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { } else { SHistBin* pEntry = (SHistBin*)pResNode->pData; - assert(pEntry->val == val); + if (ASSERTS(pEntry->val == val, "tHistogramAdd Error, pEntry->val:%lf, val:%lf")) { + return -1; + } pEntry->num += 1; } @@ -329,7 +348,10 @@ int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) { memmove(&pHisto->elems[index + 1], &pHisto->elems[index], sizeof(SHistBin) * remain); } - assert(index >= 0 && index <= pHisto->maxEntries); + if (ASSERTS(index >= 0 && index <= pHisto->maxEntries, "histogramCreateBin Error, index:%d, maxEntries:%d", + index, pHisto->maxEntries)) { + return -1; + } pHisto->elems[index].num = 1; 
pHisto->elems[index].val = val; @@ -343,7 +365,11 @@ int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) { pHisto->elems[pHisto->maxEntries].num = 0; } #endif - assert(pHisto->numOfEntries <= pHisto->maxEntries); + if (ASSERTS(pHisto->numOfEntries <= pHisto->maxEntries, "histogramCreateBin Error, numOfEntries:%d, maxEntries:%d", + pHisto->numOfEntries, pHisto->maxEntries)) { + return -1; + } + return 0; } @@ -386,12 +412,14 @@ int64_t tHistogramSum(SHistogramInfo* pHisto, double v) { if (slotIdx < 0) { slotIdx = 0; - assert(v <= pHisto->elems[slotIdx].val); + ASSERTS(v <= pHisto->elems[slotIdx].val, "tHistogramSum Error, elems[%d].val:%lf, v:%lf", + slotIdx, pHisto->elems[slotIdx].val, v); } else { - assert(v >= pHisto->elems[slotIdx].val); - + ASSERTS(v >= pHisto->elems[slotIdx].val, "tHistogramSum Error, elems[%d].val:%lf, v:%lf", + slotIdx, pHisto->elems[slotIdx].val, v); if (slotIdx + 1 < pHisto->numOfEntries) { - assert(v < pHisto->elems[slotIdx + 1].val); + ASSERTS(v < pHisto->elems[slotIdx + 1].val, "tHistogramSum Error, elems[%d].val:%lf, v:%lf", + slotIdx + 1, pHisto->elems[slotIdx + 1].val, v); } } } @@ -445,7 +473,9 @@ double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { j += 1; } - assert(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem); + ASSERTS(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem, + "tHistogramUniform Error, total:%d, numOfElem:%d, elems[%d].num:%d", + total, numOfElem, j + 1, pHisto->elems[j + 1].num); double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { @@ -502,7 +532,9 @@ double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { j += 1; } - assert(total <= numOfElem && total + pEntry->num > numOfElem); + ASSERTS(total <= numOfElem && total + pEntry->num > numOfElem, + "tHistogramUniform Error, total:%d, numOfElem:%d, pEntry->num:%d", + total, numOfElem, pEntry->num); double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 157ee08f1522118e0ab12daa5426c1f44c00adda..97fe94b513f86f2a20f4c4e42b965c67840cc043 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -33,15 +33,25 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) (SFilePage *)taosMemoryCalloc(1, pMemBucket->bytes * pMemBucket->pSlots[slotIdx].info.size + sizeof(SFilePage)); int32_t groupId = getGroupId(pMemBucket->numOfSlots, slotIdx, pMemBucket->times); - SArray *pIdList = *(SArray **)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); + + SArray *pIdList; + void *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); + if (p != NULL) { + pIdList = *(SArray **)p; + } else { + return NULL; + } int32_t offset = 0; for (int32_t i = 0; i < taosArrayGetSize(pIdList); ++i) { int32_t *pageId = taosArrayGet(pIdList, i); SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); - memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes)); + if (pg == NULL) { + return NULL; + } + memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes)); offset += (int32_t)(pg->num * pMemBucket->bytes); } @@ -87,8 +97,9 @@ static void resetPosInfo(SSlotInfo *pInfo) { pInfo->data = NULL; } -double findOnlyResult(tMemBucket *pMemBucket) { - assert(pMemBucket->total == 1); +int32_t findOnlyResult(tMemBucket *pMemBucket, double *result) { + 
ASSERT(pMemBucket->total == 1); + terrno = 0; for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) { tMemBucketSlot *pSlot = &pMemBucket->pSlots[i]; @@ -100,19 +111,22 @@ double findOnlyResult(tMemBucket *pMemBucket) { SArray **pList = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); if (pList != NULL) { SArray *list = *pList; - assert(list->size == 1); + ASSERT(list->size == 1); int32_t *pageId = taosArrayGet(list, 0); SFilePage *pPage = getBufPage(pMemBucket->pBuffer, *pageId); - assert(pPage->num == 1); + if (pPage == NULL) { + return terrno; + } + ASSERT(pPage->num == 1); - double v = 0; - GET_TYPED_DATA(v, double, pMemBucket->type, pPage->data); - return v; + GET_TYPED_DATA(*result, double, pMemBucket->type, pPage->data); + return TSDB_CODE_SUCCESS; } } - return 0; + *result = 0.0; + return TSDB_CODE_SUCCESS; } int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { @@ -140,7 +154,8 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { } } - assert(index >= 0 && index < pBucket->numOfSlots); + ASSERTS(index >= 0 && index < pBucket->numOfSlots, "tBucketIntHash Error, index:%d, numOfSlots:%d", + index, pBucket->numOfSlots); return index; } @@ -167,7 +182,7 @@ int32_t tBucketUintHash(tMemBucket *pBucket, const void *value) { } } - assert(index >= 0 && index < pBucket->numOfSlots); + ASSERT(index >= 0 && index < pBucket->numOfSlots); return index; } @@ -198,7 +213,7 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { } } - assert(index >= 0 && index < pBucket->numOfSlots); + ASSERT(index >= 0 && index < pBucket->numOfSlots); return index; } @@ -268,7 +283,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, return NULL; } - int32_t ret = createDiskbasedBuf(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, "1", tsTempDir); + int32_t ret = createDiskbasedBuf(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 1024, "1", tsTempDir); if (ret != 0) { tMemBucketDestroy(pBucket); return NULL; @@ -331,7 +346,7 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT r->dMaxVal = v; } } else { - assert(0); + ASSERT(0); } } @@ -339,7 +354,7 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT * in memory bucket, we only accept data array list */ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { - assert(pBucket != NULL && data != NULL && size > 0); + ASSERT(pBucket != NULL && data != NULL && size > 0); int32_t count = 0; int32_t bytes = pBucket->bytes; @@ -361,7 +376,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { if (pSlot->info.data == NULL || pSlot->info.data->num >= pBucket->elemPerPage) { if (pSlot->info.data != NULL) { - assert(pSlot->info.data->num >= pBucket->elemPerPage && pSlot->info.size > 0); + ASSERT(pSlot->info.data->num >= pBucket->elemPerPage && pSlot->info.size > 0); // keep the pointer in memory setBufPageDirty(pSlot->info.data, true); @@ -379,6 +394,9 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { } pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId); + if (pSlot->info.data == NULL) { + return terrno; + } pSlot->info.pageId = pageId; taosArrayPush(pPageIdList, &pageId); } @@ -390,7 +408,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { } pBucket->total += count; - return 0; + return TSDB_CODE_SUCCESS; } 
//////////////////////////////////////////////////////////////////////////////////////////// @@ -407,14 +425,14 @@ static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int3 ++j; } - assert(j < pMemBucket->numOfSlots); + ASSERT(j < pMemBucket->numOfSlots); return pMemBucket->pSlots[j].range; } static bool isIdenticalData(tMemBucket *pMemBucket, int32_t index); static double getIdenticalDataVal(tMemBucket *pMemBucket, int32_t slotIndex) { - assert(isIdenticalData(pMemBucket, slotIndex)); + ASSERT(isIdenticalData(pMemBucket, slotIndex)); tMemBucketSlot *pSlot = &pMemBucket->pSlots[slotIndex]; @@ -430,7 +448,7 @@ static double getIdenticalDataVal(tMemBucket *pMemBucket, int32_t slotIndex) { return finalResult; } -double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) { +int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction, double *result) { int32_t num = 0; for (int32_t i = 0; i < pMemBucket->numOfSlots; ++i) { @@ -461,15 +479,19 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) minOfNextSlot = (double)next.dMinVal; } - assert(minOfNextSlot > maxOfThisSlot); + ASSERT(minOfNextSlot > maxOfThisSlot); - double val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot; - return val; + *result = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot; + return TSDB_CODE_SUCCESS; } if (pSlot->info.size <= pMemBucket->maxCapacity) { // data in buffer and file are merged together to be processed. SFilePage *buffer = loadDataFromFilePage(pMemBucket, i); + if (buffer == NULL) { + return terrno; + } + int32_t currentIdx = count - num; char *thisVal = buffer->data + pMemBucket->bytes * currentIdx; @@ -479,13 +501,14 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) GET_TYPED_DATA(td, double, pMemBucket->type, thisVal); GET_TYPED_DATA(nd, double, pMemBucket->type, nextVal); - double val = (1 - fraction) * td + fraction * nd; + *result = (1 - fraction) * td + fraction * nd; taosMemoryFreeClear(buffer); - return val; + return TSDB_CODE_SUCCESS; } else { // incur a second round bucket split if (isIdenticalData(pMemBucket, i)) { - return getIdenticalDataVal(pMemBucket, i); + *result = getIdenticalDataVal(pMemBucket, i); + return TSDB_CODE_SUCCESS; } // try next round @@ -498,36 +521,53 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) resetSlotInfo(pMemBucket); int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1); - SArray* list = *(SArray **)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); - ASSERT(list != NULL && list->size > 0); + + SArray* list; + void *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId)); + if (p != NULL) { + list = *(SArray **)p; + if (list == NULL || list->size <= 0) { + return -1; + } + } else { + return -1; + } for (int32_t f = 0; f < list->size; ++f) { int32_t *pageId = taosArrayGet(list, f); SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); - - tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num); + if (pg == NULL) { + return terrno; + } + + int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num); + if (code != TSDB_CODE_SUCCESS) { + return code; + } setBufPageDirty(pg, true); releaseBufPage(pMemBucket->pBuffer, pg); } - return getPercentileImpl(pMemBucket, count - num, fraction); + return getPercentileImpl(pMemBucket, count - num, fraction, result); } } else { num += pSlot->info.size; } } - return 0; + *result = 0; + 
return TSDB_CODE_SUCCESS; } -double getPercentile(tMemBucket *pMemBucket, double percent) { +int32_t getPercentile(tMemBucket *pMemBucket, double percent, double *result) { if (pMemBucket->total == 0) { - return 0.0; + *result = 0.0; + return TSDB_CODE_SUCCESS; } // if only one elements exists, return it if (pMemBucket->total == 1) { - return findOnlyResult(pMemBucket); + return findOnlyResult(pMemBucket, result); } percent = fabs(percent); @@ -537,21 +577,21 @@ double getPercentile(tMemBucket *pMemBucket, double percent) { MinMaxEntry *pRange = &pMemBucket->range; if (IS_SIGNED_NUMERIC_TYPE(pMemBucket->type)) { - double v = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->i64MaxVal : pRange->i64MinVal); - return v; + *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->i64MaxVal : pRange->i64MinVal); } else if (IS_UNSIGNED_NUMERIC_TYPE(pMemBucket->type)) { - double v = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->u64MaxVal : pRange->u64MinVal); - return v; + *result = (double)(fabs(percent - 100) < DBL_EPSILON ? pRange->u64MaxVal : pRange->u64MinVal); } else { - return fabs(percent - 100) < DBL_EPSILON ? pRange->dMaxVal : pRange->dMinVal; + *result = fabs(percent - 100) < DBL_EPSILON ? pRange->dMaxVal : pRange->dMinVal; } + + return TSDB_CODE_SUCCESS; } double percentVal = (percent * (pMemBucket->total - 1)) / ((double)100.0); // do put data by using buckets int32_t orderIdx = (int32_t)percentVal; - return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx); + return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx, result); } /* diff --git a/source/libs/monitor/src/monMain.c b/source/libs/monitor/src/monMain.c index b3ca0fa4528682f024cb2c101e2c99e1235ae164..b23a36d4df3acaef40116b2fd66559a661e3f8c1 100644 --- a/source/libs/monitor/src/monMain.c +++ b/source/libs/monitor/src/monMain.c @@ -20,6 +20,7 @@ #include "ttime.h" static SMonitor tsMonitor = {0}; +static char* tsMonUri = "/report"; void monRecordLog(int64_t ts, ELogLevel level, const char *content) { taosThreadMutexLock(&tsMonitor.lock); @@ -550,7 +551,7 @@ void monSendReport() { // uDebugL("report cont:%s\n", pCont); if (pCont != NULL) { EHttpCompFlag flag = tsMonitor.cfg.comp ? HTTP_GZIP : HTTP_FLAT; - if (taosSendHttpReport(tsMonitor.cfg.server, tsMonitor.cfg.port, pCont, strlen(pCont), flag) != 0) { + if (taosSendHttpReport(tsMonitor.cfg.server, tsMonUri, tsMonitor.cfg.port, pCont, strlen(pCont), flag) != 0) { uError("failed to send monitor msg"); } taosMemoryFree(pCont); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 2b93176fae784356dde7e1cafa2c42da280dfc13..b8737ea2a24415c03c6973dbec1f7e54c964600a 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -545,7 +545,7 @@ bufsize_opt(A) ::= BUFSIZE NK_INTEGER(B). /************************************************ create/drop stream **************************************************/ cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO - full_table_name(C) col_list_opt(H) tags_def_opt(F) subtable_opt(G) + full_table_name(C) col_list_opt(H) tag_def_or_ref_opt(F) subtable_opt(G) AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H); } cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); } @@ -554,6 +554,12 @@ cmd ::= DROP STREAM exists_opt(A) stream_name(B). col_list_opt(A) ::= . { A = NULL; } col_list_opt(A) ::= NK_LP col_name_list(B) NK_RP. 
{ A = B; } +%type tag_def_or_ref_opt { SNodeList* } +%destructor tag_def_or_ref_opt { nodesDestroyList($$); } +tag_def_or_ref_opt(A) ::= . { A = NULL; } +tag_def_or_ref_opt(A) ::= tags_def(B). { A = B; } +tag_def_or_ref_opt(A) ::= TAGS NK_LP col_name_list(B) NK_RP. { A = B; } + stream_options(A) ::= . { A = createStreamOptions(pCxt); } stream_options(A) ::= stream_options(B) TRIGGER AT_ONCE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_AT_ONCE; A = B; } stream_options(A) ::= stream_options(B) TRIGGER WINDOW_CLOSE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; A = B; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index fae62626fa3666909208ce9220215baea8be3810..126027c78f0629cf5490cce490cc819fa4e2984f 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -166,7 +166,7 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c code = reserveDnodeRequiredInCache(pCxt->pMetaCache); } if (TSDB_CODE_SUCCESS == code && - (0 == strcmp(pTable, TSDB_INS_TABLE_TAGS) || 0 == strcmp(pTable, TSDB_INS_TABLE_TABLES)) && + (0 == strcmp(pTable, TSDB_INS_TABLE_TAGS) || 0 == strcmp(pTable, TSDB_INS_TABLE_TABLES) || 0 == strcmp(pTable, TSDB_INS_TABLE_COLS)) && QUERY_NODE_SELECT_STMT == nodeType(pCxt->pStmt)) { code = collectMetaKeyFromInsTags(pCxt); } diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index b8ef7d970df5dd74176f6dcb0b7a8ab478e34129..7cc93b448fec577d7449b9d71e481b56080090d9 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -671,7 +671,6 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate goto end; } - colLength[c] = htonl(colLength[c]); int8_t* offset = pStart; if (IS_VAR_DATA_TYPE(pColSchema->type)) { pStart += numOfRows * sizeof(int32_t); @@ -687,4 +686,4 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate end: return ret; -} \ No newline at end of file +} diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 84241a6c8b693fc2b66967cb42dd12f026a5c6ac..a57a52041d19681f8c3245a1526b86f8aba984f1 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2210,7 +2210,8 @@ static int32_t dnodeToVgroupsInfo(SArray* pDnodes, SVgroupsInfo** pVgsInfo) { } static bool sysTableFromVnode(const char* pTable) { - return ((0 == strcmp(pTable, TSDB_INS_TABLE_TABLES)) || (0 == strcmp(pTable, TSDB_INS_TABLE_TAGS))); + return ((0 == strcmp(pTable, TSDB_INS_TABLE_TABLES)) || (0 == strcmp(pTable, TSDB_INS_TABLE_TAGS)) || + (0 == strcmp(pTable, TSDB_INS_TABLE_COLS))); } static bool sysTableFromDnode(const char* pTable) { return 0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES); } @@ -2278,7 +2279,9 @@ static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName, ((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true; } - if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES) && !hasUserDbCond) { + if (TSDB_CODE_SUCCESS == code && + (0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES) && !hasUserDbCond) || + 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_COLS)) { code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &pVgs); } @@ -2376,7 +2379,8 @@ static bool isSingleTable(SRealTableNode* pRealTable) { int8_t tableType = pRealTable->pMeta->tableType; if 
(TSDB_SYSTEM_TABLE == tableType) { return 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES) && - 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS); + 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) && + 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_COLS); } return (TSDB_CHILD_TABLE == tableType || TSDB_NORMAL_TABLE == tableType); } @@ -4991,7 +4995,7 @@ static const SSchema* getColSchema(const STableMeta* pTableMeta, const char* pCo return NULL; } -static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) { +static SSchema* getTagSchema(const STableMeta* pTableMeta, const char* pTagName) { int32_t numOfTags = getNumOfTags(pTableMeta); SSchema* pTagsSchema = getTableTagSchema(pTableMeta); for (int32_t i = 0; i < numOfTags; ++i) { @@ -5600,7 +5604,8 @@ static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* a return; } -static int32_t addWstartTsToCreateStreamQueryImpl(SSelectStmt* pSelect, SHashObj* pUserAliasSet) { +static int32_t addWstartTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSelectStmt* pSelect, + SHashObj* pUserAliasSet) { SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0); if (NULL == pSelect->pWindow || (QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_wstart", ((SFunctionNode*)pProj)->functionName))) { @@ -5612,7 +5617,10 @@ static int32_t addWstartTsToCreateStreamQueryImpl(SSelectStmt* pSelect, SHashObj } strcpy(pFunc->functionName, "_wstart"); getStreamQueryFirstProjectAliasName(pUserAliasSet, pFunc->node.aliasName, sizeof(pFunc->node.aliasName)); - int32_t code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc); + int32_t code = getFuncInfo(pCxt, pFunc); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc); + } if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode((SNode*)pFunc); } @@ -5624,12 +5632,19 @@ static int32_t addWstartTsToCreateStreamQuery(STranslateContext* pCxt, SNode* pS SHashObj* pUserAliasSet = NULL; int32_t code = checkProjectAlias(pCxt, pSelect->pProjectionList, &pUserAliasSet); if (TSDB_CODE_SUCCESS == code) { - code = addWstartTsToCreateStreamQueryImpl(pSelect, pUserAliasSet); + code = addWstartTsToCreateStreamQueryImpl(pCxt, pSelect, pUserAliasSet); } taosHashCleanup(pUserAliasSet); return code; } +static const char* getTagNameForCreateStreamTag(SNode* pTag) { + if (QUERY_NODE_COLUMN_DEF == nodeType(pTag)) { + return ((SColumnDefNode*)pTag)->colName; + } + return ((SColumnNode*)pTag)->colName; +} + static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SSelectStmt* pSelect) { if (NULL == pStmt->pTags) { return TSDB_CODE_SUCCESS; @@ -5640,7 +5655,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream bool found = false; SNode* pPart = NULL; FOREACH(pPart, pSelect->pPartitionByList) { - if (0 == strcmp(((SColumnDefNode*)pTag)->colName, ((SExprNode*)pPart)->userAlias)) { + if (0 == strcmp(getTagNameForCreateStreamTag(pTag), ((SExprNode*)pPart)->userAlias)) { if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSelect->pTags, nodesCloneNode(pPart))) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -5649,12 +5664,36 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream } } if (!found) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnDefNode*)pTag)->colName); + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnDefNode*)pTag)->colName); } } 
return TSDB_CODE_SUCCESS; } +static SNode* createNullValue() { + SValueNode* pValue = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + if (NULL == pValue) { + return NULL; + } + pValue->isNull = true; + pValue->node.resType.type = TSDB_DATA_TYPE_NULL; + pValue->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes; + return (SNode*)pValue; +} + +static int32_t addNullTagsForExistTable(STranslateContext* pCxt, STableMeta* pMeta, SSelectStmt* pSelect) { + if (NULL == pMeta) { + return TSDB_CODE_SUCCESS; + } + + int32_t numOfTags = getNumOfTags(pMeta); + int32_t code = TSDB_CODE_SUCCESS; + for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < numOfTags; ++i) { + code = nodesListMakeStrictAppend(&pSelect->pTags, createNullValue()); + } + return code; +} + typedef struct SRewriteSubtableCxt { STranslateContext* pCxt; SNodeList* pPartitionList; @@ -5700,13 +5739,11 @@ static int32_t addSubtableNameToCreateStreamQuery(STranslateContext* pCxt, SCrea return pCxt->errCode; } -static int32_t addSubtableInfoToCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { +static int32_t addSubtableInfoToCreateStreamQuery(STranslateContext* pCxt, STableMeta* pMeta, + SCreateStreamStmt* pStmt) { SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; if (NULL == pSelect->pPartitionByList) { - if (NULL != pStmt->pTags || NULL != pStmt->pSubtable) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); - } - return TSDB_CODE_SUCCESS; + return addNullTagsForExistTable(pCxt, pMeta, pSelect); } int32_t code = addTagsToCreateStreamQuery(pCxt, pStmt, pSelect); @@ -5744,7 +5781,7 @@ static int32_t adjustDataTypeOfProjections(STranslateContext* pCxt, const STable int32_t index = 0; SNode* pProj = NULL; FOREACH(pProj, pProjections) { - SSchema* pSchema = pSchemas + index; + SSchema* pSchema = pSchemas + index++; SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes}; if (!dataTypeEqual(&dt, &((SExprNode*)pProj)->resType)) { SNode* pFunc = NULL; @@ -5765,7 +5802,7 @@ typedef struct SProjColPos { } SProjColPos; static int32_t projColPosCompar(const void* l, const void* r) { - return ((SProjColPos*)l)->colId < ((SProjColPos*)r)->colId; + return ((SProjColPos*)l)->colId > ((SProjColPos*)r)->colId; } static void projColPosDelete(void* p) { taosMemoryFree(((SProjColPos*)p)->pProj); } @@ -5793,8 +5830,31 @@ static int32_t addProjToProjColPos(STranslateContext* pCxt, const SSchema* pSche return code; } -static int32_t adjustOrderOfProjection(STranslateContext* pCxt, SNodeList* pCols, const STableMeta* pMeta, - SNodeList** pProjections) { +static int32_t setFillNullCols(SArray* pProjColPos, const STableMeta* pMeta, SCMCreateStreamReq* pReq) { + int32_t numOfBoundCols = taosArrayGetSize(pProjColPos); + pReq->fillNullCols = taosArrayInit(pMeta->tableInfo.numOfColumns - numOfBoundCols, sizeof(SColLocation)); + if (NULL == pReq->fillNullCols) { + return TSDB_CODE_OUT_OF_MEMORY; + } + const SSchema* pSchemas = getTableColumnSchema(pMeta); + int32_t indexOfBoundCols = 0; + for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + const SSchema* pSchema = pSchemas + i; + if (indexOfBoundCols < numOfBoundCols) { + SProjColPos* pPos = taosArrayGet(pProjColPos, indexOfBoundCols); + if (pSchema->colId == pPos->colId) { + ++indexOfBoundCols; + continue; + } + } + SColLocation colLoc = {.colId = pSchema->colId, .slotId = i, .type = pSchema->type}; + taosArrayPush(pReq->fillNullCols, &colLoc); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t 
adjustOrderOfProjections(STranslateContext* pCxt, SNodeList* pCols, const STableMeta* pMeta, + SNodeList** pProjections, SCMCreateStreamReq* pReq) { if (LIST_LENGTH(pCols) != LIST_LENGTH(*pProjections)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of columns"); } @@ -5809,7 +5869,12 @@ static int32_t adjustOrderOfProjection(STranslateContext* pCxt, SNodeList* pCols SNode* pProj = NULL; FORBOTH(pCol, pCols, pProj, *pProjections) { const SSchema* pSchema = getColSchema(pMeta, ((SColumnNode*)pCol)->colName); - code = addProjToProjColPos(pCxt, pSchema, pProj, pProjColPos); + if (NULL == pSchema) { + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnNode*)pCol)->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = addProjToProjColPos(pCxt, pSchema, pProj, pProjColPos); + } if (TSDB_CODE_SUCCESS != code) { break; } @@ -5830,6 +5895,10 @@ static int32_t adjustOrderOfProjection(STranslateContext* pCxt, SNodeList* pCols } } + if (TSDB_CODE_SUCCESS == code && pMeta->tableInfo.numOfColumns > LIST_LENGTH(pCols)) { + code = setFillNullCols(pProjColPos, pMeta, pReq); + } + if (TSDB_CODE_SUCCESS == code) { taosArrayDestroy(pProjColPos); nodesDestroyList(*pProjections); @@ -5842,52 +5911,160 @@ static int32_t adjustOrderOfProjection(STranslateContext* pCxt, SNodeList* pCols return code; } -static int32_t adjustStreamQueryForExistTableImpl(STranslateContext* pCxt, SCreateStreamStmt* pStmt, - const STableMeta* pMeta) { +static int32_t adjustProjectionsForExistTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, + const STableMeta* pMeta, SCMCreateStreamReq* pReq) { SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; if (NULL == pStmt->pCols) { return adjustDataTypeOfProjections(pCxt, pMeta, pSelect->pProjectionList); } - return adjustOrderOfProjection(pCxt, pStmt->pCols, pMeta, &pSelect->pProjectionList); + return adjustOrderOfProjections(pCxt, pStmt->pCols, pMeta, &pSelect->pProjectionList, pReq); } -static int32_t adjustStreamQueryForExistTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, - SCMCreateStreamReq* pReq) { - STableMeta* pMeta = NULL; - int32_t code = getTableMeta(pCxt, pStmt->targetDbName, pStmt->targetTabName, &pMeta); +static int32_t adjustDataTypeOfTags(STranslateContext* pCxt, const STableMeta* pMeta, SNodeList* pTags) { + if (getNumOfTags(pMeta) != LIST_LENGTH(pTags)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of tags"); + } + + SSchema* pSchemas = getTableTagSchema(pMeta); + int32_t index = 0; + SNode* pTag = NULL; + FOREACH(pTag, pTags) { + SSchema* pSchema = pSchemas + index++; + SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes}; + if (!dataTypeEqual(&dt, &((SExprNode*)pTag)->resType)) { + SNode* pFunc = NULL; + int32_t code = createCastFunc(pCxt, pTag, dt, &pFunc); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + REPLACE_NODE(pFunc); + } + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t adjustOrderOfTags(STranslateContext* pCxt, SNodeList* pTags, const STableMeta* pMeta, + SNodeList** pTagExprs, SCMCreateStreamReq* pReq) { + if (LIST_LENGTH(pTags) != LIST_LENGTH(*pTagExprs)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of tags"); + } + + SArray* pTagPos = taosArrayInit(LIST_LENGTH(pTags), sizeof(SProjColPos)); + if (NULL == pTagPos) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + int32_t code = TSDB_CODE_SUCCESS; + SNode* pTag = NULL; + SNode* pTagExpr 
= NULL; + FORBOTH(pTag, pTags, pTagExpr, *pTagExprs) { + const SSchema* pSchema = getTagSchema(pMeta, ((SColumnNode*)pTag)->colName); + if (NULL == pSchema) { + code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, ((SColumnNode*)pTag)->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = addProjToProjColPos(pCxt, pSchema, pTagExpr, pTagPos); + } + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + + SNodeList* pNewTagExprs = NULL; + if (TSDB_CODE_SUCCESS == code) { + taosArraySort(pTagPos, projColPosCompar); + int32_t indexOfBoundTags = 0; + int32_t numOfBoundTags = taosArrayGetSize(pTagPos); + int32_t numOfTags = getNumOfTags(pMeta); + const SSchema* pTagsSchema = getTableTagSchema(pMeta); + pNewTagExprs = nodesMakeList(); + if (NULL == pNewTagExprs) { + code = TSDB_CODE_OUT_OF_MEMORY; + } + for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < numOfTags; ++i) { + const SSchema* pTagSchema = pTagsSchema + i; + if (indexOfBoundTags < numOfBoundTags) { + SProjColPos* pPos = taosArrayGet(pTagPos, indexOfBoundTags); + if (pPos->colId == pTagSchema->colId) { + ++indexOfBoundTags; + code = nodesListStrictAppend(pNewTagExprs, pPos->pProj); + pPos->pProj = NULL; + continue; + } + } + code = nodesListStrictAppend(pNewTagExprs, createNullValue()); + } + } + + if (TSDB_CODE_SUCCESS == code) { + taosArrayDestroy(pTagPos); + nodesDestroyList(*pTagExprs); + *pTagExprs = pNewTagExprs; + } else { + taosArrayDestroyEx(pTagPos, projColPosDelete); + nodesDestroyList(pNewTagExprs); + } + + return code; +} + +static int32_t adjustTagsForExistTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, const STableMeta* pMeta, + SCMCreateStreamReq* pReq) { + SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; + if (NULL == pSelect->pPartitionByList) { + return TSDB_CODE_SUCCESS; + } + if (NULL == pStmt->pTags) { + return adjustDataTypeOfTags(pCxt, pMeta, pSelect->pTags); + } + return adjustOrderOfTags(pCxt, pStmt->pTags, pMeta, &pSelect->pTags, pReq); +} + +static int32_t translateStreamTargetTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq, + STableMeta** pMeta) { + int32_t code = getTableMeta(pCxt, pStmt->targetDbName, pStmt->targetTabName, pMeta); if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code) { if (NULL != pStmt->pCols) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TABLE_NOT_EXIST, pStmt->targetTabName); } pReq->createStb = STREAM_CREATE_STABLE_TRUE; + pReq->targetStbUid = 0; return TSDB_CODE_SUCCESS; + } else { + pReq->createStb = STREAM_CREATE_STABLE_FALSE; + pReq->targetStbUid = (*pMeta)->suid; } - if (TSDB_CODE_SUCCESS == code) { - code = adjustStreamQueryForExistTableImpl(pCxt, pStmt, pMeta); - } - taosMemoryFree(pMeta); return code; } static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pCxt->createStream = true; - int32_t code = addWstartTsToCreateStreamQuery(pCxt, pStmt->pQuery); + STableMeta* pMeta = NULL; + int32_t code = translateStreamTargetTable(pCxt, pStmt, pReq, &pMeta); if (TSDB_CODE_SUCCESS == code) { - code = addSubtableInfoToCreateStreamQuery(pCxt, pStmt); + code = addSubtableInfoToCreateStreamQuery(pCxt, pMeta, pStmt); } if (TSDB_CODE_SUCCESS == code) { code = translateQuery(pCxt, pStmt->pQuery); } if (TSDB_CODE_SUCCESS == code) { - code = checkStreamQuery(pCxt, pStmt); + code = addWstartTsToCreateStreamQuery(pCxt, pStmt->pQuery); } if (TSDB_CODE_SUCCESS == code) { - code = adjustStreamQueryForExistTable(pCxt, pStmt, pReq); + code = checkStreamQuery(pCxt, 
pStmt); + } + if (TSDB_CODE_SUCCESS == code && NULL != pMeta) { + code = adjustProjectionsForExistTable(pCxt, pStmt, pMeta, pReq); + } + if (TSDB_CODE_SUCCESS == code && NULL != pMeta) { + code = adjustTagsForExistTable(pCxt, pStmt, pMeta, pReq); } if (TSDB_CODE_SUCCESS == code) { getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB); code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } + taosMemoryFree(pMeta); return code; } @@ -5918,8 +6095,10 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pReq->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0); pReq->fillHistory = pStmt->pOptions->fillHistory; pReq->igExpired = pStmt->pOptions->ignoreExpired; - columnDefNodeToField(pStmt->pTags, &pReq->pTags); - pReq->numOfTags = LIST_LENGTH(pStmt->pTags); + if (pReq->createStb) { + columnDefNodeToField(pStmt->pTags, &pReq->pTags); + pReq->numOfTags = LIST_LENGTH(pStmt->pTags); + } } return code; diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index 487964df688818007b5febb9207db62cc16ebf82..e306cebfab1ec481f92b926a8184f84310b3413c 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -104,26 +104,26 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 465 +#define YYNOCODE 466 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SToken typedef union { int yyinit; ParseTOKENTYPE yy0; - EJoinType yy42; - int8_t yy113; - int64_t yy159; - SToken yy179; - EOperatorType yy290; - EFillMode yy324; - SDataType yy394; - ENullOrder yy487; - SNode* yy602; - bool yy767; - int32_t yy820; - SAlterOption yy845; - SNodeList* yy874; - EOrder yy878; + int8_t yy27; + ENullOrder yy89; + int64_t yy129; + SToken yy233; + SAlterOption yy257; + bool yy397; + EJoinType yy428; + EFillMode yy646; + SNodeList* yy776; + int32_t yy832; + SDataType yy852; + EOperatorType yy856; + EOrder yy870; + SNode* yy924; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -139,6 +139,7 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 +<<<<<<< HEAD #define YYNSTATE 733 #define YYNRULE 554 #define YYNTOKEN 326 @@ -150,6 +151,19 @@ typedef union { #define YY_NO_ACTION 1640 #define YY_MIN_REDUCE 1641 #define YY_MAX_REDUCE 2194 +======= +#define YYNSTATE 734 +#define YYNRULE 556 +#define YYNTOKEN 326 +#define YY_MAX_SHIFT 733 +#define YY_MIN_SHIFTREDUCE 1087 +#define YY_MAX_SHIFTREDUCE 1642 +#define YY_ERROR_ACTION 1643 +#define YY_ACCEPT_ACTION 1644 +#define YY_NO_ACTION 1645 +#define YY_MIN_REDUCE 1646 +#define YY_MAX_REDUCE 2201 +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -216,6 +230,7 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ +<<<<<<< HEAD #define YY_ACTTAB_COUNT (2883) static const YYACTIONTYPE yy_action[] = { /* 0 */ 1914, 1784, 474, 2170, 475, 1677, 377, 2165, 1994, 1841, @@ -989,6 +1004,795 @@ static const YYACTIONTYPE yy_default[] = { /* 710 */ 1638, 1638, 1638, 1638, 1638, 1638, 1638, 1638, 1638, 1638, /* 720 */ 1638, 1638, 1638, 1638, 1638, 1638, 1638, 1638, 1638, 1638, /* 730 */ 1638, 1638, 1638, +======= +#define YY_ACTTAB_COUNT (2952) +static const YYACTIONTYPE yy_action[] = { + /* 0 */ 1918, 2177, 366, 623, 1788, 2172, 474, 2001, 475, 1682, + /* 10 */ 163, 557, 45, 43, 1572, 1916, 610, 54, 1997, 1801, + /* 20 */ 374, 2176, 1421, 38, 37, 2173, 2175, 44, 42, 41, + /* 30 */ 40, 39, 419, 1502, 1799, 1419, 1715, 44, 42, 41, + /* 40 */ 40, 39, 38, 37, 2001, 175, 44, 42, 41, 40, + /* 50 */ 39, 1993, 1999, 356, 219, 1997, 377, 377, 1497, 163, + /* 60 */ 348, 2015, 633, 18, 160, 163, 344, 1901, 1802, 170, + /* 70 */ 1427, 354, 327, 1801, 1801, 509, 505, 501, 497, 216, + /* 80 */ 1850, 598, 379, 45, 43, 1845, 1847, 1448, 1993, 1999, + /* 90 */ 369, 374, 2033, 1421, 1446, 14, 2177, 337, 104, 633, + /* 100 */ 601, 528, 527, 526, 1502, 1983, 1419, 639, 101, 136, + /* 110 */ 522, 1609, 139, 1448, 521, 86, 480, 730, 214, 520, + /* 120 */ 525, 349, 476, 347, 346, 519, 515, 60, 1446, 1497, + /* 130 */ 517, 1120, 1504, 1505, 18, 2014, 623, 105, 1531, 2050, + /* 140 */ 177, 1427, 108, 2016, 643, 2018, 2019, 638, 1852, 633, + /* 150 */ 131, 140, 516, 1839, 178, 353, 2103, 513, 1421, 1791, + /* 160 */ 368, 2099, 1477, 1487, 1850, 1430, 14, 1799, 1503, 1506, + /* 170 */ 1122, 1419, 1125, 1126, 183, 600, 179, 2111, 2112, 85, + /* 180 */ 137, 2116, 2129, 1422, 234, 1420, 213, 207, 730, 1632, + /* 190 */ 64, 212, 38, 37, 488, 1532, 44, 42, 41, 40, + /* 200 */ 39, 1447, 1795, 1504, 1505, 165, 1427, 1658, 1425, 1426, + /* 210 */ 205, 1476, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, + /* 220 */ 635, 631, 1495, 1496, 1498, 1499, 1500, 1501, 2, 60, + /* 230 */ 1139, 266, 1138, 1477, 1487, 492, 1344, 1345, 121, 1503, + /* 240 */ 1506, 120, 119, 118, 117, 116, 115, 114, 113, 112, + /* 250 */ 1398, 1399, 2033, 730, 1422, 622, 1420, 266, 38, 37, + /* 260 */ 580, 1140, 44, 42, 41, 40, 39, 34, 372, 1526, + /* 270 */ 1527, 1528, 1529, 1530, 1534, 1535, 1536, 1537, 184, 1425, + /* 280 */ 1426, 60, 1476, 1479, 1480, 1481, 1482, 1483, 1484, 1485, + /* 290 */ 1486, 635, 631, 1495, 1496, 1498, 1499, 1500, 1501, 2, + /* 300 */ 175, 11, 45, 43, 579, 483, 598, 475, 1682, 457, + /* 310 */ 374, 420, 1421, 1361, 1362, 1433, 412, 622, 411, 1422, + /* 320 */ 49, 1420, 1902, 1502, 421, 1419, 1246, 665, 664, 663, + /* 330 */ 1250, 662, 1252, 1253, 661, 1255, 658, 139, 1261, 655, + /* 340 */ 1263, 1264, 652, 649, 1425, 1426, 1588, 608, 1497, 1360, + /* 350 */ 1363, 31, 267, 18, 41, 40, 39, 38, 37, 1639, + /* 360 */ 1427, 44, 42, 41, 40, 39, 598, 197, 196, 528, + /* 370 */ 527, 526, 2118, 45, 43, 1507, 1882, 136, 522, 593, + /* 380 */ 184, 374, 521, 1421, 1599, 14, 1646, 520, 525, 60, + /* 390 */ 456, 91, 48, 519, 1502, 622, 1419, 139, 2115, 408, + /* 400 */ 1533, 264, 2111, 597, 1669, 132, 596, 730, 1447, 2172, + /* 410 */ 130, 129, 128, 127, 126, 125, 124, 123, 122, 1497, + /* 420 */ 410, 406, 1504, 1505, 585, 181, 1918, 35, 285, 2173, + /* 430 */ 587, 1427, 184, 573, 1597, 1598, 1600, 1601, 365, 38, + /* 440 */ 37, 1915, 610, 44, 42, 41, 40, 39, 1983, 581, + /* 450 */ 166, 1449, 1477, 1487, 1638, 1753, 46, 52, 1503, 1506, + /* 460 */ 1647, 180, 2111, 2112, 561, 
137, 2116, 1852, 233, 1445, + /* 470 */ 48, 184, 32, 1422, 341, 1420, 473, 1777, 730, 478, + /* 480 */ 1688, 121, 1538, 1850, 120, 119, 118, 117, 116, 115, + /* 490 */ 114, 113, 112, 1504, 1505, 276, 277, 413, 1425, 1426, + /* 500 */ 275, 1476, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, + /* 510 */ 635, 631, 1495, 1496, 1498, 1499, 1500, 1501, 2, 733, + /* 520 */ 1289, 1290, 482, 1477, 1487, 478, 1688, 437, 623, 1503, + /* 530 */ 1506, 88, 332, 292, 1446, 546, 436, 544, 542, 492, + /* 540 */ 184, 562, 186, 586, 1422, 2172, 1420, 2172, 174, 1776, + /* 550 */ 677, 540, 609, 538, 723, 719, 715, 711, 290, 1799, + /* 560 */ 2178, 181, 585, 181, 551, 2173, 587, 2173, 587, 1425, + /* 570 */ 1426, 604, 1476, 1479, 1480, 1481, 1482, 1483, 1484, 1485, + /* 580 */ 1486, 635, 631, 1495, 1496, 1498, 1499, 1500, 1501, 2, + /* 590 */ 45, 43, 2118, 533, 106, 244, 1954, 283, 374, 490, + /* 600 */ 1421, 1911, 1644, 609, 594, 675, 576, 1852, 543, 60, + /* 610 */ 1775, 1502, 2118, 1419, 362, 562, 1846, 1847, 2114, 2172, + /* 620 */ 2015, 184, 232, 1850, 153, 152, 672, 671, 670, 150, + /* 630 */ 619, 11, 623, 9, 2178, 181, 1497, 536, 2113, 2173, + /* 640 */ 587, 235, 530, 1139, 1668, 1138, 131, 231, 1427, 1667, + /* 650 */ 607, 2033, 1911, 518, 13, 12, 675, 11, 218, 640, + /* 660 */ 193, 45, 43, 1799, 1983, 270, 639, 1446, 389, 374, + /* 670 */ 269, 1421, 677, 46, 1140, 153, 152, 672, 671, 670, + /* 680 */ 150, 1427, 1502, 68, 1419, 1206, 67, 1387, 1983, 238, + /* 690 */ 582, 577, 571, 1983, 2014, 730, 82, 141, 2050, 81, + /* 700 */ 2074, 167, 2016, 643, 2018, 2019, 638, 1497, 633, 586, + /* 710 */ 1504, 1505, 562, 2172, 524, 523, 2172, 38, 37, 1427, + /* 720 */ 1208, 44, 42, 41, 40, 39, 609, 299, 585, 181, + /* 730 */ 1829, 2178, 181, 2173, 587, 1689, 2173, 587, 701, 699, + /* 740 */ 1477, 1487, 563, 2140, 14, 33, 1503, 1506, 623, 1545, + /* 750 */ 668, 38, 37, 27, 598, 44, 42, 41, 40, 39, + /* 760 */ 184, 1422, 417, 1420, 38, 37, 730, 1852, 44, 42, + /* 770 */ 41, 40, 39, 618, 367, 1911, 1666, 689, 625, 1799, + /* 780 */ 2075, 1504, 1505, 1850, 725, 139, 1425, 1426, 1774, 1476, + /* 790 */ 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 635, 631, + /* 800 */ 1495, 1496, 1498, 1499, 1500, 1501, 2, 85, 627, 1478, + /* 810 */ 2075, 1477, 1487, 330, 1784, 1444, 1897, 1503, 1506, 1852, + /* 820 */ 1983, 135, 450, 380, 623, 464, 378, 189, 463, 2176, + /* 830 */ 1794, 163, 1422, 1786, 1420, 1850, 387, 99, 418, 669, + /* 840 */ 1801, 1790, 1843, 433, 50, 465, 3, 1665, 435, 182, + /* 850 */ 2111, 2112, 1997, 137, 2116, 1799, 1782, 1425, 1426, 1792, + /* 860 */ 1476, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 635, + /* 870 */ 631, 1495, 1496, 1498, 1499, 1500, 1501, 2, 623, 623, + /* 880 */ 562, 1576, 1569, 164, 2172, 1993, 1999, 1446, 305, 345, + /* 890 */ 1712, 1983, 427, 442, 667, 675, 633, 623, 623, 2178, + /* 900 */ 181, 423, 303, 71, 2173, 587, 70, 1852, 8, 1799, + /* 910 */ 1799, 443, 491, 1449, 153, 152, 672, 671, 670, 150, + /* 920 */ 162, 1897, 1897, 1851, 201, 470, 468, 673, 1799, 1799, + /* 930 */ 1843, 461, 191, 195, 455, 454, 453, 452, 449, 448, + /* 940 */ 447, 446, 445, 441, 440, 439, 438, 329, 430, 429, + /* 950 */ 428, 1664, 425, 424, 343, 707, 706, 705, 704, 384, + /* 960 */ 60, 703, 702, 143, 697, 696, 695, 694, 693, 692, + /* 970 */ 691, 155, 687, 686, 685, 383, 382, 682, 681, 680, + /* 980 */ 679, 678, 674, 386, 690, 1843, 1769, 590, 1663, 623, + /* 990 */ 1512, 549, 371, 370, 2177, 1983, 1446, 623, 2172, 107, + /* 1000 */ 38, 37, 1435, 1796, 44, 42, 41, 40, 39, 1125, + /* 1010 */ 1126, 
236, 623, 1502, 2176, 1428, 1641, 1642, 2173, 2174, + /* 1020 */ 1799, 1662, 623, 1449, 2123, 1565, 558, 562, 1799, 1478, + /* 1030 */ 623, 2172, 1983, 1661, 1660, 562, 602, 151, 1497, 2172, + /* 1040 */ 79, 78, 416, 1799, 606, 188, 2178, 181, 623, 2015, + /* 1050 */ 1427, 2173, 587, 1799, 2178, 181, 1970, 190, 1657, 2173, + /* 1060 */ 587, 1799, 280, 328, 2002, 1983, 404, 623, 402, 398, + /* 1070 */ 394, 391, 388, 239, 72, 1997, 2015, 1983, 1983, 1799, + /* 1080 */ 2033, 620, 623, 1702, 1656, 623, 623, 151, 640, 53, + /* 1090 */ 1659, 589, 1655, 1983, 630, 639, 621, 629, 1799, 286, + /* 1100 */ 381, 1654, 1983, 1653, 396, 529, 1429, 2033, 1993, 1999, + /* 1110 */ 1754, 184, 1652, 1799, 1565, 601, 1799, 1799, 224, 633, + /* 1120 */ 1983, 222, 639, 2014, 80, 1568, 1651, 2050, 1983, 1650, + /* 1130 */ 108, 2016, 643, 2018, 2019, 638, 1983, 633, 1478, 1393, + /* 1140 */ 142, 2015, 148, 2074, 2103, 1983, 517, 1983, 368, 2099, + /* 1150 */ 2014, 1649, 226, 634, 2050, 225, 1983, 108, 2016, 643, + /* 1160 */ 2018, 2019, 638, 1436, 633, 1431, 151, 243, 516, 178, + /* 1170 */ 1983, 2103, 2033, 1983, 1695, 368, 2099, 145, 228, 133, + /* 1180 */ 640, 227, 242, 62, 2143, 1983, 1693, 639, 1439, 1441, + /* 1190 */ 248, 230, 151, 1168, 229, 1983, 531, 2130, 2004, 2015, + /* 1200 */ 47, 631, 1495, 1496, 1498, 1499, 1500, 1501, 534, 273, + /* 1210 */ 591, 683, 69, 149, 151, 2014, 13, 12, 1396, 2050, + /* 1220 */ 62, 89, 108, 2016, 643, 2018, 2019, 638, 1169, 633, + /* 1230 */ 2033, 261, 2015, 1187, 2192, 1596, 2103, 47, 640, 1523, + /* 1240 */ 368, 2099, 250, 1983, 605, 639, 2006, 684, 47, 647, + /* 1250 */ 149, 2137, 1358, 151, 217, 134, 1432, 574, 255, 2034, + /* 1260 */ 149, 278, 385, 2033, 615, 282, 1239, 1683, 1906, 1185, + /* 1270 */ 1840, 640, 1539, 2014, 2133, 599, 1983, 2050, 639, 260, + /* 1280 */ 108, 2016, 643, 2018, 2019, 638, 263, 633, 1, 1488, + /* 1290 */ 2015, 390, 2192, 4, 2103, 395, 342, 1380, 368, 2099, + /* 1300 */ 298, 1267, 1271, 293, 194, 1278, 2014, 1276, 422, 2150, + /* 1310 */ 2050, 1449, 154, 108, 2016, 643, 2018, 2019, 638, 1907, + /* 1320 */ 633, 2033, 426, 459, 431, 2192, 1444, 2103, 444, 640, + /* 1330 */ 451, 368, 2099, 1899, 1983, 458, 639, 460, 466, 467, + /* 1340 */ 1450, 198, 569, 469, 471, 472, 481, 1452, 204, 484, + /* 1350 */ 1447, 206, 485, 1451, 486, 1453, 487, 1142, 489, 512, + /* 1360 */ 209, 211, 493, 1960, 2014, 83, 84, 215, 2050, 510, + /* 1370 */ 2015, 108, 2016, 643, 2018, 2019, 638, 511, 633, 331, + /* 1380 */ 548, 514, 1959, 2192, 111, 2103, 1789, 221, 550, 368, + /* 1390 */ 2099, 87, 1785, 2015, 223, 156, 157, 147, 1787, 294, + /* 1400 */ 2166, 2033, 1783, 237, 158, 552, 159, 553, 556, 640, + /* 1410 */ 240, 559, 2149, 2134, 1983, 575, 639, 2148, 613, 7, + /* 1420 */ 584, 2125, 566, 2144, 2033, 572, 254, 171, 357, 256, + /* 1430 */ 578, 567, 640, 257, 565, 358, 564, 1983, 246, 639, + /* 1440 */ 2195, 258, 249, 595, 2014, 592, 2171, 1565, 2050, 138, + /* 1450 */ 1448, 108, 2016, 643, 2018, 2019, 638, 262, 633, 361, + /* 1460 */ 2015, 268, 603, 2192, 259, 2103, 1454, 2014, 94, 368, + /* 1470 */ 2099, 2050, 2119, 1912, 108, 2016, 643, 2018, 2019, 638, + /* 1480 */ 2122, 633, 295, 611, 2015, 612, 2078, 1926, 2103, 296, + /* 1490 */ 1925, 2033, 368, 2099, 616, 96, 1924, 297, 617, 640, + /* 1500 */ 1800, 364, 98, 59, 1983, 2084, 639, 100, 645, 1844, + /* 1510 */ 1770, 729, 726, 289, 300, 2033, 727, 324, 333, 334, + /* 1520 */ 309, 51, 304, 640, 302, 1977, 1976, 323, 1983, 76, + /* 1530 */ 639, 1975, 2015, 77, 2014, 313, 1974, 1971, 2050, 392, + /* 1540 */ 393, 
108, 2016, 643, 2018, 2019, 638, 1413, 633, 1414, + /* 1550 */ 187, 397, 1969, 2076, 399, 2103, 2015, 400, 2014, 368, + /* 1560 */ 2099, 401, 2050, 2033, 1968, 108, 2016, 643, 2018, 2019, + /* 1570 */ 638, 640, 633, 403, 1967, 405, 1983, 626, 639, 2103, + /* 1580 */ 1966, 407, 1965, 368, 2099, 409, 1383, 2033, 1382, 1937, + /* 1590 */ 1936, 1935, 414, 415, 1934, 640, 1335, 1890, 1889, 1887, + /* 1600 */ 1983, 1886, 639, 144, 1885, 1888, 2014, 1884, 1883, 1881, + /* 1610 */ 2050, 1880, 1879, 109, 2016, 643, 2018, 2019, 638, 192, + /* 1620 */ 633, 432, 2015, 1878, 434, 1892, 1877, 2103, 1876, 1875, + /* 1630 */ 2014, 2102, 2099, 1874, 2050, 1873, 1872, 109, 2016, 643, + /* 1640 */ 2018, 2019, 638, 1871, 633, 2015, 1870, 1869, 1868, 1867, + /* 1650 */ 1866, 2103, 1865, 2033, 1864, 628, 2099, 1863, 146, 1862, + /* 1660 */ 1861, 640, 1860, 1891, 1859, 1858, 1983, 1337, 639, 1857, + /* 1670 */ 1856, 1855, 1854, 462, 1853, 1214, 2033, 1717, 199, 1716, + /* 1680 */ 200, 1714, 1678, 202, 637, 2003, 176, 1128, 1677, 1983, + /* 1690 */ 74, 639, 1127, 203, 1950, 1944, 641, 75, 1933, 1932, + /* 1700 */ 2050, 210, 477, 109, 2016, 643, 2018, 2019, 638, 1910, + /* 1710 */ 633, 1778, 1713, 2015, 479, 208, 1711, 2103, 494, 2014, + /* 1720 */ 1709, 336, 2099, 2050, 496, 1161, 321, 2016, 643, 2018, + /* 1730 */ 2019, 638, 636, 633, 624, 2068, 498, 495, 499, 2015, + /* 1740 */ 1707, 500, 502, 504, 2033, 503, 1705, 508, 506, 1692, + /* 1750 */ 1691, 507, 640, 1674, 1780, 1282, 1283, 1983, 1779, 639, + /* 1760 */ 1205, 1204, 1203, 1202, 1199, 1198, 698, 700, 220, 1197, + /* 1770 */ 2033, 1196, 61, 1703, 350, 1696, 1694, 351, 640, 352, + /* 1780 */ 535, 532, 1673, 1983, 1672, 639, 537, 2014, 1671, 541, + /* 1790 */ 110, 2050, 1405, 1949, 168, 2016, 643, 2018, 2019, 638, + /* 1800 */ 539, 633, 1943, 1403, 2015, 545, 1402, 1389, 55, 26, + /* 1810 */ 65, 554, 1931, 2014, 2177, 16, 161, 2050, 1929, 28, + /* 1820 */ 109, 2016, 643, 2018, 2019, 638, 570, 633, 1611, 2015, + /* 1830 */ 19, 568, 245, 169, 2103, 2033, 58, 247, 1595, 2100, + /* 1840 */ 1587, 241, 252, 640, 555, 30, 588, 2193, 1983, 63, + /* 1850 */ 639, 355, 253, 2004, 251, 29, 560, 5, 90, 21, + /* 1860 */ 2033, 1626, 2015, 6, 20, 1631, 1625, 17, 640, 359, + /* 1870 */ 1632, 1630, 1629, 1983, 360, 639, 1562, 1561, 2014, 265, + /* 1880 */ 172, 56, 2050, 1930, 57, 167, 2016, 643, 2018, 2019, + /* 1890 */ 638, 1928, 633, 2033, 1927, 2015, 1909, 93, 92, 271, + /* 1900 */ 272, 640, 22, 2014, 1593, 274, 1983, 2050, 639, 1908, + /* 1910 */ 315, 2016, 643, 2018, 2019, 638, 279, 633, 66, 95, + /* 1920 */ 97, 101, 284, 614, 10, 23, 2033, 2141, 12, 281, + /* 1930 */ 1437, 363, 173, 2053, 640, 1514, 2014, 1492, 632, 1983, + /* 1940 */ 2050, 639, 36, 168, 2016, 643, 2018, 2019, 638, 1490, + /* 1950 */ 633, 1524, 1489, 185, 583, 15, 24, 1469, 1461, 25, + /* 1960 */ 2015, 644, 1268, 1513, 646, 376, 648, 650, 1265, 2014, + /* 1970 */ 1260, 642, 651, 2050, 1262, 653, 322, 2016, 643, 2018, + /* 1980 */ 2019, 638, 654, 633, 1256, 2015, 656, 657, 659, 1254, + /* 1990 */ 660, 2033, 1259, 1245, 102, 1258, 2194, 103, 1257, 637, + /* 2000 */ 287, 1277, 1273, 1159, 1983, 666, 639, 73, 676, 1193, + /* 2010 */ 1192, 1191, 1190, 1189, 1188, 688, 2033, 1186, 1184, 1183, + /* 2020 */ 1182, 373, 1212, 1180, 640, 288, 1179, 1178, 1177, 1983, + /* 2030 */ 1176, 639, 1175, 1174, 2014, 1209, 1207, 1171, 2050, 1170, + /* 2040 */ 1167, 321, 2016, 643, 2018, 2019, 638, 2015, 633, 1166, + /* 2050 */ 2069, 1165, 1164, 1710, 708, 709, 1708, 710, 712, 2014, + /* 2060 */ 713, 714, 1706, 2050, 2015, 
716, 322, 2016, 643, 2018, + /* 2070 */ 2019, 638, 717, 633, 718, 1704, 720, 722, 2033, 721, + /* 2080 */ 1690, 724, 1117, 375, 1670, 291, 640, 728, 1645, 1423, + /* 2090 */ 301, 1983, 731, 639, 732, 2033, 1645, 1645, 1645, 1645, + /* 2100 */ 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, 1645, + /* 2110 */ 639, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2120 */ 1645, 2014, 1645, 1645, 2015, 2050, 1645, 1645, 322, 2016, + /* 2130 */ 643, 2018, 2019, 638, 1645, 633, 1645, 1645, 547, 1645, + /* 2140 */ 1645, 1645, 2050, 2015, 1645, 317, 2016, 643, 2018, 2019, + /* 2150 */ 638, 1645, 633, 1645, 1645, 2033, 1645, 1645, 1645, 1645, + /* 2160 */ 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, 1645, + /* 2170 */ 639, 1645, 1645, 1645, 2033, 1645, 1645, 1645, 1645, 1645, + /* 2180 */ 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, 1645, 639, + /* 2190 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2014, 1645, + /* 2200 */ 1645, 1645, 2050, 2015, 1645, 306, 2016, 643, 2018, 2019, + /* 2210 */ 638, 1645, 633, 1645, 1645, 1645, 1645, 2014, 1645, 2015, + /* 2220 */ 1645, 2050, 1645, 1645, 307, 2016, 643, 2018, 2019, 638, + /* 2230 */ 1645, 633, 1645, 1645, 2033, 1645, 2015, 1645, 1645, 1645, + /* 2240 */ 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, 1645, 639, + /* 2250 */ 2033, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 640, 1645, + /* 2260 */ 1645, 1645, 1645, 1983, 1645, 639, 1645, 2033, 1645, 1645, + /* 2270 */ 1645, 1645, 1645, 1645, 1645, 640, 1645, 2014, 1645, 1645, + /* 2280 */ 1983, 2050, 639, 1645, 308, 2016, 643, 2018, 2019, 638, + /* 2290 */ 1645, 633, 1645, 2014, 1645, 1645, 1645, 2050, 1645, 2015, + /* 2300 */ 314, 2016, 643, 2018, 2019, 638, 1645, 633, 1645, 1645, + /* 2310 */ 2014, 1645, 1645, 1645, 2050, 2015, 1645, 318, 2016, 643, + /* 2320 */ 2018, 2019, 638, 1645, 633, 1645, 1645, 1645, 1645, 1645, + /* 2330 */ 2033, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 640, 1645, + /* 2340 */ 1645, 1645, 1645, 1983, 1645, 639, 2033, 1645, 1645, 1645, + /* 2350 */ 1645, 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, + /* 2360 */ 1645, 639, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2370 */ 1645, 2015, 1645, 2014, 1645, 1645, 1645, 2050, 1645, 1645, + /* 2380 */ 310, 2016, 643, 2018, 2019, 638, 1645, 633, 1645, 2014, + /* 2390 */ 1645, 1645, 1645, 2050, 2015, 1645, 319, 2016, 643, 2018, + /* 2400 */ 2019, 638, 2033, 633, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2410 */ 640, 1645, 1645, 1645, 1645, 1983, 1645, 639, 1645, 1645, + /* 2420 */ 1645, 1645, 1645, 1645, 1645, 2033, 1645, 1645, 1645, 1645, + /* 2430 */ 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, 1645, + /* 2440 */ 639, 1645, 1645, 1645, 1645, 2014, 1645, 1645, 1645, 2050, + /* 2450 */ 1645, 1645, 311, 2016, 643, 2018, 2019, 638, 2015, 633, + /* 2460 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2014, 1645, + /* 2470 */ 1645, 1645, 2050, 1645, 1645, 320, 2016, 643, 2018, 2019, + /* 2480 */ 638, 1645, 633, 1645, 1645, 2015, 1645, 1645, 1645, 2033, + /* 2490 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 640, 1645, 1645, + /* 2500 */ 1645, 1645, 1983, 1645, 639, 1645, 1645, 1645, 1645, 1645, + /* 2510 */ 1645, 1645, 1645, 1645, 1645, 1645, 2033, 1645, 1645, 1645, + /* 2520 */ 1645, 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, + /* 2530 */ 1645, 639, 2014, 2015, 1645, 1645, 2050, 1645, 1645, 312, + /* 2540 */ 2016, 643, 2018, 2019, 638, 1645, 633, 1645, 1645, 2015, + /* 2550 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2014, + /* 2560 */ 1645, 1645, 1645, 2050, 
2033, 1645, 325, 2016, 643, 2018, + /* 2570 */ 2019, 638, 640, 633, 1645, 1645, 1645, 1983, 1645, 639, + /* 2580 */ 2033, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 640, 1645, + /* 2590 */ 1645, 1645, 1645, 1983, 1645, 639, 1645, 2015, 1645, 1645, + /* 2600 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2014, 1645, 1645, + /* 2610 */ 1645, 2050, 1645, 1645, 326, 2016, 643, 2018, 2019, 638, + /* 2620 */ 1645, 633, 1645, 2014, 1645, 1645, 1645, 2050, 2033, 1645, + /* 2630 */ 2027, 2016, 643, 2018, 2019, 638, 640, 633, 1645, 1645, + /* 2640 */ 1645, 1983, 1645, 639, 1645, 2015, 1645, 1645, 1645, 1645, + /* 2650 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2660 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2015, 1645, + /* 2670 */ 1645, 2014, 1645, 1645, 1645, 2050, 2033, 1645, 2026, 2016, + /* 2680 */ 643, 2018, 2019, 638, 640, 633, 1645, 1645, 1645, 1983, + /* 2690 */ 1645, 639, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2033, + /* 2700 */ 1645, 2015, 1645, 1645, 1645, 1645, 1645, 640, 1645, 1645, + /* 2710 */ 1645, 1645, 1983, 1645, 639, 1645, 1645, 1645, 1645, 2014, + /* 2720 */ 1645, 1645, 1645, 2050, 2015, 1645, 2025, 2016, 643, 2018, + /* 2730 */ 2019, 638, 2033, 633, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2740 */ 640, 1645, 2014, 1645, 1645, 1983, 2050, 639, 1645, 338, + /* 2750 */ 2016, 643, 2018, 2019, 638, 2033, 633, 1645, 1645, 1645, + /* 2760 */ 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, 1645, + /* 2770 */ 639, 1645, 1645, 1645, 1645, 2014, 1645, 1645, 1645, 2050, + /* 2780 */ 1645, 1645, 339, 2016, 643, 2018, 2019, 638, 2015, 633, + /* 2790 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2014, 1645, + /* 2800 */ 1645, 1645, 2050, 1645, 1645, 335, 2016, 643, 2018, 2019, + /* 2810 */ 638, 1645, 633, 1645, 1645, 2015, 1645, 1645, 1645, 2033, + /* 2820 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 640, 1645, 1645, + /* 2830 */ 1645, 1645, 1983, 1645, 639, 1645, 1645, 1645, 1645, 1645, + /* 2840 */ 1645, 1645, 1645, 1645, 1645, 1645, 2033, 1645, 1645, 1645, + /* 2850 */ 1645, 1645, 1645, 1645, 640, 1645, 1645, 1645, 1645, 1983, + /* 2860 */ 1645, 639, 2014, 2015, 1645, 1645, 2050, 1645, 1645, 340, + /* 2870 */ 2016, 643, 2018, 2019, 638, 1645, 633, 1645, 1645, 1645, + /* 2880 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 641, + /* 2890 */ 1645, 1645, 1645, 2050, 2033, 1645, 317, 2016, 643, 2018, + /* 2900 */ 2019, 638, 640, 633, 1645, 1645, 1645, 1983, 1645, 639, + /* 2910 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2920 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, 1645, + /* 2930 */ 1645, 1645, 1645, 1645, 1645, 1645, 1645, 2014, 1645, 1645, + /* 2940 */ 1645, 2050, 1645, 1645, 316, 2016, 643, 2018, 2019, 638, + /* 2950 */ 1645, 633, +}; +static const YYCODETYPE yy_lookahead[] = { + /* 0 */ 375, 436, 352, 337, 361, 440, 333, 362, 335, 336, + /* 10 */ 360, 396, 12, 13, 14, 390, 391, 351, 373, 369, + /* 20 */ 20, 456, 22, 8, 9, 460, 461, 12, 13, 14, + /* 30 */ 15, 16, 337, 33, 368, 35, 0, 12, 13, 14, + /* 40 */ 15, 16, 8, 9, 362, 360, 12, 13, 14, 15, + /* 50 */ 16, 406, 407, 408, 33, 373, 352, 352, 58, 360, + /* 60 */ 37, 329, 417, 63, 360, 360, 381, 382, 369, 48, + /* 70 */ 70, 367, 377, 369, 369, 54, 55, 56, 57, 58, + /* 80 */ 376, 337, 371, 12, 13, 374, 375, 20, 406, 407, + /* 90 */ 408, 20, 360, 22, 20, 95, 3, 63, 95, 417, + /* 100 */ 368, 65, 66, 67, 33, 373, 35, 375, 105, 73, + /* 110 */ 74, 96, 368, 20, 78, 94, 14, 117, 97, 83, + /* 120 */ 84, 98, 20, 100, 101, 89, 103, 95, 20, 58, + /* 130 */ 
107, 4, 132, 133, 63, 403, 337, 341, 104, 407, + /* 140 */ 359, 70, 410, 411, 412, 413, 414, 415, 360, 417, + /* 150 */ 351, 355, 129, 372, 422, 367, 424, 358, 22, 363, + /* 160 */ 428, 429, 162, 163, 376, 35, 95, 368, 168, 169, + /* 170 */ 43, 35, 45, 46, 442, 431, 432, 433, 434, 343, + /* 180 */ 436, 437, 450, 183, 126, 185, 165, 166, 117, 96, + /* 190 */ 4, 170, 8, 9, 173, 161, 12, 13, 14, 15, + /* 200 */ 16, 20, 366, 132, 133, 328, 70, 330, 208, 209, + /* 210 */ 189, 211, 212, 213, 214, 215, 216, 217, 218, 219, + /* 220 */ 220, 221, 222, 223, 224, 225, 226, 227, 228, 95, + /* 230 */ 20, 164, 22, 162, 163, 62, 162, 163, 21, 168, + /* 240 */ 169, 24, 25, 26, 27, 28, 29, 30, 31, 32, + /* 250 */ 192, 193, 360, 117, 183, 20, 185, 164, 8, 9, + /* 260 */ 368, 51, 12, 13, 14, 15, 16, 233, 234, 235, + /* 270 */ 236, 237, 238, 239, 240, 241, 242, 243, 246, 208, + /* 280 */ 209, 95, 211, 212, 213, 214, 215, 216, 217, 218, + /* 290 */ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + /* 300 */ 360, 230, 12, 13, 412, 333, 337, 335, 336, 79, + /* 310 */ 20, 22, 22, 132, 133, 185, 182, 20, 184, 183, + /* 320 */ 95, 185, 382, 33, 35, 35, 108, 109, 110, 111, + /* 330 */ 112, 113, 114, 115, 116, 117, 118, 368, 120, 121, + /* 340 */ 122, 123, 124, 125, 208, 209, 96, 20, 58, 168, + /* 350 */ 169, 2, 58, 63, 14, 15, 16, 8, 9, 175, + /* 360 */ 70, 12, 13, 14, 15, 16, 337, 137, 138, 65, + /* 370 */ 66, 67, 409, 12, 13, 14, 0, 73, 74, 44, + /* 380 */ 246, 20, 78, 22, 208, 95, 0, 83, 84, 95, + /* 390 */ 160, 97, 95, 89, 33, 20, 35, 368, 435, 178, + /* 400 */ 161, 432, 433, 434, 329, 436, 437, 117, 20, 440, + /* 410 */ 24, 25, 26, 27, 28, 29, 30, 31, 32, 58, + /* 420 */ 199, 200, 132, 133, 455, 456, 375, 425, 426, 460, + /* 430 */ 461, 70, 246, 257, 258, 259, 260, 261, 387, 8, + /* 440 */ 9, 390, 391, 12, 13, 14, 15, 16, 373, 20, + /* 450 */ 344, 20, 162, 163, 270, 349, 95, 164, 168, 169, + /* 460 */ 0, 432, 433, 434, 171, 436, 437, 360, 127, 20, + /* 470 */ 95, 246, 233, 183, 367, 185, 334, 0, 117, 337, + /* 480 */ 338, 21, 243, 376, 24, 25, 26, 27, 28, 29, + /* 490 */ 30, 31, 32, 132, 133, 126, 127, 392, 208, 209, + /* 500 */ 131, 211, 212, 213, 214, 215, 216, 217, 218, 219, + /* 510 */ 220, 221, 222, 223, 224, 225, 226, 227, 228, 19, + /* 520 */ 132, 133, 334, 162, 163, 337, 338, 151, 337, 168, + /* 530 */ 169, 190, 191, 33, 20, 194, 160, 196, 21, 62, + /* 540 */ 246, 436, 351, 436, 183, 440, 185, 440, 48, 0, + /* 550 */ 62, 34, 337, 36, 54, 55, 56, 57, 58, 368, + /* 560 */ 455, 456, 455, 456, 106, 460, 461, 460, 461, 208, + /* 570 */ 209, 392, 211, 212, 213, 214, 215, 216, 217, 218, + /* 580 */ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + /* 590 */ 12, 13, 409, 4, 94, 164, 356, 97, 20, 384, + /* 600 */ 22, 386, 326, 337, 269, 107, 167, 360, 19, 95, + /* 610 */ 0, 33, 409, 35, 367, 436, 374, 375, 435, 440, + /* 620 */ 329, 246, 33, 376, 126, 127, 128, 129, 130, 131, + /* 630 */ 130, 230, 337, 232, 455, 456, 58, 48, 435, 460, + /* 640 */ 461, 401, 53, 20, 329, 22, 351, 58, 70, 329, + /* 650 */ 384, 360, 386, 358, 1, 2, 107, 230, 35, 368, + /* 660 */ 58, 12, 13, 368, 373, 165, 375, 20, 392, 20, + /* 670 */ 170, 22, 62, 95, 51, 126, 127, 128, 129, 130, + /* 680 */ 131, 70, 33, 94, 35, 35, 97, 187, 373, 189, + /* 690 */ 251, 252, 253, 373, 403, 117, 94, 420, 407, 97, + /* 700 */ 423, 410, 411, 412, 413, 414, 415, 58, 417, 436, + /* 710 */ 132, 133, 436, 440, 346, 347, 440, 8, 9, 70, + /* 720 */ 70, 12, 13, 14, 15, 16, 337, 353, 455, 456, + /* 730 */ 356, 455, 456, 460, 461, 0, 460, 461, 346, 347, + /* 
740 */ 162, 163, 451, 452, 95, 2, 168, 169, 337, 96, + /* 750 */ 106, 8, 9, 44, 337, 12, 13, 14, 15, 16, + /* 760 */ 246, 183, 351, 185, 8, 9, 117, 360, 12, 13, + /* 770 */ 14, 15, 16, 384, 367, 386, 329, 70, 421, 368, + /* 780 */ 423, 132, 133, 376, 49, 368, 208, 209, 0, 211, + /* 790 */ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + /* 800 */ 222, 223, 224, 225, 226, 227, 228, 343, 421, 162, + /* 810 */ 423, 162, 163, 18, 361, 20, 368, 168, 169, 360, + /* 820 */ 373, 357, 27, 352, 337, 30, 367, 379, 33, 3, + /* 830 */ 366, 360, 183, 361, 185, 376, 392, 341, 351, 370, + /* 840 */ 369, 362, 373, 48, 42, 50, 44, 329, 53, 432, + /* 850 */ 433, 434, 373, 436, 437, 368, 361, 208, 209, 363, + /* 860 */ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + /* 870 */ 221, 222, 223, 224, 225, 226, 227, 228, 337, 337, + /* 880 */ 436, 14, 4, 18, 440, 406, 407, 20, 23, 94, + /* 890 */ 0, 373, 351, 351, 361, 107, 417, 337, 337, 455, + /* 900 */ 456, 106, 37, 38, 460, 461, 41, 360, 39, 368, + /* 910 */ 368, 351, 351, 20, 126, 127, 128, 129, 130, 131, + /* 920 */ 164, 368, 368, 376, 59, 60, 61, 370, 368, 368, + /* 930 */ 373, 136, 379, 379, 139, 140, 141, 142, 143, 144, + /* 940 */ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + /* 950 */ 155, 329, 157, 158, 159, 65, 66, 67, 68, 69, + /* 960 */ 95, 71, 72, 73, 74, 75, 76, 77, 78, 79, + /* 970 */ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + /* 980 */ 90, 91, 370, 392, 348, 373, 350, 44, 329, 337, + /* 990 */ 14, 392, 12, 13, 436, 373, 20, 337, 440, 134, + /* 1000 */ 8, 9, 22, 351, 12, 13, 14, 15, 16, 45, + /* 1010 */ 46, 351, 337, 33, 456, 35, 132, 133, 460, 461, + /* 1020 */ 368, 329, 337, 20, 244, 245, 351, 436, 368, 162, + /* 1030 */ 337, 440, 373, 329, 329, 436, 351, 44, 58, 440, + /* 1040 */ 175, 176, 177, 368, 351, 180, 455, 456, 337, 329, + /* 1050 */ 70, 460, 461, 368, 455, 456, 0, 164, 329, 460, + /* 1060 */ 461, 368, 351, 198, 362, 373, 201, 337, 203, 204, + /* 1070 */ 205, 206, 207, 361, 106, 373, 329, 373, 373, 368, + /* 1080 */ 360, 351, 337, 0, 329, 337, 337, 44, 368, 96, + /* 1090 */ 330, 265, 329, 373, 63, 375, 351, 117, 368, 351, + /* 1100 */ 351, 329, 373, 329, 48, 22, 35, 360, 406, 407, + /* 1110 */ 349, 246, 329, 368, 245, 368, 368, 368, 99, 417, + /* 1120 */ 373, 102, 375, 403, 156, 247, 329, 407, 373, 329, + /* 1130 */ 410, 411, 412, 413, 414, 415, 373, 417, 162, 96, + /* 1140 */ 420, 329, 422, 423, 424, 373, 107, 373, 428, 429, + /* 1150 */ 403, 329, 99, 361, 407, 102, 373, 410, 411, 412, + /* 1160 */ 413, 414, 415, 183, 417, 185, 44, 164, 129, 422, + /* 1170 */ 373, 424, 360, 373, 0, 428, 429, 42, 99, 44, + /* 1180 */ 368, 102, 58, 44, 383, 373, 0, 375, 208, 209, + /* 1190 */ 44, 99, 44, 35, 102, 373, 22, 450, 47, 329, + /* 1200 */ 44, 221, 222, 223, 224, 225, 226, 227, 22, 44, + /* 1210 */ 267, 13, 44, 44, 44, 403, 1, 2, 96, 407, + /* 1220 */ 44, 97, 410, 411, 412, 413, 414, 415, 70, 417, + /* 1230 */ 360, 464, 329, 35, 422, 96, 424, 44, 368, 208, + /* 1240 */ 428, 429, 96, 373, 96, 375, 95, 13, 44, 44, + /* 1250 */ 44, 439, 96, 44, 339, 44, 185, 453, 447, 360, + /* 1260 */ 44, 96, 339, 360, 96, 96, 96, 336, 383, 35, + /* 1270 */ 372, 368, 96, 403, 383, 438, 373, 407, 375, 430, + /* 1280 */ 410, 411, 412, 413, 414, 415, 457, 417, 441, 96, + /* 1290 */ 329, 405, 422, 248, 424, 48, 404, 181, 428, 429, + /* 1300 */ 96, 96, 96, 394, 42, 96, 403, 96, 380, 439, + /* 1310 */ 407, 20, 96, 410, 411, 412, 413, 414, 415, 383, + /* 1320 */ 417, 360, 380, 161, 378, 422, 20, 424, 337, 368, + /* 1330 */ 380, 428, 429, 337, 373, 378, 375, 
378, 93, 345, + /* 1340 */ 20, 337, 439, 337, 337, 331, 331, 20, 343, 398, + /* 1350 */ 20, 343, 375, 20, 338, 20, 393, 52, 338, 331, + /* 1360 */ 343, 343, 337, 373, 403, 343, 343, 343, 407, 340, + /* 1370 */ 329, 410, 411, 412, 413, 414, 415, 340, 417, 331, + /* 1380 */ 197, 360, 373, 422, 337, 424, 360, 360, 402, 428, + /* 1390 */ 429, 95, 360, 329, 360, 360, 360, 400, 360, 398, + /* 1400 */ 439, 360, 360, 341, 360, 188, 360, 397, 375, 368, + /* 1410 */ 341, 337, 446, 383, 373, 256, 375, 446, 255, 262, + /* 1420 */ 174, 449, 373, 383, 360, 373, 448, 446, 373, 445, + /* 1430 */ 373, 264, 368, 444, 263, 271, 249, 373, 388, 375, + /* 1440 */ 465, 443, 388, 268, 403, 266, 459, 245, 407, 368, + /* 1450 */ 20, 410, 411, 412, 413, 414, 415, 458, 417, 338, + /* 1460 */ 329, 341, 337, 422, 405, 424, 20, 403, 341, 428, + /* 1470 */ 429, 407, 409, 386, 410, 411, 412, 413, 414, 415, + /* 1480 */ 439, 417, 388, 373, 329, 373, 422, 373, 424, 388, + /* 1490 */ 373, 360, 428, 429, 166, 341, 373, 356, 385, 368, + /* 1500 */ 368, 373, 341, 95, 373, 427, 375, 95, 364, 373, + /* 1510 */ 350, 331, 36, 341, 337, 360, 332, 399, 389, 389, + /* 1520 */ 354, 395, 327, 368, 342, 0, 0, 354, 373, 190, + /* 1530 */ 375, 0, 329, 42, 403, 354, 0, 0, 407, 35, + /* 1540 */ 202, 410, 411, 412, 413, 414, 415, 35, 417, 35, + /* 1550 */ 35, 202, 0, 422, 35, 424, 329, 35, 403, 428, + /* 1560 */ 429, 202, 407, 360, 0, 410, 411, 412, 413, 414, + /* 1570 */ 415, 368, 417, 202, 0, 35, 373, 422, 375, 424, + /* 1580 */ 0, 22, 0, 428, 429, 35, 185, 360, 183, 0, + /* 1590 */ 0, 0, 179, 178, 0, 368, 47, 0, 0, 0, + /* 1600 */ 373, 0, 375, 42, 0, 0, 403, 0, 0, 0, + /* 1610 */ 407, 0, 0, 410, 411, 412, 413, 414, 415, 151, + /* 1620 */ 417, 35, 329, 0, 151, 0, 0, 424, 0, 0, + /* 1630 */ 403, 428, 429, 0, 407, 0, 0, 410, 411, 412, + /* 1640 */ 413, 414, 415, 0, 417, 329, 0, 0, 0, 0, + /* 1650 */ 0, 424, 0, 360, 0, 428, 429, 0, 42, 0, + /* 1660 */ 0, 368, 0, 0, 0, 0, 373, 22, 375, 0, + /* 1670 */ 0, 0, 0, 135, 0, 35, 360, 0, 58, 0, + /* 1680 */ 58, 0, 0, 42, 368, 47, 44, 14, 0, 373, + /* 1690 */ 39, 375, 14, 40, 0, 0, 403, 39, 0, 0, + /* 1700 */ 407, 174, 47, 410, 411, 412, 413, 414, 415, 0, + /* 1710 */ 417, 0, 0, 329, 47, 39, 0, 424, 35, 403, + /* 1720 */ 0, 428, 429, 407, 39, 64, 410, 411, 412, 413, + /* 1730 */ 414, 415, 416, 417, 418, 419, 35, 48, 48, 329, + /* 1740 */ 0, 39, 35, 39, 360, 48, 0, 39, 35, 0, + /* 1750 */ 0, 48, 368, 0, 0, 22, 35, 373, 0, 375, + /* 1760 */ 35, 35, 35, 35, 35, 35, 44, 44, 102, 22, + /* 1770 */ 360, 35, 104, 0, 22, 0, 0, 22, 368, 22, + /* 1780 */ 35, 50, 0, 373, 0, 375, 35, 403, 0, 22, + /* 1790 */ 20, 407, 96, 0, 410, 411, 412, 413, 414, 415, + /* 1800 */ 35, 417, 0, 35, 329, 195, 35, 35, 164, 95, + /* 1810 */ 95, 22, 0, 403, 3, 250, 186, 407, 0, 95, + /* 1820 */ 410, 411, 412, 413, 414, 415, 254, 417, 96, 329, + /* 1830 */ 44, 229, 95, 95, 424, 360, 44, 96, 96, 429, + /* 1840 */ 96, 166, 44, 368, 164, 44, 462, 463, 373, 3, + /* 1850 */ 375, 164, 47, 47, 95, 95, 172, 171, 95, 44, + /* 1860 */ 360, 35, 329, 171, 250, 96, 35, 250, 368, 35, + /* 1870 */ 96, 35, 35, 373, 35, 375, 96, 96, 403, 47, + /* 1880 */ 47, 244, 407, 0, 44, 410, 411, 412, 413, 414, + /* 1890 */ 415, 0, 417, 360, 0, 329, 0, 39, 95, 47, + /* 1900 */ 96, 368, 95, 403, 96, 95, 373, 407, 375, 0, + /* 1910 */ 410, 411, 412, 413, 414, 415, 95, 417, 95, 39, + /* 1920 */ 95, 105, 47, 167, 231, 44, 360, 452, 2, 165, + /* 1930 */ 22, 365, 47, 95, 368, 229, 403, 96, 95, 373, + /* 1940 */ 407, 375, 95, 410, 411, 412, 413, 414, 415, 96, + /* 1950 */ 
417, 208, 96, 47, 454, 95, 95, 22, 96, 95, + /* 1960 */ 329, 106, 96, 229, 35, 35, 95, 35, 96, 403, + /* 1970 */ 119, 210, 95, 407, 96, 35, 410, 411, 412, 413, + /* 1980 */ 414, 415, 95, 417, 96, 329, 35, 95, 35, 96, + /* 1990 */ 95, 360, 119, 22, 95, 119, 463, 95, 119, 368, + /* 2000 */ 44, 35, 22, 64, 373, 107, 375, 95, 63, 35, + /* 2010 */ 35, 35, 35, 35, 35, 92, 360, 35, 35, 35, + /* 2020 */ 35, 365, 70, 35, 368, 44, 35, 35, 22, 373, + /* 2030 */ 35, 375, 35, 35, 403, 70, 35, 35, 407, 35, + /* 2040 */ 35, 410, 411, 412, 413, 414, 415, 329, 417, 35, + /* 2050 */ 419, 22, 35, 0, 35, 48, 0, 39, 35, 403, + /* 2060 */ 48, 39, 0, 407, 329, 35, 410, 411, 412, 413, + /* 2070 */ 414, 415, 48, 417, 39, 0, 35, 39, 360, 48, + /* 2080 */ 0, 35, 35, 365, 0, 22, 368, 21, 466, 22, + /* 2090 */ 22, 373, 21, 375, 20, 360, 466, 466, 466, 466, + /* 2100 */ 466, 466, 466, 368, 466, 466, 466, 466, 373, 466, + /* 2110 */ 375, 466, 466, 466, 466, 466, 466, 466, 466, 466, + /* 2120 */ 466, 403, 466, 466, 329, 407, 466, 466, 410, 411, + /* 2130 */ 412, 413, 414, 415, 466, 417, 466, 466, 403, 466, + /* 2140 */ 466, 466, 407, 329, 466, 410, 411, 412, 413, 414, + /* 2150 */ 415, 466, 417, 466, 466, 360, 466, 466, 466, 466, + /* 2160 */ 466, 466, 466, 368, 466, 466, 466, 466, 373, 466, + /* 2170 */ 375, 466, 466, 466, 360, 466, 466, 466, 466, 466, + /* 2180 */ 466, 466, 368, 466, 466, 466, 466, 373, 466, 375, + /* 2190 */ 466, 466, 466, 466, 466, 466, 466, 466, 403, 466, + /* 2200 */ 466, 466, 407, 329, 466, 410, 411, 412, 413, 414, + /* 2210 */ 415, 466, 417, 466, 466, 466, 466, 403, 466, 329, + /* 2220 */ 466, 407, 466, 466, 410, 411, 412, 413, 414, 415, + /* 2230 */ 466, 417, 466, 466, 360, 466, 329, 466, 466, 466, + /* 2240 */ 466, 466, 368, 466, 466, 466, 466, 373, 466, 375, + /* 2250 */ 360, 466, 466, 466, 466, 466, 466, 466, 368, 466, + /* 2260 */ 466, 466, 466, 373, 466, 375, 466, 360, 466, 466, + /* 2270 */ 466, 466, 466, 466, 466, 368, 466, 403, 466, 466, + /* 2280 */ 373, 407, 375, 466, 410, 411, 412, 413, 414, 415, + /* 2290 */ 466, 417, 466, 403, 466, 466, 466, 407, 466, 329, + /* 2300 */ 410, 411, 412, 413, 414, 415, 466, 417, 466, 466, + /* 2310 */ 403, 466, 466, 466, 407, 329, 466, 410, 411, 412, + /* 2320 */ 413, 414, 415, 466, 417, 466, 466, 466, 466, 466, + /* 2330 */ 360, 466, 466, 466, 466, 466, 466, 466, 368, 466, + /* 2340 */ 466, 466, 466, 373, 466, 375, 360, 466, 466, 466, + /* 2350 */ 466, 466, 466, 466, 368, 466, 466, 466, 466, 373, + /* 2360 */ 466, 375, 466, 466, 466, 466, 466, 466, 466, 466, + /* 2370 */ 466, 329, 466, 403, 466, 466, 466, 407, 466, 466, + /* 2380 */ 410, 411, 412, 413, 414, 415, 466, 417, 466, 403, + /* 2390 */ 466, 466, 466, 407, 329, 466, 410, 411, 412, 413, + /* 2400 */ 414, 415, 360, 417, 466, 466, 466, 466, 466, 466, + /* 2410 */ 368, 466, 466, 466, 466, 373, 466, 375, 466, 466, + /* 2420 */ 466, 466, 466, 466, 466, 360, 466, 466, 466, 466, + /* 2430 */ 466, 466, 466, 368, 466, 466, 466, 466, 373, 466, + /* 2440 */ 375, 466, 466, 466, 466, 403, 466, 466, 466, 407, + /* 2450 */ 466, 466, 410, 411, 412, 413, 414, 415, 329, 417, + /* 2460 */ 466, 466, 466, 466, 466, 466, 466, 466, 403, 466, + /* 2470 */ 466, 466, 407, 466, 466, 410, 411, 412, 413, 414, + /* 2480 */ 415, 466, 417, 466, 466, 329, 466, 466, 466, 360, + /* 2490 */ 466, 466, 466, 466, 466, 466, 466, 368, 466, 466, + /* 2500 */ 466, 466, 373, 466, 375, 466, 466, 466, 466, 466, + /* 2510 */ 466, 466, 466, 466, 466, 466, 360, 466, 466, 466, + /* 2520 */ 466, 466, 466, 466, 368, 466, 466, 466, 466, 373, + /* 
2530 */ 466, 375, 403, 329, 466, 466, 407, 466, 466, 410, + /* 2540 */ 411, 412, 413, 414, 415, 466, 417, 466, 466, 329, + /* 2550 */ 466, 466, 466, 466, 466, 466, 466, 466, 466, 403, + /* 2560 */ 466, 466, 466, 407, 360, 466, 410, 411, 412, 413, + /* 2570 */ 414, 415, 368, 417, 466, 466, 466, 373, 466, 375, + /* 2580 */ 360, 466, 466, 466, 466, 466, 466, 466, 368, 466, + /* 2590 */ 466, 466, 466, 373, 466, 375, 466, 329, 466, 466, + /* 2600 */ 466, 466, 466, 466, 466, 466, 466, 403, 466, 466, + /* 2610 */ 466, 407, 466, 466, 410, 411, 412, 413, 414, 415, + /* 2620 */ 466, 417, 466, 403, 466, 466, 466, 407, 360, 466, + /* 2630 */ 410, 411, 412, 413, 414, 415, 368, 417, 466, 466, + /* 2640 */ 466, 373, 466, 375, 466, 329, 466, 466, 466, 466, + /* 2650 */ 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, + /* 2660 */ 466, 466, 466, 466, 466, 466, 466, 466, 329, 466, + /* 2670 */ 466, 403, 466, 466, 466, 407, 360, 466, 410, 411, + /* 2680 */ 412, 413, 414, 415, 368, 417, 466, 466, 466, 373, + /* 2690 */ 466, 375, 466, 466, 466, 466, 466, 466, 466, 360, + /* 2700 */ 466, 329, 466, 466, 466, 466, 466, 368, 466, 466, + /* 2710 */ 466, 466, 373, 466, 375, 466, 466, 466, 466, 403, + /* 2720 */ 466, 466, 466, 407, 329, 466, 410, 411, 412, 413, + /* 2730 */ 414, 415, 360, 417, 466, 466, 466, 466, 466, 466, + /* 2740 */ 368, 466, 403, 466, 466, 373, 407, 375, 466, 410, + /* 2750 */ 411, 412, 413, 414, 415, 360, 417, 466, 466, 466, + /* 2760 */ 466, 466, 466, 368, 466, 466, 466, 466, 373, 466, + /* 2770 */ 375, 466, 466, 466, 466, 403, 466, 466, 466, 407, + /* 2780 */ 466, 466, 410, 411, 412, 413, 414, 415, 329, 417, + /* 2790 */ 466, 466, 466, 466, 466, 466, 466, 466, 403, 466, + /* 2800 */ 466, 466, 407, 466, 466, 410, 411, 412, 413, 414, + /* 2810 */ 415, 466, 417, 466, 466, 329, 466, 466, 466, 360, + /* 2820 */ 466, 466, 466, 466, 466, 466, 466, 368, 466, 466, + /* 2830 */ 466, 466, 373, 466, 375, 466, 466, 466, 466, 466, + /* 2840 */ 466, 466, 466, 466, 466, 466, 360, 466, 466, 466, + /* 2850 */ 466, 466, 466, 466, 368, 466, 466, 466, 466, 373, + /* 2860 */ 466, 375, 403, 329, 466, 466, 407, 466, 466, 410, + /* 2870 */ 411, 412, 413, 414, 415, 466, 417, 466, 466, 466, + /* 2880 */ 466, 466, 466, 466, 466, 466, 466, 466, 466, 403, + /* 2890 */ 466, 466, 466, 407, 360, 466, 410, 411, 412, 413, + /* 2900 */ 414, 415, 368, 417, 466, 466, 466, 373, 466, 375, + /* 2910 */ 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, + /* 2920 */ 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, + /* 2930 */ 466, 466, 466, 466, 466, 466, 466, 403, 466, 466, + /* 2940 */ 466, 407, 466, 466, 410, 411, 412, 413, 414, 415, + /* 2950 */ 466, 417, +}; +#define YY_SHIFT_COUNT (733) +#define YY_SHIFT_MIN (0) +#define YY_SHIFT_MAX (2084) +static const unsigned short int yy_shift_ofst[] = { + /* 0 */ 865, 0, 71, 0, 290, 290, 290, 290, 290, 290, + /* 10 */ 290, 290, 290, 290, 290, 361, 578, 578, 649, 578, + /* 20 */ 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, + /* 30 */ 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, + /* 40 */ 578, 578, 578, 578, 578, 578, 578, 578, 375, 514, + /* 50 */ 297, 134, 294, 32, 225, 32, 297, 297, 980, 980, + /* 60 */ 32, 980, 980, 186, 32, 108, 181, 235, 235, 181, + /* 70 */ 127, 127, 74, 388, 102, 102, 235, 235, 235, 235, + /* 80 */ 235, 235, 235, 327, 235, 235, 173, 108, 235, 235, + /* 90 */ 429, 235, 108, 235, 327, 235, 327, 108, 235, 235, + /* 100 */ 108, 235, 108, 108, 108, 235, 488, 795, 34, 34, + /* 110 */ 217, 304, 136, 136, 136, 136, 136, 136, 136, 136, + /* 120 */ 136, 136, 136, 
136, 136, 136, 136, 136, 136, 136, + /* 130 */ 136, 23, 93, 74, 388, 477, 650, 67, 67, 67, + /* 140 */ 610, 401, 401, 650, 449, 449, 449, 458, 427, 108, + /* 150 */ 611, 108, 611, 611, 644, 707, 218, 218, 218, 218, + /* 160 */ 218, 218, 218, 218, 500, 460, 36, 431, 184, 176, + /* 170 */ 623, 439, 867, 976, 210, 893, 964, 1039, 1003, 780, + /* 180 */ 869, 826, 780, 802, 878, 647, 1045, 1247, 1116, 1262, + /* 190 */ 1291, 1262, 1162, 1306, 1306, 1262, 1162, 1162, 1245, 1306, + /* 200 */ 1306, 1306, 1320, 1320, 1327, 173, 1330, 173, 1333, 1335, + /* 210 */ 173, 1333, 173, 173, 173, 1306, 173, 1305, 1305, 1320, + /* 220 */ 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, + /* 230 */ 108, 1306, 1320, 611, 611, 1183, 1296, 1327, 488, 1217, + /* 240 */ 1330, 488, 1306, 1291, 1291, 611, 1159, 1163, 611, 1159, + /* 250 */ 1163, 611, 611, 108, 1157, 1246, 1159, 1167, 1171, 1187, + /* 260 */ 1045, 1164, 1175, 1179, 1202, 449, 1430, 1306, 1333, 488, + /* 270 */ 488, 1446, 1163, 611, 611, 611, 611, 611, 1163, 611, + /* 280 */ 1328, 488, 644, 488, 449, 1408, 1412, 611, 707, 1306, + /* 290 */ 488, 1476, 1320, 2952, 2952, 2952, 2952, 2952, 2952, 2952, + /* 300 */ 2952, 2952, 890, 21, 386, 589, 15, 709, 250, 549, + /* 310 */ 349, 743, 756, 788, 992, 992, 992, 992, 992, 992, + /* 320 */ 992, 992, 992, 498, 341, 25, 25, 230, 221, 376, + /* 330 */ 602, 517, 58, 369, 369, 340, 653, 239, 340, 340, + /* 340 */ 340, 993, 1056, 289, 1135, 968, 1019, 1053, 1079, 1092, + /* 350 */ 1083, 1174, 1186, 1043, 1122, 1124, 1139, 1146, 884, 943, + /* 360 */ 335, 293, 1148, 1156, 1165, 1168, 1169, 1170, 1215, 1176, + /* 370 */ 130, 1071, 1031, 1193, 1151, 1204, 1205, 1206, 1209, 1211, + /* 380 */ 1216, 3, 1198, 1234, 1158, 735, 1525, 1526, 1339, 1531, + /* 390 */ 1536, 1491, 1537, 1504, 1338, 1512, 1514, 1515, 1349, 1552, + /* 400 */ 1519, 1522, 1359, 1564, 1371, 1574, 1540, 1580, 1559, 1582, + /* 410 */ 1550, 1401, 1405, 1589, 1590, 1413, 1415, 1591, 1594, 1549, + /* 420 */ 1597, 1598, 1599, 1561, 1601, 1604, 1605, 1607, 1608, 1609, + /* 430 */ 1611, 1612, 1468, 1586, 1623, 1473, 1625, 1626, 1628, 1629, + /* 440 */ 1633, 1635, 1636, 1643, 1646, 1647, 1648, 1649, 1650, 1652, + /* 450 */ 1654, 1657, 1616, 1659, 1660, 1662, 1663, 1664, 1665, 1645, + /* 460 */ 1669, 1670, 1671, 1538, 1672, 1674, 1640, 1677, 1620, 1679, + /* 470 */ 1622, 1681, 1682, 1641, 1651, 1642, 1638, 1673, 1655, 1678, + /* 480 */ 1667, 1688, 1653, 1658, 1694, 1695, 1698, 1676, 1527, 1699, + /* 490 */ 1709, 1711, 1661, 1712, 1716, 1683, 1689, 1685, 1720, 1701, + /* 500 */ 1690, 1702, 1740, 1707, 1697, 1704, 1746, 1713, 1703, 1708, + /* 510 */ 1749, 1750, 1753, 1754, 1668, 1666, 1721, 1733, 1758, 1725, + /* 520 */ 1726, 1727, 1728, 1722, 1723, 1729, 1730, 1747, 1736, 1773, + /* 530 */ 1752, 1775, 1755, 1731, 1776, 1757, 1745, 1782, 1751, 1784, + /* 540 */ 1765, 1788, 1767, 1770, 1768, 1771, 1610, 1696, 1714, 1793, + /* 550 */ 1644, 1715, 1772, 1802, 1630, 1789, 1680, 1675, 1812, 1818, + /* 560 */ 1687, 1684, 1811, 1786, 1565, 1724, 1732, 1737, 1686, 1602, + /* 570 */ 1692, 1572, 1741, 1792, 1742, 1738, 1759, 1760, 1744, 1798, + /* 580 */ 1805, 1806, 1763, 1801, 1614, 1769, 1774, 1846, 1815, 1617, + /* 590 */ 1826, 1831, 1834, 1836, 1837, 1839, 1780, 1781, 1832, 1637, + /* 600 */ 1840, 1833, 1883, 1891, 1894, 1896, 1803, 1858, 1638, 1852, + /* 610 */ 1807, 1804, 1808, 1810, 1821, 1756, 1823, 1909, 1880, 1764, + /* 620 */ 1825, 1816, 1638, 1875, 1881, 1706, 1693, 1734, 1926, 1908, + /* 630 */ 1743, 1838, 1841, 1843, 1853, 1847, 1856, 1885, 1860, 1861, + /* 
640 */ 1906, 1862, 1935, 1761, 1864, 1855, 1866, 1929, 1930, 1871, + /* 650 */ 1872, 1932, 1877, 1878, 1940, 1887, 1888, 1951, 1892, 1893, + /* 660 */ 1953, 1895, 1851, 1873, 1876, 1879, 1971, 1898, 1899, 1956, + /* 670 */ 1902, 1966, 1912, 1956, 1956, 1980, 1939, 1945, 1974, 1975, + /* 680 */ 1976, 1977, 1978, 1979, 1982, 1983, 1984, 1985, 1952, 1923, + /* 690 */ 1981, 1988, 1991, 1992, 2006, 1995, 1997, 1998, 1965, 1722, + /* 700 */ 2001, 1723, 2002, 2004, 2005, 2014, 2029, 2017, 2053, 2019, + /* 710 */ 2007, 2018, 2056, 2023, 2012, 2022, 2062, 2030, 2024, 2035, + /* 720 */ 2075, 2041, 2031, 2038, 2080, 2046, 2047, 2084, 2063, 2066, + /* 730 */ 2067, 2068, 2071, 2074, +}; +#define YY_REDUCE_COUNT (301) +#define YY_REDUCE_MIN (-435) +#define YY_REDUCE_MAX (2534) +static const short yy_reduce_ofst[] = { + /* 0 */ 276, -268, 720, 747, 812, 870, 903, 961, 1041, 1064, + /* 10 */ 1131, 1155, 1203, 1227, 1293, 1316, 291, 1384, 1410, 1475, + /* 20 */ 1500, 1533, 1566, 1631, 1656, 1718, 1735, 1795, 1814, 1874, + /* 30 */ 1890, 1907, 1970, 1986, 2042, 2065, 2129, 2156, 2204, 2220, + /* 40 */ 2268, 2316, 2339, 2372, 2395, 2459, 2486, 2534, -31, 107, + /* 50 */ -256, 105, 179, 444, 591, 599, 29, 417, -355, -318, + /* 60 */ 273, 479, 702, -435, 558, -296, 51, -201, 295, -375, + /* 70 */ -327, -28, -315, -289, 142, 188, -334, 191, 411, 487, + /* 80 */ 541, 542, 560, 215, 561, 652, 464, -212, 660, 675, + /* 90 */ -108, 685, 247, 693, 266, 711, 389, -350, 730, 745, + /* 100 */ 407, 748, -295, 459, 471, 749, -204, -305, 2, 2, + /* 110 */ -123, 106, 75, 315, 320, 447, 518, 622, 659, 692, + /* 120 */ 704, 705, 729, 755, 763, 772, 774, 783, 797, 800, + /* 130 */ 822, -219, -37, -60, 242, -164, 368, -37, 183, 203, + /* 140 */ 496, 357, 387, 392, 448, 553, 554, 240, 277, -301, + /* 150 */ 469, 547, 557, 612, 374, 636, -357, 453, 472, 495, + /* 160 */ 533, 712, 792, 533, -385, 760, 761, 801, 767, 804, + /* 170 */ 915, 811, 899, 899, 923, 885, 931, 898, 891, 837, + /* 180 */ 837, 829, 837, 849, 847, 899, 886, 892, 909, 928, + /* 190 */ 936, 942, 946, 991, 996, 950, 957, 959, 994, 1004, + /* 200 */ 1006, 1007, 1014, 1015, 951, 1005, 977, 1008, 1016, 963, + /* 210 */ 1017, 1020, 1018, 1022, 1023, 1025, 1024, 1029, 1037, 1028, + /* 220 */ 1021, 1026, 1027, 1032, 1034, 1035, 1036, 1038, 1042, 1044, + /* 230 */ 1046, 1047, 1048, 990, 1009, 986, 997, 1001, 1062, 1010, + /* 240 */ 1033, 1069, 1074, 1030, 1040, 1049, 966, 1050, 1052, 971, + /* 250 */ 1054, 1055, 1057, 899, 972, 978, 981, 984, 989, 998, + /* 260 */ 1059, 975, 987, 999, 837, 1081, 1063, 1125, 1121, 1120, + /* 270 */ 1127, 1087, 1094, 1110, 1112, 1114, 1117, 1123, 1101, 1128, + /* 280 */ 1113, 1154, 1141, 1161, 1132, 1078, 1144, 1136, 1160, 1177, + /* 290 */ 1172, 1184, 1180, 1126, 1118, 1129, 1130, 1166, 1173, 1181, + /* 300 */ 1182, 1195, +}; +static const YYACTIONTYPE yy_default[] = { + /* 0 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 10 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 20 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 30 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 40 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 50 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 60 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 70 */ 1643, 1643, 1900, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 80 */ 1643, 1643, 1643, 1643, 1643, 1643, 1721, 1643, 1643, 1643, + /* 90 */ 1643, 1643, 1643, 1643, 1643, 
1643, 1643, 1643, 1643, 1643, + /* 100 */ 1643, 1643, 1643, 1643, 1643, 1643, 1719, 1893, 2105, 1643, + /* 110 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 120 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 130 */ 1643, 1643, 2117, 1643, 1643, 1721, 1643, 2117, 2117, 2117, + /* 140 */ 1719, 2077, 2077, 1643, 1643, 1643, 1643, 1953, 1643, 1643, + /* 150 */ 1643, 1643, 1643, 1643, 1828, 1643, 1643, 1643, 1643, 1643, + /* 160 */ 1852, 1643, 1643, 1643, 1945, 1643, 1643, 2142, 2196, 1643, + /* 170 */ 1643, 2145, 1643, 1643, 1643, 1905, 1643, 1781, 2132, 2109, + /* 180 */ 2123, 2180, 2110, 2107, 2126, 1643, 2136, 1643, 1938, 1898, + /* 190 */ 1643, 1898, 1895, 1643, 1643, 1898, 1895, 1895, 1772, 1643, + /* 200 */ 1643, 1643, 1643, 1643, 1643, 1721, 1643, 1721, 1643, 1643, + /* 210 */ 1721, 1643, 1721, 1721, 1721, 1643, 1721, 1700, 1700, 1643, + /* 220 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 230 */ 1643, 1643, 1643, 1643, 1643, 1963, 1951, 1643, 1719, 1947, + /* 240 */ 1643, 1719, 1643, 1643, 1643, 1643, 2153, 2151, 1643, 2153, + /* 250 */ 2151, 1643, 1643, 1643, 2165, 2161, 2153, 2169, 2167, 2138, + /* 260 */ 2136, 2199, 2186, 2182, 2123, 1643, 1643, 1643, 1643, 1719, + /* 270 */ 1719, 1643, 2151, 1643, 1643, 1643, 1643, 1643, 2151, 1643, + /* 280 */ 1643, 1719, 1643, 1719, 1643, 1643, 1797, 1643, 1643, 1643, + /* 290 */ 1719, 1675, 1643, 1940, 1956, 1923, 1923, 1831, 1831, 1831, + /* 300 */ 1722, 1648, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 310 */ 1643, 1643, 1643, 1643, 2164, 2163, 2032, 1643, 2081, 2080, + /* 320 */ 2079, 2070, 2031, 1793, 1643, 2030, 2029, 1643, 1643, 1643, + /* 330 */ 1643, 1643, 1643, 1914, 1913, 2023, 1643, 1643, 2024, 2022, + /* 340 */ 2021, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 350 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 2183, + /* 360 */ 2187, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 2106, 1643, + /* 370 */ 1643, 1643, 1643, 1643, 2005, 1643, 1643, 1643, 1643, 1643, + /* 380 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 390 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 400 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 410 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 420 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 430 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 440 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 450 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 460 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 470 */ 1643, 1643, 1643, 1643, 1643, 1680, 2010, 1643, 1643, 1643, + /* 480 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 490 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 500 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 510 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 520 */ 1643, 1643, 1643, 1760, 1759, 1643, 1643, 1643, 1643, 1643, + /* 530 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 540 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 2014, 1643, 1643, + /* 550 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 560 */ 1643, 1643, 2179, 2139, 1643, 1643, 1643, 1643, 1643, 1643, + /* 570 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 580 */ 1643, 2005, 1643, 2162, 1643, 1643, 2177, 1643, 2181, 
1643, + /* 590 */ 1643, 1643, 1643, 1643, 1643, 1643, 2116, 2112, 1643, 1643, + /* 600 */ 2108, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 2013, 1643, + /* 610 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 620 */ 1643, 1643, 2004, 1643, 2067, 1643, 1643, 1643, 2101, 1643, + /* 630 */ 1643, 2052, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 640 */ 1643, 2014, 1643, 2017, 1643, 1643, 1643, 1643, 1643, 1825, + /* 650 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 660 */ 1643, 1643, 1810, 1808, 1807, 1806, 1643, 1803, 1643, 1838, + /* 670 */ 1643, 1643, 1643, 1834, 1833, 1643, 1643, 1643, 1643, 1643, + /* 680 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 690 */ 1740, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1732, + /* 700 */ 1643, 1731, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 710 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 720 */ 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, 1643, + /* 730 */ 1643, 1643, 1643, 1643, +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 }; /********** End of lemon-generated parsing tables *****************************/ @@ -1822,70 +2626,71 @@ static const char *const yyTokenName[] = { /* 398 */ "stream_name", /* 399 */ "stream_options", /* 400 */ "col_list_opt", - /* 401 */ "subtable_opt", - /* 402 */ "expression", - /* 403 */ "dnode_list", - /* 404 */ "where_clause_opt", - /* 405 */ "signed", - /* 406 */ "literal_func", - /* 407 */ "literal_list", - /* 408 */ "table_alias", - /* 409 */ "expr_or_subquery", - /* 410 */ "pseudo_column", - /* 411 */ "column_reference", - /* 412 */ "function_expression", - /* 413 */ "case_when_expression", - /* 414 */ "star_func", - /* 415 */ "star_func_para_list", - /* 416 */ "noarg_func", - /* 417 */ "other_para_list", - /* 418 */ "star_func_para", - /* 419 */ "when_then_list", - /* 420 */ "case_when_else_opt", - /* 421 */ "common_expression", - /* 422 */ "when_then_expr", - /* 423 */ "predicate", - /* 424 */ "compare_op", - /* 425 */ "in_op", - /* 426 */ "in_predicate_value", - /* 427 */ "boolean_value_expression", - /* 428 */ "boolean_primary", - /* 429 */ "from_clause_opt", - /* 430 */ "table_reference_list", - /* 431 */ "table_reference", - /* 432 */ "table_primary", - /* 433 */ "joined_table", - /* 434 */ "alias_opt", - /* 435 */ "subquery", - /* 436 */ "parenthesized_joined_table", - /* 437 */ "join_type", - /* 438 */ "search_condition", - /* 439 */ "query_specification", - /* 440 */ "set_quantifier_opt", - /* 441 */ "select_list", - /* 442 */ "partition_by_clause_opt", - /* 443 */ "range_opt", - /* 444 */ "every_opt", - /* 445 */ "fill_opt", - /* 446 */ "twindow_clause_opt", - /* 447 */ "group_by_clause_opt", - /* 448 */ "having_clause_opt", - /* 449 */ "select_item", - /* 450 */ "partition_list", - /* 451 */ "partition_item", - /* 452 */ "fill_mode", - /* 453 */ "group_by_list", - /* 454 */ "query_expression", - /* 455 */ "query_simple", - /* 456 */ "order_by_clause_opt", - /* 457 */ "slimit_clause_opt", - /* 458 */ "limit_clause_opt", - /* 459 */ "union_query_expression", - /* 460 */ "query_simple_or_subquery", - /* 461 */ "sort_specification_list", - /* 462 */ "sort_specification", - /* 463 */ "ordering_specification_opt", - /* 464 */ "null_ordering_opt", + /* 401 */ "tag_def_or_ref_opt", + /* 402 */ "subtable_opt", + /* 403 */ "expression", + /* 404 */ "dnode_list", + /* 405 */ "where_clause_opt", + /* 406 */ "signed", + /* 407 */ "literal_func", + /* 408 */ "literal_list", + /* 409 */ "table_alias", 
+ /* 410 */ "expr_or_subquery", + /* 411 */ "pseudo_column", + /* 412 */ "column_reference", + /* 413 */ "function_expression", + /* 414 */ "case_when_expression", + /* 415 */ "star_func", + /* 416 */ "star_func_para_list", + /* 417 */ "noarg_func", + /* 418 */ "other_para_list", + /* 419 */ "star_func_para", + /* 420 */ "when_then_list", + /* 421 */ "case_when_else_opt", + /* 422 */ "common_expression", + /* 423 */ "when_then_expr", + /* 424 */ "predicate", + /* 425 */ "compare_op", + /* 426 */ "in_op", + /* 427 */ "in_predicate_value", + /* 428 */ "boolean_value_expression", + /* 429 */ "boolean_primary", + /* 430 */ "from_clause_opt", + /* 431 */ "table_reference_list", + /* 432 */ "table_reference", + /* 433 */ "table_primary", + /* 434 */ "joined_table", + /* 435 */ "alias_opt", + /* 436 */ "subquery", + /* 437 */ "parenthesized_joined_table", + /* 438 */ "join_type", + /* 439 */ "search_condition", + /* 440 */ "query_specification", + /* 441 */ "set_quantifier_opt", + /* 442 */ "select_list", + /* 443 */ "partition_by_clause_opt", + /* 444 */ "range_opt", + /* 445 */ "every_opt", + /* 446 */ "fill_opt", + /* 447 */ "twindow_clause_opt", + /* 448 */ "group_by_clause_opt", + /* 449 */ "having_clause_opt", + /* 450 */ "select_item", + /* 451 */ "partition_list", + /* 452 */ "partition_item", + /* 453 */ "fill_mode", + /* 454 */ "group_by_list", + /* 455 */ "query_expression", + /* 456 */ "query_simple", + /* 457 */ "order_by_clause_opt", + /* 458 */ "slimit_clause_opt", + /* 459 */ "limit_clause_opt", + /* 460 */ "union_query_expression", + /* 461 */ "query_simple_or_subquery", + /* 462 */ "sort_specification_list", + /* 463 */ "sort_specification", + /* 464 */ "ordering_specification_opt", + /* 465 */ "null_ordering_opt", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1965,6 +2770,7 @@ static const char *const yyRuleName[] = { /* 69 */ "cmd ::= ALTER DATABASE db_name alter_db_options", /* 70 */ "cmd ::= FLUSH DATABASE db_name", /* 71 */ "cmd ::= TRIM DATABASE db_name speed_opt", +<<<<<<< HEAD /* 72 */ "cmd ::= COMPACT DATABASE db_name", /* 73 */ "not_exists_opt ::= IF NOT EXISTS", /* 74 */ "not_exists_opt ::=", @@ -2447,6 +3253,492 @@ static const char *const yyRuleName[] = { /* 551 */ "null_ordering_opt ::=", /* 552 */ "null_ordering_opt ::= NULLS FIRST", /* 553 */ "null_ordering_opt ::= NULLS LAST", +======= + /* 72 */ "not_exists_opt ::= IF NOT EXISTS", + /* 73 */ "not_exists_opt ::=", + /* 74 */ "exists_opt ::= IF EXISTS", + /* 75 */ "exists_opt ::=", + /* 76 */ "db_options ::=", + /* 77 */ "db_options ::= db_options BUFFER NK_INTEGER", + /* 78 */ "db_options ::= db_options CACHEMODEL NK_STRING", + /* 79 */ "db_options ::= db_options CACHESIZE NK_INTEGER", + /* 80 */ "db_options ::= db_options COMP NK_INTEGER", + /* 81 */ "db_options ::= db_options DURATION NK_INTEGER", + /* 82 */ "db_options ::= db_options DURATION NK_VARIABLE", + /* 83 */ "db_options ::= db_options MAXROWS NK_INTEGER", + /* 84 */ "db_options ::= db_options MINROWS NK_INTEGER", + /* 85 */ "db_options ::= db_options KEEP integer_list", + /* 86 */ "db_options ::= db_options KEEP variable_list", + /* 87 */ "db_options ::= db_options PAGES NK_INTEGER", + /* 88 */ "db_options ::= db_options PAGESIZE NK_INTEGER", + /* 89 */ "db_options ::= db_options TSDB_PAGESIZE NK_INTEGER", + /* 90 */ "db_options ::= db_options PRECISION NK_STRING", + /* 91 */ "db_options ::= db_options REPLICA NK_INTEGER", + /* 92 */ "db_options ::= db_options VGROUPS NK_INTEGER", + /* 93 */ "db_options ::= db_options SINGLE_STABLE 
NK_INTEGER", + /* 94 */ "db_options ::= db_options RETENTIONS retention_list", + /* 95 */ "db_options ::= db_options SCHEMALESS NK_INTEGER", + /* 96 */ "db_options ::= db_options WAL_LEVEL NK_INTEGER", + /* 97 */ "db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER", + /* 98 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER", + /* 99 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER", + /* 100 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER", + /* 101 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER", + /* 102 */ "db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER", + /* 103 */ "db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER", + /* 104 */ "db_options ::= db_options STT_TRIGGER NK_INTEGER", + /* 105 */ "db_options ::= db_options TABLE_PREFIX NK_INTEGER", + /* 106 */ "db_options ::= db_options TABLE_SUFFIX NK_INTEGER", + /* 107 */ "alter_db_options ::= alter_db_option", + /* 108 */ "alter_db_options ::= alter_db_options alter_db_option", + /* 109 */ "alter_db_option ::= BUFFER NK_INTEGER", + /* 110 */ "alter_db_option ::= CACHEMODEL NK_STRING", + /* 111 */ "alter_db_option ::= CACHESIZE NK_INTEGER", + /* 112 */ "alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER", + /* 113 */ "alter_db_option ::= KEEP integer_list", + /* 114 */ "alter_db_option ::= KEEP variable_list", + /* 115 */ "alter_db_option ::= PAGES NK_INTEGER", + /* 116 */ "alter_db_option ::= REPLICA NK_INTEGER", + /* 117 */ "alter_db_option ::= WAL_LEVEL NK_INTEGER", + /* 118 */ "alter_db_option ::= STT_TRIGGER NK_INTEGER", + /* 119 */ "integer_list ::= NK_INTEGER", + /* 120 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER", + /* 121 */ "variable_list ::= NK_VARIABLE", + /* 122 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE", + /* 123 */ "retention_list ::= retention", + /* 124 */ "retention_list ::= retention_list NK_COMMA retention", + /* 125 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE", + /* 126 */ "speed_opt ::=", + /* 127 */ "speed_opt ::= MAX_SPEED NK_INTEGER", + /* 128 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options", + /* 129 */ "cmd ::= CREATE TABLE multi_create_clause", + /* 130 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options", + /* 131 */ "cmd ::= DROP TABLE multi_drop_clause", + /* 132 */ "cmd ::= DROP STABLE exists_opt full_table_name", + /* 133 */ "cmd ::= ALTER TABLE alter_table_clause", + /* 134 */ "cmd ::= ALTER STABLE alter_table_clause", + /* 135 */ "alter_table_clause ::= full_table_name alter_table_options", + /* 136 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name", + /* 137 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name", + /* 138 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name", + /* 139 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name", + /* 140 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name", + /* 141 */ "alter_table_clause ::= full_table_name DROP TAG column_name", + /* 142 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name", + /* 143 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name", + /* 144 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal", + /* 145 */ "multi_create_clause ::= create_subtable_clause", + /* 146 */ "multi_create_clause ::= multi_create_clause 
create_subtable_clause", + /* 147 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options", + /* 148 */ "multi_drop_clause ::= drop_table_clause", + /* 149 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause", + /* 150 */ "drop_table_clause ::= exists_opt full_table_name", + /* 151 */ "specific_cols_opt ::=", + /* 152 */ "specific_cols_opt ::= NK_LP col_name_list NK_RP", + /* 153 */ "full_table_name ::= table_name", + /* 154 */ "full_table_name ::= db_name NK_DOT table_name", + /* 155 */ "column_def_list ::= column_def", + /* 156 */ "column_def_list ::= column_def_list NK_COMMA column_def", + /* 157 */ "column_def ::= column_name type_name", + /* 158 */ "column_def ::= column_name type_name COMMENT NK_STRING", + /* 159 */ "type_name ::= BOOL", + /* 160 */ "type_name ::= TINYINT", + /* 161 */ "type_name ::= SMALLINT", + /* 162 */ "type_name ::= INT", + /* 163 */ "type_name ::= INTEGER", + /* 164 */ "type_name ::= BIGINT", + /* 165 */ "type_name ::= FLOAT", + /* 166 */ "type_name ::= DOUBLE", + /* 167 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP", + /* 168 */ "type_name ::= TIMESTAMP", + /* 169 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP", + /* 170 */ "type_name ::= TINYINT UNSIGNED", + /* 171 */ "type_name ::= SMALLINT UNSIGNED", + /* 172 */ "type_name ::= INT UNSIGNED", + /* 173 */ "type_name ::= BIGINT UNSIGNED", + /* 174 */ "type_name ::= JSON", + /* 175 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP", + /* 176 */ "type_name ::= MEDIUMBLOB", + /* 177 */ "type_name ::= BLOB", + /* 178 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP", + /* 179 */ "type_name ::= DECIMAL", + /* 180 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP", + /* 181 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP", + /* 182 */ "tags_def_opt ::=", + /* 183 */ "tags_def_opt ::= tags_def", + /* 184 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP", + /* 185 */ "table_options ::=", + /* 186 */ "table_options ::= table_options COMMENT NK_STRING", + /* 187 */ "table_options ::= table_options MAX_DELAY duration_list", + /* 188 */ "table_options ::= table_options WATERMARK duration_list", + /* 189 */ "table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP", + /* 190 */ "table_options ::= table_options TTL NK_INTEGER", + /* 191 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", + /* 192 */ "table_options ::= table_options DELETE_MARK duration_list", + /* 193 */ "alter_table_options ::= alter_table_option", + /* 194 */ "alter_table_options ::= alter_table_options alter_table_option", + /* 195 */ "alter_table_option ::= COMMENT NK_STRING", + /* 196 */ "alter_table_option ::= TTL NK_INTEGER", + /* 197 */ "duration_list ::= duration_literal", + /* 198 */ "duration_list ::= duration_list NK_COMMA duration_literal", + /* 199 */ "rollup_func_list ::= rollup_func_name", + /* 200 */ "rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name", + /* 201 */ "rollup_func_name ::= function_name", + /* 202 */ "rollup_func_name ::= FIRST", + /* 203 */ "rollup_func_name ::= LAST", + /* 204 */ "col_name_list ::= col_name", + /* 205 */ "col_name_list ::= col_name_list NK_COMMA col_name", + /* 206 */ "col_name ::= column_name", + /* 207 */ "cmd ::= SHOW DNODES", + /* 208 */ "cmd ::= SHOW USERS", + /* 209 */ "cmd ::= SHOW USER PRIVILEGES", + /* 210 */ "cmd ::= SHOW DATABASES", + /* 211 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt", + /* 212 */ "cmd ::= SHOW 
db_name_cond_opt STABLES like_pattern_opt", + /* 213 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", + /* 214 */ "cmd ::= SHOW MNODES", + /* 215 */ "cmd ::= SHOW QNODES", + /* 216 */ "cmd ::= SHOW FUNCTIONS", + /* 217 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", + /* 218 */ "cmd ::= SHOW STREAMS", + /* 219 */ "cmd ::= SHOW ACCOUNTS", + /* 220 */ "cmd ::= SHOW APPS", + /* 221 */ "cmd ::= SHOW CONNECTIONS", + /* 222 */ "cmd ::= SHOW LICENCES", + /* 223 */ "cmd ::= SHOW GRANTS", + /* 224 */ "cmd ::= SHOW CREATE DATABASE db_name", + /* 225 */ "cmd ::= SHOW CREATE TABLE full_table_name", + /* 226 */ "cmd ::= SHOW CREATE STABLE full_table_name", + /* 227 */ "cmd ::= SHOW QUERIES", + /* 228 */ "cmd ::= SHOW SCORES", + /* 229 */ "cmd ::= SHOW TOPICS", + /* 230 */ "cmd ::= SHOW VARIABLES", + /* 231 */ "cmd ::= SHOW CLUSTER VARIABLES", + /* 232 */ "cmd ::= SHOW LOCAL VARIABLES", + /* 233 */ "cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt", + /* 234 */ "cmd ::= SHOW BNODES", + /* 235 */ "cmd ::= SHOW SNODES", + /* 236 */ "cmd ::= SHOW CLUSTER", + /* 237 */ "cmd ::= SHOW TRANSACTIONS", + /* 238 */ "cmd ::= SHOW TABLE DISTRIBUTED full_table_name", + /* 239 */ "cmd ::= SHOW CONSUMERS", + /* 240 */ "cmd ::= SHOW SUBSCRIPTIONS", + /* 241 */ "cmd ::= SHOW TAGS FROM table_name_cond from_db_opt", + /* 242 */ "cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt", + /* 243 */ "cmd ::= SHOW VNODES NK_INTEGER", + /* 244 */ "cmd ::= SHOW VNODES NK_STRING", + /* 245 */ "cmd ::= SHOW db_name_cond_opt ALIVE", + /* 246 */ "cmd ::= SHOW CLUSTER ALIVE", + /* 247 */ "db_name_cond_opt ::=", + /* 248 */ "db_name_cond_opt ::= db_name NK_DOT", + /* 249 */ "like_pattern_opt ::=", + /* 250 */ "like_pattern_opt ::= LIKE NK_STRING", + /* 251 */ "table_name_cond ::= table_name", + /* 252 */ "from_db_opt ::=", + /* 253 */ "from_db_opt ::= FROM db_name", + /* 254 */ "tag_list_opt ::=", + /* 255 */ "tag_list_opt ::= tag_item", + /* 256 */ "tag_list_opt ::= tag_list_opt NK_COMMA tag_item", + /* 257 */ "tag_item ::= TBNAME", + /* 258 */ "tag_item ::= QTAGS", + /* 259 */ "tag_item ::= column_name", + /* 260 */ "tag_item ::= column_name column_alias", + /* 261 */ "tag_item ::= column_name AS column_alias", + /* 262 */ "cmd ::= CREATE SMA INDEX not_exists_opt full_index_name ON full_table_name index_options", + /* 263 */ "cmd ::= CREATE INDEX not_exists_opt full_index_name ON full_table_name NK_LP col_name_list NK_RP", + /* 264 */ "cmd ::= DROP INDEX exists_opt full_index_name", + /* 265 */ "full_index_name ::= index_name", + /* 266 */ "full_index_name ::= db_name NK_DOT index_name", + /* 267 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt", + /* 268 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt", + /* 269 */ "func_list ::= func", + /* 270 */ "func_list ::= func_list NK_COMMA func", + /* 271 */ "func ::= sma_func_name NK_LP expression_list NK_RP", + /* 272 */ "sma_func_name ::= function_name", + /* 273 */ "sma_func_name ::= COUNT", + /* 274 */ "sma_func_name ::= FIRST", + /* 275 */ "sma_func_name ::= LAST", + /* 276 */ "sma_func_name ::= LAST_ROW", + /* 277 */ "sma_stream_opt ::=", + /* 278 */ "sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal", + /* 279 */ "sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal", + /* 280 */ "sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal", + /* 281 */ "cmd ::= 
CREATE TOPIC not_exists_opt topic_name AS query_or_subquery", + /* 282 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name", + /* 283 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name", + /* 284 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name", + /* 285 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name", + /* 286 */ "cmd ::= DROP TOPIC exists_opt topic_name", + /* 287 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name", + /* 288 */ "cmd ::= DESC full_table_name", + /* 289 */ "cmd ::= DESCRIBE full_table_name", + /* 290 */ "cmd ::= RESET QUERY CACHE", + /* 291 */ "cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery", + /* 292 */ "analyze_opt ::=", + /* 293 */ "analyze_opt ::= ANALYZE", + /* 294 */ "explain_options ::=", + /* 295 */ "explain_options ::= explain_options VERBOSE NK_BOOL", + /* 296 */ "explain_options ::= explain_options RATIO NK_FLOAT", + /* 297 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt", + /* 298 */ "cmd ::= DROP FUNCTION exists_opt function_name", + /* 299 */ "agg_func_opt ::=", + /* 300 */ "agg_func_opt ::= AGGREGATE", + /* 301 */ "bufsize_opt ::=", + /* 302 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", + /* 303 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery", + /* 304 */ "cmd ::= DROP STREAM exists_opt stream_name", + /* 305 */ "col_list_opt ::=", + /* 306 */ "col_list_opt ::= NK_LP col_name_list NK_RP", + /* 307 */ "tag_def_or_ref_opt ::=", + /* 308 */ "tag_def_or_ref_opt ::= tags_def", + /* 309 */ "tag_def_or_ref_opt ::= TAGS NK_LP col_name_list NK_RP", + /* 310 */ "stream_options ::=", + /* 311 */ "stream_options ::= stream_options TRIGGER AT_ONCE", + /* 312 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", + /* 313 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal", + /* 314 */ "stream_options ::= stream_options WATERMARK duration_literal", + /* 315 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER", + /* 316 */ "stream_options ::= stream_options FILL_HISTORY NK_INTEGER", + /* 317 */ "subtable_opt ::=", + /* 318 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP", + /* 319 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 320 */ "cmd ::= KILL QUERY NK_STRING", + /* 321 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 322 */ "cmd ::= BALANCE VGROUP", + /* 323 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 324 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 325 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 326 */ "dnode_list ::= DNODE NK_INTEGER", + /* 327 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 328 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", + /* 329 */ "cmd ::= query_or_subquery", + /* 330 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery", + /* 331 */ "cmd ::= INSERT INTO full_table_name query_or_subquery", + /* 332 */ "literal ::= NK_INTEGER", + /* 333 */ "literal ::= NK_FLOAT", + /* 334 */ "literal ::= NK_STRING", + /* 335 */ "literal ::= NK_BOOL", + /* 336 */ "literal ::= TIMESTAMP NK_STRING", + /* 337 */ "literal ::= duration_literal", + /* 338 */ "literal ::= NULL", + /* 339 */ "literal ::= NK_QUESTION", + /* 340 */ "duration_literal ::= NK_VARIABLE", + /* 341 */ "signed ::= NK_INTEGER", + /* 342 */ "signed ::= NK_PLUS 
NK_INTEGER", + /* 343 */ "signed ::= NK_MINUS NK_INTEGER", + /* 344 */ "signed ::= NK_FLOAT", + /* 345 */ "signed ::= NK_PLUS NK_FLOAT", + /* 346 */ "signed ::= NK_MINUS NK_FLOAT", + /* 347 */ "signed_literal ::= signed", + /* 348 */ "signed_literal ::= NK_STRING", + /* 349 */ "signed_literal ::= NK_BOOL", + /* 350 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 351 */ "signed_literal ::= duration_literal", + /* 352 */ "signed_literal ::= NULL", + /* 353 */ "signed_literal ::= literal_func", + /* 354 */ "signed_literal ::= NK_QUESTION", + /* 355 */ "literal_list ::= signed_literal", + /* 356 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 357 */ "db_name ::= NK_ID", + /* 358 */ "table_name ::= NK_ID", + /* 359 */ "column_name ::= NK_ID", + /* 360 */ "function_name ::= NK_ID", + /* 361 */ "table_alias ::= NK_ID", + /* 362 */ "column_alias ::= NK_ID", + /* 363 */ "user_name ::= NK_ID", + /* 364 */ "topic_name ::= NK_ID", + /* 365 */ "stream_name ::= NK_ID", + /* 366 */ "cgroup_name ::= NK_ID", + /* 367 */ "index_name ::= NK_ID", + /* 368 */ "expr_or_subquery ::= expression", + /* 369 */ "expression ::= literal", + /* 370 */ "expression ::= pseudo_column", + /* 371 */ "expression ::= column_reference", + /* 372 */ "expression ::= function_expression", + /* 373 */ "expression ::= case_when_expression", + /* 374 */ "expression ::= NK_LP expression NK_RP", + /* 375 */ "expression ::= NK_PLUS expr_or_subquery", + /* 376 */ "expression ::= NK_MINUS expr_or_subquery", + /* 377 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery", + /* 378 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery", + /* 379 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery", + /* 380 */ "expression ::= expr_or_subquery NK_SLASH expr_or_subquery", + /* 381 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery", + /* 382 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 383 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery", + /* 384 */ "expression ::= expr_or_subquery NK_BITOR expr_or_subquery", + /* 385 */ "expression_list ::= expr_or_subquery", + /* 386 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery", + /* 387 */ "column_reference ::= column_name", + /* 388 */ "column_reference ::= table_name NK_DOT column_name", + /* 389 */ "pseudo_column ::= ROWTS", + /* 390 */ "pseudo_column ::= TBNAME", + /* 391 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 392 */ "pseudo_column ::= QSTART", + /* 393 */ "pseudo_column ::= QEND", + /* 394 */ "pseudo_column ::= QDURATION", + /* 395 */ "pseudo_column ::= WSTART", + /* 396 */ "pseudo_column ::= WEND", + /* 397 */ "pseudo_column ::= WDURATION", + /* 398 */ "pseudo_column ::= IROWTS", + /* 399 */ "pseudo_column ::= ISFILLED", + /* 400 */ "pseudo_column ::= QTAGS", + /* 401 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 402 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 403 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP", + /* 404 */ "function_expression ::= literal_func", + /* 405 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 406 */ "literal_func ::= NOW", + /* 407 */ "noarg_func ::= NOW", + /* 408 */ "noarg_func ::= TODAY", + /* 409 */ "noarg_func ::= TIMEZONE", + /* 410 */ "noarg_func ::= DATABASE", + /* 411 */ "noarg_func ::= CLIENT_VERSION", + /* 412 */ "noarg_func ::= SERVER_VERSION", + /* 413 */ "noarg_func ::= SERVER_STATUS", + /* 414 */ "noarg_func ::= CURRENT_USER", + /* 415 */ 
"noarg_func ::= USER", + /* 416 */ "star_func ::= COUNT", + /* 417 */ "star_func ::= FIRST", + /* 418 */ "star_func ::= LAST", + /* 419 */ "star_func ::= LAST_ROW", + /* 420 */ "star_func_para_list ::= NK_STAR", + /* 421 */ "star_func_para_list ::= other_para_list", + /* 422 */ "other_para_list ::= star_func_para", + /* 423 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 424 */ "star_func_para ::= expr_or_subquery", + /* 425 */ "star_func_para ::= table_name NK_DOT NK_STAR", + /* 426 */ "case_when_expression ::= CASE when_then_list case_when_else_opt END", + /* 427 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END", + /* 428 */ "when_then_list ::= when_then_expr", + /* 429 */ "when_then_list ::= when_then_list when_then_expr", + /* 430 */ "when_then_expr ::= WHEN common_expression THEN common_expression", + /* 431 */ "case_when_else_opt ::=", + /* 432 */ "case_when_else_opt ::= ELSE common_expression", + /* 433 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery", + /* 434 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery", + /* 435 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery", + /* 436 */ "predicate ::= expr_or_subquery IS NULL", + /* 437 */ "predicate ::= expr_or_subquery IS NOT NULL", + /* 438 */ "predicate ::= expr_or_subquery in_op in_predicate_value", + /* 439 */ "compare_op ::= NK_LT", + /* 440 */ "compare_op ::= NK_GT", + /* 441 */ "compare_op ::= NK_LE", + /* 442 */ "compare_op ::= NK_GE", + /* 443 */ "compare_op ::= NK_NE", + /* 444 */ "compare_op ::= NK_EQ", + /* 445 */ "compare_op ::= LIKE", + /* 446 */ "compare_op ::= NOT LIKE", + /* 447 */ "compare_op ::= MATCH", + /* 448 */ "compare_op ::= NMATCH", + /* 449 */ "compare_op ::= CONTAINS", + /* 450 */ "in_op ::= IN", + /* 451 */ "in_op ::= NOT IN", + /* 452 */ "in_predicate_value ::= NK_LP literal_list NK_RP", + /* 453 */ "boolean_value_expression ::= boolean_primary", + /* 454 */ "boolean_value_expression ::= NOT boolean_primary", + /* 455 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 456 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 457 */ "boolean_primary ::= predicate", + /* 458 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 459 */ "common_expression ::= expr_or_subquery", + /* 460 */ "common_expression ::= boolean_value_expression", + /* 461 */ "from_clause_opt ::=", + /* 462 */ "from_clause_opt ::= FROM table_reference_list", + /* 463 */ "table_reference_list ::= table_reference", + /* 464 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 465 */ "table_reference ::= table_primary", + /* 466 */ "table_reference ::= joined_table", + /* 467 */ "table_primary ::= table_name alias_opt", + /* 468 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 469 */ "table_primary ::= subquery alias_opt", + /* 470 */ "table_primary ::= parenthesized_joined_table", + /* 471 */ "alias_opt ::=", + /* 472 */ "alias_opt ::= table_alias", + /* 473 */ "alias_opt ::= AS table_alias", + /* 474 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 475 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 476 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 477 */ "join_type ::=", + /* 478 */ "join_type ::= INNER", + /* 479 */ "query_specification ::= SELECT 
set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 480 */ "set_quantifier_opt ::=", + /* 481 */ "set_quantifier_opt ::= DISTINCT", + /* 482 */ "set_quantifier_opt ::= ALL", + /* 483 */ "select_list ::= select_item", + /* 484 */ "select_list ::= select_list NK_COMMA select_item", + /* 485 */ "select_item ::= NK_STAR", + /* 486 */ "select_item ::= common_expression", + /* 487 */ "select_item ::= common_expression column_alias", + /* 488 */ "select_item ::= common_expression AS column_alias", + /* 489 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 490 */ "where_clause_opt ::=", + /* 491 */ "where_clause_opt ::= WHERE search_condition", + /* 492 */ "partition_by_clause_opt ::=", + /* 493 */ "partition_by_clause_opt ::= PARTITION BY partition_list", + /* 494 */ "partition_list ::= partition_item", + /* 495 */ "partition_list ::= partition_list NK_COMMA partition_item", + /* 496 */ "partition_item ::= expr_or_subquery", + /* 497 */ "partition_item ::= expr_or_subquery column_alias", + /* 498 */ "partition_item ::= expr_or_subquery AS column_alias", + /* 499 */ "twindow_clause_opt ::=", + /* 500 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", + /* 501 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP", + /* 502 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 503 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 504 */ "twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition", + /* 505 */ "sliding_opt ::=", + /* 506 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 507 */ "fill_opt ::=", + /* 508 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 509 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 510 */ "fill_mode ::= NONE", + /* 511 */ "fill_mode ::= PREV", + /* 512 */ "fill_mode ::= NULL", + /* 513 */ "fill_mode ::= LINEAR", + /* 514 */ "fill_mode ::= NEXT", + /* 515 */ "group_by_clause_opt ::=", + /* 516 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 517 */ "group_by_list ::= expr_or_subquery", + /* 518 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery", + /* 519 */ "having_clause_opt ::=", + /* 520 */ "having_clause_opt ::= HAVING search_condition", + /* 521 */ "range_opt ::=", + /* 522 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP", + /* 523 */ "every_opt ::=", + /* 524 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", + /* 525 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 526 */ "query_simple ::= query_specification", + /* 527 */ "query_simple ::= union_query_expression", + /* 528 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery", + /* 529 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery", + /* 530 */ "query_simple_or_subquery ::= query_simple", + /* 531 */ "query_simple_or_subquery ::= subquery", + /* 532 */ "query_or_subquery ::= query_expression", + /* 533 */ "query_or_subquery ::= subquery", + /* 534 */ "order_by_clause_opt ::=", + /* 535 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 536 */ "slimit_clause_opt ::=", + /* 537 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 538 */ "slimit_clause_opt 
::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 539 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 540 */ "limit_clause_opt ::=", + /* 541 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 542 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", + /* 543 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 544 */ "subquery ::= NK_LP query_expression NK_RP", + /* 545 */ "subquery ::= NK_LP subquery NK_RP", + /* 546 */ "search_condition ::= common_expression", + /* 547 */ "sort_specification_list ::= sort_specification", + /* 548 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 549 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt", + /* 550 */ "ordering_specification_opt ::=", + /* 551 */ "ordering_specification_opt ::= ASC", + /* 552 */ "ordering_specification_opt ::= DESC", + /* 553 */ "null_ordering_opt ::=", + /* 554 */ "null_ordering_opt ::= NULLS FIRST", + /* 555 */ "null_ordering_opt ::= NULLS LAST", +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 }; #endif /* NDEBUG */ @@ -2602,49 +3894,49 @@ static void yy_destructor( case 392: /* query_or_subquery */ case 395: /* explain_options */ case 399: /* stream_options */ - case 401: /* subtable_opt */ - case 402: /* expression */ - case 404: /* where_clause_opt */ - case 405: /* signed */ - case 406: /* literal_func */ - case 409: /* expr_or_subquery */ - case 410: /* pseudo_column */ - case 411: /* column_reference */ - case 412: /* function_expression */ - case 413: /* case_when_expression */ - case 418: /* star_func_para */ - case 420: /* case_when_else_opt */ - case 421: /* common_expression */ - case 422: /* when_then_expr */ - case 423: /* predicate */ - case 426: /* in_predicate_value */ - case 427: /* boolean_value_expression */ - case 428: /* boolean_primary */ - case 429: /* from_clause_opt */ - case 430: /* table_reference_list */ - case 431: /* table_reference */ - case 432: /* table_primary */ - case 433: /* joined_table */ - case 435: /* subquery */ - case 436: /* parenthesized_joined_table */ - case 438: /* search_condition */ - case 439: /* query_specification */ - case 443: /* range_opt */ - case 444: /* every_opt */ - case 445: /* fill_opt */ - case 446: /* twindow_clause_opt */ - case 448: /* having_clause_opt */ - case 449: /* select_item */ - case 451: /* partition_item */ - case 454: /* query_expression */ - case 455: /* query_simple */ - case 457: /* slimit_clause_opt */ - case 458: /* limit_clause_opt */ - case 459: /* union_query_expression */ - case 460: /* query_simple_or_subquery */ - case 462: /* sort_specification */ + case 402: /* subtable_opt */ + case 403: /* expression */ + case 405: /* where_clause_opt */ + case 406: /* signed */ + case 407: /* literal_func */ + case 410: /* expr_or_subquery */ + case 411: /* pseudo_column */ + case 412: /* column_reference */ + case 413: /* function_expression */ + case 414: /* case_when_expression */ + case 419: /* star_func_para */ + case 421: /* case_when_else_opt */ + case 422: /* common_expression */ + case 423: /* when_then_expr */ + case 424: /* predicate */ + case 427: /* in_predicate_value */ + case 428: /* boolean_value_expression */ + case 429: /* boolean_primary */ + case 430: /* from_clause_opt */ + case 431: /* table_reference_list */ + case 432: /* table_reference */ + case 433: /* table_primary */ + case 434: /* joined_table */ + case 436: /* subquery */ + case 437: /* parenthesized_joined_table */ + case 439: /* 
search_condition */ + case 440: /* query_specification */ + case 444: /* range_opt */ + case 445: /* every_opt */ + case 446: /* fill_opt */ + case 447: /* twindow_clause_opt */ + case 449: /* having_clause_opt */ + case 450: /* select_item */ + case 452: /* partition_item */ + case 455: /* query_expression */ + case 456: /* query_simple */ + case 458: /* slimit_clause_opt */ + case 459: /* limit_clause_opt */ + case 460: /* union_query_expression */ + case 461: /* query_simple_or_subquery */ + case 463: /* sort_specification */ { - nodesDestroyNode((yypminor->yy602)); + nodesDestroyNode((yypminor->yy924)); } break; case 327: /* account_options */ @@ -2669,10 +3961,10 @@ static void yy_destructor( case 391: /* sma_func_name */ case 393: /* cgroup_name */ case 398: /* stream_name */ - case 408: /* table_alias */ - case 414: /* star_func */ - case 416: /* noarg_func */ - case 434: /* alias_opt */ + case 409: /* table_alias */ + case 415: /* star_func */ + case 417: /* noarg_func */ + case 435: /* alias_opt */ { } @@ -2694,7 +3986,7 @@ static void yy_destructor( case 343: /* exists_opt */ case 394: /* analyze_opt */ case 396: /* agg_func_opt */ - case 440: /* set_quantifier_opt */ + case 441: /* set_quantifier_opt */ { } @@ -2715,20 +4007,21 @@ static void yy_destructor( case 381: /* tag_list_opt */ case 387: /* func_list */ case 400: /* col_list_opt */ - case 403: /* dnode_list */ - case 407: /* literal_list */ - case 415: /* star_func_para_list */ - case 417: /* other_para_list */ - case 419: /* when_then_list */ - case 441: /* select_list */ - case 442: /* partition_by_clause_opt */ - case 447: /* group_by_clause_opt */ - case 450: /* partition_list */ - case 453: /* group_by_list */ - case 456: /* order_by_clause_opt */ - case 461: /* sort_specification_list */ + case 401: /* tag_def_or_ref_opt */ + case 404: /* dnode_list */ + case 408: /* literal_list */ + case 416: /* star_func_para_list */ + case 418: /* other_para_list */ + case 420: /* when_then_list */ + case 442: /* select_list */ + case 443: /* partition_by_clause_opt */ + case 448: /* group_by_clause_opt */ + case 451: /* partition_list */ + case 454: /* group_by_list */ + case 457: /* order_by_clause_opt */ + case 462: /* sort_specification_list */ { - nodesDestroyList((yypminor->yy874)); + nodesDestroyList((yypminor->yy776)); } break; case 349: /* alter_db_option */ @@ -2742,28 +4035,28 @@ static void yy_destructor( } break; - case 424: /* compare_op */ - case 425: /* in_op */ + case 425: /* compare_op */ + case 426: /* in_op */ { } break; - case 437: /* join_type */ + case 438: /* join_type */ { } break; - case 452: /* fill_mode */ + case 453: /* fill_mode */ { } break; - case 463: /* ordering_specification_opt */ + case 464: /* ordering_specification_opt */ { } break; - case 464: /* null_ordering_opt */ + case 465: /* null_ordering_opt */ { } @@ -3134,6 +4427,7 @@ static const struct { { 326, -4 }, /* (69) cmd ::= ALTER DATABASE db_name alter_db_options */ { 326, -3 }, /* (70) cmd ::= FLUSH DATABASE db_name */ { 326, -4 }, /* (71) cmd ::= TRIM DATABASE db_name speed_opt */ +<<<<<<< HEAD { 326, -3 }, /* (72) cmd ::= COMPACT DATABASE db_name */ { 341, -3 }, /* (73) not_exists_opt ::= IF NOT EXISTS */ { 341, 0 }, /* (74) not_exists_opt ::= */ @@ -3616,6 +4910,492 @@ static const struct { { 464, 0 }, /* (551) null_ordering_opt ::= */ { 464, -2 }, /* (552) null_ordering_opt ::= NULLS FIRST */ { 464, -2 }, /* (553) null_ordering_opt ::= NULLS LAST */ +======= + { 341, -3 }, /* (72) not_exists_opt ::= IF NOT EXISTS */ + { 341, 0 }, 
/* (73) not_exists_opt ::= */ + { 343, -2 }, /* (74) exists_opt ::= IF EXISTS */ + { 343, 0 }, /* (75) exists_opt ::= */ + { 342, 0 }, /* (76) db_options ::= */ + { 342, -3 }, /* (77) db_options ::= db_options BUFFER NK_INTEGER */ + { 342, -3 }, /* (78) db_options ::= db_options CACHEMODEL NK_STRING */ + { 342, -3 }, /* (79) db_options ::= db_options CACHESIZE NK_INTEGER */ + { 342, -3 }, /* (80) db_options ::= db_options COMP NK_INTEGER */ + { 342, -3 }, /* (81) db_options ::= db_options DURATION NK_INTEGER */ + { 342, -3 }, /* (82) db_options ::= db_options DURATION NK_VARIABLE */ + { 342, -3 }, /* (83) db_options ::= db_options MAXROWS NK_INTEGER */ + { 342, -3 }, /* (84) db_options ::= db_options MINROWS NK_INTEGER */ + { 342, -3 }, /* (85) db_options ::= db_options KEEP integer_list */ + { 342, -3 }, /* (86) db_options ::= db_options KEEP variable_list */ + { 342, -3 }, /* (87) db_options ::= db_options PAGES NK_INTEGER */ + { 342, -3 }, /* (88) db_options ::= db_options PAGESIZE NK_INTEGER */ + { 342, -3 }, /* (89) db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */ + { 342, -3 }, /* (90) db_options ::= db_options PRECISION NK_STRING */ + { 342, -3 }, /* (91) db_options ::= db_options REPLICA NK_INTEGER */ + { 342, -3 }, /* (92) db_options ::= db_options VGROUPS NK_INTEGER */ + { 342, -3 }, /* (93) db_options ::= db_options SINGLE_STABLE NK_INTEGER */ + { 342, -3 }, /* (94) db_options ::= db_options RETENTIONS retention_list */ + { 342, -3 }, /* (95) db_options ::= db_options SCHEMALESS NK_INTEGER */ + { 342, -3 }, /* (96) db_options ::= db_options WAL_LEVEL NK_INTEGER */ + { 342, -3 }, /* (97) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ + { 342, -3 }, /* (98) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ + { 342, -4 }, /* (99) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ + { 342, -3 }, /* (100) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ + { 342, -4 }, /* (101) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ + { 342, -3 }, /* (102) db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ + { 342, -3 }, /* (103) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ + { 342, -3 }, /* (104) db_options ::= db_options STT_TRIGGER NK_INTEGER */ + { 342, -3 }, /* (105) db_options ::= db_options TABLE_PREFIX NK_INTEGER */ + { 342, -3 }, /* (106) db_options ::= db_options TABLE_SUFFIX NK_INTEGER */ + { 344, -1 }, /* (107) alter_db_options ::= alter_db_option */ + { 344, -2 }, /* (108) alter_db_options ::= alter_db_options alter_db_option */ + { 349, -2 }, /* (109) alter_db_option ::= BUFFER NK_INTEGER */ + { 349, -2 }, /* (110) alter_db_option ::= CACHEMODEL NK_STRING */ + { 349, -2 }, /* (111) alter_db_option ::= CACHESIZE NK_INTEGER */ + { 349, -2 }, /* (112) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ + { 349, -2 }, /* (113) alter_db_option ::= KEEP integer_list */ + { 349, -2 }, /* (114) alter_db_option ::= KEEP variable_list */ + { 349, -2 }, /* (115) alter_db_option ::= PAGES NK_INTEGER */ + { 349, -2 }, /* (116) alter_db_option ::= REPLICA NK_INTEGER */ + { 349, -2 }, /* (117) alter_db_option ::= WAL_LEVEL NK_INTEGER */ + { 349, -2 }, /* (118) alter_db_option ::= STT_TRIGGER NK_INTEGER */ + { 346, -1 }, /* (119) integer_list ::= NK_INTEGER */ + { 346, -3 }, /* (120) integer_list ::= integer_list NK_COMMA NK_INTEGER */ + { 347, -1 }, /* (121) variable_list ::= NK_VARIABLE */ + { 347, -3 }, /* (122) variable_list ::= variable_list NK_COMMA NK_VARIABLE */ + { 348, -1 }, /* (123) 
retention_list ::= retention */ + { 348, -3 }, /* (124) retention_list ::= retention_list NK_COMMA retention */ + { 350, -3 }, /* (125) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ + { 345, 0 }, /* (126) speed_opt ::= */ + { 345, -2 }, /* (127) speed_opt ::= MAX_SPEED NK_INTEGER */ + { 326, -9 }, /* (128) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ + { 326, -3 }, /* (129) cmd ::= CREATE TABLE multi_create_clause */ + { 326, -9 }, /* (130) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ + { 326, -3 }, /* (131) cmd ::= DROP TABLE multi_drop_clause */ + { 326, -4 }, /* (132) cmd ::= DROP STABLE exists_opt full_table_name */ + { 326, -3 }, /* (133) cmd ::= ALTER TABLE alter_table_clause */ + { 326, -3 }, /* (134) cmd ::= ALTER STABLE alter_table_clause */ + { 358, -2 }, /* (135) alter_table_clause ::= full_table_name alter_table_options */ + { 358, -5 }, /* (136) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ + { 358, -4 }, /* (137) alter_table_clause ::= full_table_name DROP COLUMN column_name */ + { 358, -5 }, /* (138) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ + { 358, -5 }, /* (139) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ + { 358, -5 }, /* (140) alter_table_clause ::= full_table_name ADD TAG column_name type_name */ + { 358, -4 }, /* (141) alter_table_clause ::= full_table_name DROP TAG column_name */ + { 358, -5 }, /* (142) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ + { 358, -5 }, /* (143) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ + { 358, -6 }, /* (144) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ + { 355, -1 }, /* (145) multi_create_clause ::= create_subtable_clause */ + { 355, -2 }, /* (146) multi_create_clause ::= multi_create_clause create_subtable_clause */ + { 363, -10 }, /* (147) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */ + { 357, -1 }, /* (148) multi_drop_clause ::= drop_table_clause */ + { 357, -2 }, /* (149) multi_drop_clause ::= multi_drop_clause drop_table_clause */ + { 366, -2 }, /* (150) drop_table_clause ::= exists_opt full_table_name */ + { 364, 0 }, /* (151) specific_cols_opt ::= */ + { 364, -3 }, /* (152) specific_cols_opt ::= NK_LP col_name_list NK_RP */ + { 351, -1 }, /* (153) full_table_name ::= table_name */ + { 351, -3 }, /* (154) full_table_name ::= db_name NK_DOT table_name */ + { 352, -1 }, /* (155) column_def_list ::= column_def */ + { 352, -3 }, /* (156) column_def_list ::= column_def_list NK_COMMA column_def */ + { 369, -2 }, /* (157) column_def ::= column_name type_name */ + { 369, -4 }, /* (158) column_def ::= column_name type_name COMMENT NK_STRING */ + { 361, -1 }, /* (159) type_name ::= BOOL */ + { 361, -1 }, /* (160) type_name ::= TINYINT */ + { 361, -1 }, /* (161) type_name ::= SMALLINT */ + { 361, -1 }, /* (162) type_name ::= INT */ + { 361, -1 }, /* (163) type_name ::= INTEGER */ + { 361, -1 }, /* (164) type_name ::= BIGINT */ + { 361, -1 }, /* (165) type_name ::= FLOAT */ + { 361, -1 }, /* (166) type_name ::= DOUBLE */ + { 361, -4 }, /* (167) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ + { 361, -1 }, /* (168) type_name ::= TIMESTAMP */ + { 361, -4 }, /* (169) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ + { 
361, -2 }, /* (170) type_name ::= TINYINT UNSIGNED */ + { 361, -2 }, /* (171) type_name ::= SMALLINT UNSIGNED */ + { 361, -2 }, /* (172) type_name ::= INT UNSIGNED */ + { 361, -2 }, /* (173) type_name ::= BIGINT UNSIGNED */ + { 361, -1 }, /* (174) type_name ::= JSON */ + { 361, -4 }, /* (175) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ + { 361, -1 }, /* (176) type_name ::= MEDIUMBLOB */ + { 361, -1 }, /* (177) type_name ::= BLOB */ + { 361, -4 }, /* (178) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ + { 361, -1 }, /* (179) type_name ::= DECIMAL */ + { 361, -4 }, /* (180) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ + { 361, -6 }, /* (181) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ + { 353, 0 }, /* (182) tags_def_opt ::= */ + { 353, -1 }, /* (183) tags_def_opt ::= tags_def */ + { 356, -4 }, /* (184) tags_def ::= TAGS NK_LP column_def_list NK_RP */ + { 354, 0 }, /* (185) table_options ::= */ + { 354, -3 }, /* (186) table_options ::= table_options COMMENT NK_STRING */ + { 354, -3 }, /* (187) table_options ::= table_options MAX_DELAY duration_list */ + { 354, -3 }, /* (188) table_options ::= table_options WATERMARK duration_list */ + { 354, -5 }, /* (189) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ + { 354, -3 }, /* (190) table_options ::= table_options TTL NK_INTEGER */ + { 354, -5 }, /* (191) table_options ::= table_options SMA NK_LP col_name_list NK_RP */ + { 354, -3 }, /* (192) table_options ::= table_options DELETE_MARK duration_list */ + { 359, -1 }, /* (193) alter_table_options ::= alter_table_option */ + { 359, -2 }, /* (194) alter_table_options ::= alter_table_options alter_table_option */ + { 372, -2 }, /* (195) alter_table_option ::= COMMENT NK_STRING */ + { 372, -2 }, /* (196) alter_table_option ::= TTL NK_INTEGER */ + { 370, -1 }, /* (197) duration_list ::= duration_literal */ + { 370, -3 }, /* (198) duration_list ::= duration_list NK_COMMA duration_literal */ + { 371, -1 }, /* (199) rollup_func_list ::= rollup_func_name */ + { 371, -3 }, /* (200) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ + { 374, -1 }, /* (201) rollup_func_name ::= function_name */ + { 374, -1 }, /* (202) rollup_func_name ::= FIRST */ + { 374, -1 }, /* (203) rollup_func_name ::= LAST */ + { 367, -1 }, /* (204) col_name_list ::= col_name */ + { 367, -3 }, /* (205) col_name_list ::= col_name_list NK_COMMA col_name */ + { 376, -1 }, /* (206) col_name ::= column_name */ + { 326, -2 }, /* (207) cmd ::= SHOW DNODES */ + { 326, -2 }, /* (208) cmd ::= SHOW USERS */ + { 326, -3 }, /* (209) cmd ::= SHOW USER PRIVILEGES */ + { 326, -2 }, /* (210) cmd ::= SHOW DATABASES */ + { 326, -4 }, /* (211) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ + { 326, -4 }, /* (212) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ + { 326, -3 }, /* (213) cmd ::= SHOW db_name_cond_opt VGROUPS */ + { 326, -2 }, /* (214) cmd ::= SHOW MNODES */ + { 326, -2 }, /* (215) cmd ::= SHOW QNODES */ + { 326, -2 }, /* (216) cmd ::= SHOW FUNCTIONS */ + { 326, -5 }, /* (217) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ + { 326, -2 }, /* (218) cmd ::= SHOW STREAMS */ + { 326, -2 }, /* (219) cmd ::= SHOW ACCOUNTS */ + { 326, -2 }, /* (220) cmd ::= SHOW APPS */ + { 326, -2 }, /* (221) cmd ::= SHOW CONNECTIONS */ + { 326, -2 }, /* (222) cmd ::= SHOW LICENCES */ + { 326, -2 }, /* (223) cmd ::= SHOW GRANTS */ + { 326, -4 }, /* (224) cmd ::= SHOW CREATE DATABASE db_name */ + { 326, -4 }, /* (225) cmd ::= SHOW CREATE TABLE full_table_name */ + { 
326, -4 }, /* (226) cmd ::= SHOW CREATE STABLE full_table_name */ + { 326, -2 }, /* (227) cmd ::= SHOW QUERIES */ + { 326, -2 }, /* (228) cmd ::= SHOW SCORES */ + { 326, -2 }, /* (229) cmd ::= SHOW TOPICS */ + { 326, -2 }, /* (230) cmd ::= SHOW VARIABLES */ + { 326, -3 }, /* (231) cmd ::= SHOW CLUSTER VARIABLES */ + { 326, -3 }, /* (232) cmd ::= SHOW LOCAL VARIABLES */ + { 326, -5 }, /* (233) cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ + { 326, -2 }, /* (234) cmd ::= SHOW BNODES */ + { 326, -2 }, /* (235) cmd ::= SHOW SNODES */ + { 326, -2 }, /* (236) cmd ::= SHOW CLUSTER */ + { 326, -2 }, /* (237) cmd ::= SHOW TRANSACTIONS */ + { 326, -4 }, /* (238) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ + { 326, -2 }, /* (239) cmd ::= SHOW CONSUMERS */ + { 326, -2 }, /* (240) cmd ::= SHOW SUBSCRIPTIONS */ + { 326, -5 }, /* (241) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ + { 326, -7 }, /* (242) cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ + { 326, -3 }, /* (243) cmd ::= SHOW VNODES NK_INTEGER */ + { 326, -3 }, /* (244) cmd ::= SHOW VNODES NK_STRING */ + { 326, -3 }, /* (245) cmd ::= SHOW db_name_cond_opt ALIVE */ + { 326, -3 }, /* (246) cmd ::= SHOW CLUSTER ALIVE */ + { 377, 0 }, /* (247) db_name_cond_opt ::= */ + { 377, -2 }, /* (248) db_name_cond_opt ::= db_name NK_DOT */ + { 378, 0 }, /* (249) like_pattern_opt ::= */ + { 378, -2 }, /* (250) like_pattern_opt ::= LIKE NK_STRING */ + { 379, -1 }, /* (251) table_name_cond ::= table_name */ + { 380, 0 }, /* (252) from_db_opt ::= */ + { 380, -2 }, /* (253) from_db_opt ::= FROM db_name */ + { 381, 0 }, /* (254) tag_list_opt ::= */ + { 381, -1 }, /* (255) tag_list_opt ::= tag_item */ + { 381, -3 }, /* (256) tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ + { 382, -1 }, /* (257) tag_item ::= TBNAME */ + { 382, -1 }, /* (258) tag_item ::= QTAGS */ + { 382, -1 }, /* (259) tag_item ::= column_name */ + { 382, -2 }, /* (260) tag_item ::= column_name column_alias */ + { 382, -3 }, /* (261) tag_item ::= column_name AS column_alias */ + { 326, -8 }, /* (262) cmd ::= CREATE SMA INDEX not_exists_opt full_index_name ON full_table_name index_options */ + { 326, -9 }, /* (263) cmd ::= CREATE INDEX not_exists_opt full_index_name ON full_table_name NK_LP col_name_list NK_RP */ + { 326, -4 }, /* (264) cmd ::= DROP INDEX exists_opt full_index_name */ + { 384, -1 }, /* (265) full_index_name ::= index_name */ + { 384, -3 }, /* (266) full_index_name ::= db_name NK_DOT index_name */ + { 385, -10 }, /* (267) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ + { 385, -12 }, /* (268) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ + { 387, -1 }, /* (269) func_list ::= func */ + { 387, -3 }, /* (270) func_list ::= func_list NK_COMMA func */ + { 390, -4 }, /* (271) func ::= sma_func_name NK_LP expression_list NK_RP */ + { 391, -1 }, /* (272) sma_func_name ::= function_name */ + { 391, -1 }, /* (273) sma_func_name ::= COUNT */ + { 391, -1 }, /* (274) sma_func_name ::= FIRST */ + { 391, -1 }, /* (275) sma_func_name ::= LAST */ + { 391, -1 }, /* (276) sma_func_name ::= LAST_ROW */ + { 389, 0 }, /* (277) sma_stream_opt ::= */ + { 389, -3 }, /* (278) sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal */ + { 389, -3 }, /* (279) sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal */ + { 389, -3 }, /* (280) sma_stream_opt ::= sma_stream_opt 
DELETE_MARK duration_literal */ + { 326, -6 }, /* (281) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */ + { 326, -7 }, /* (282) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */ + { 326, -9 }, /* (283) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */ + { 326, -7 }, /* (284) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */ + { 326, -9 }, /* (285) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */ + { 326, -4 }, /* (286) cmd ::= DROP TOPIC exists_opt topic_name */ + { 326, -7 }, /* (287) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ + { 326, -2 }, /* (288) cmd ::= DESC full_table_name */ + { 326, -2 }, /* (289) cmd ::= DESCRIBE full_table_name */ + { 326, -3 }, /* (290) cmd ::= RESET QUERY CACHE */ + { 326, -4 }, /* (291) cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ + { 394, 0 }, /* (292) analyze_opt ::= */ + { 394, -1 }, /* (293) analyze_opt ::= ANALYZE */ + { 395, 0 }, /* (294) explain_options ::= */ + { 395, -3 }, /* (295) explain_options ::= explain_options VERBOSE NK_BOOL */ + { 395, -3 }, /* (296) explain_options ::= explain_options RATIO NK_FLOAT */ + { 326, -10 }, /* (297) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ + { 326, -4 }, /* (298) cmd ::= DROP FUNCTION exists_opt function_name */ + { 396, 0 }, /* (299) agg_func_opt ::= */ + { 396, -1 }, /* (300) agg_func_opt ::= AGGREGATE */ + { 397, 0 }, /* (301) bufsize_opt ::= */ + { 397, -2 }, /* (302) bufsize_opt ::= BUFSIZE NK_INTEGER */ + { 326, -12 }, /* (303) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */ + { 326, -4 }, /* (304) cmd ::= DROP STREAM exists_opt stream_name */ + { 400, 0 }, /* (305) col_list_opt ::= */ + { 400, -3 }, /* (306) col_list_opt ::= NK_LP col_name_list NK_RP */ + { 401, 0 }, /* (307) tag_def_or_ref_opt ::= */ + { 401, -1 }, /* (308) tag_def_or_ref_opt ::= tags_def */ + { 401, -4 }, /* (309) tag_def_or_ref_opt ::= TAGS NK_LP col_name_list NK_RP */ + { 399, 0 }, /* (310) stream_options ::= */ + { 399, -3 }, /* (311) stream_options ::= stream_options TRIGGER AT_ONCE */ + { 399, -3 }, /* (312) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ + { 399, -4 }, /* (313) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ + { 399, -3 }, /* (314) stream_options ::= stream_options WATERMARK duration_literal */ + { 399, -4 }, /* (315) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ + { 399, -3 }, /* (316) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ + { 402, 0 }, /* (317) subtable_opt ::= */ + { 402, -4 }, /* (318) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ + { 326, -3 }, /* (319) cmd ::= KILL CONNECTION NK_INTEGER */ + { 326, -3 }, /* (320) cmd ::= KILL QUERY NK_STRING */ + { 326, -3 }, /* (321) cmd ::= KILL TRANSACTION NK_INTEGER */ + { 326, -2 }, /* (322) cmd ::= BALANCE VGROUP */ + { 326, -4 }, /* (323) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + { 326, -4 }, /* (324) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + { 326, -3 }, /* (325) cmd ::= SPLIT VGROUP NK_INTEGER */ + { 404, -2 }, /* (326) dnode_list ::= DNODE NK_INTEGER */ + { 404, -3 }, /* (327) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 326, -4 }, /* (328) cmd ::= DELETE FROM full_table_name where_clause_opt */ + { 326, -1 }, 
/* (329) cmd ::= query_or_subquery */ + { 326, -7 }, /* (330) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ + { 326, -4 }, /* (331) cmd ::= INSERT INTO full_table_name query_or_subquery */ + { 329, -1 }, /* (332) literal ::= NK_INTEGER */ + { 329, -1 }, /* (333) literal ::= NK_FLOAT */ + { 329, -1 }, /* (334) literal ::= NK_STRING */ + { 329, -1 }, /* (335) literal ::= NK_BOOL */ + { 329, -2 }, /* (336) literal ::= TIMESTAMP NK_STRING */ + { 329, -1 }, /* (337) literal ::= duration_literal */ + { 329, -1 }, /* (338) literal ::= NULL */ + { 329, -1 }, /* (339) literal ::= NK_QUESTION */ + { 373, -1 }, /* (340) duration_literal ::= NK_VARIABLE */ + { 406, -1 }, /* (341) signed ::= NK_INTEGER */ + { 406, -2 }, /* (342) signed ::= NK_PLUS NK_INTEGER */ + { 406, -2 }, /* (343) signed ::= NK_MINUS NK_INTEGER */ + { 406, -1 }, /* (344) signed ::= NK_FLOAT */ + { 406, -2 }, /* (345) signed ::= NK_PLUS NK_FLOAT */ + { 406, -2 }, /* (346) signed ::= NK_MINUS NK_FLOAT */ + { 362, -1 }, /* (347) signed_literal ::= signed */ + { 362, -1 }, /* (348) signed_literal ::= NK_STRING */ + { 362, -1 }, /* (349) signed_literal ::= NK_BOOL */ + { 362, -2 }, /* (350) signed_literal ::= TIMESTAMP NK_STRING */ + { 362, -1 }, /* (351) signed_literal ::= duration_literal */ + { 362, -1 }, /* (352) signed_literal ::= NULL */ + { 362, -1 }, /* (353) signed_literal ::= literal_func */ + { 362, -1 }, /* (354) signed_literal ::= NK_QUESTION */ + { 408, -1 }, /* (355) literal_list ::= signed_literal */ + { 408, -3 }, /* (356) literal_list ::= literal_list NK_COMMA signed_literal */ + { 337, -1 }, /* (357) db_name ::= NK_ID */ + { 368, -1 }, /* (358) table_name ::= NK_ID */ + { 360, -1 }, /* (359) column_name ::= NK_ID */ + { 375, -1 }, /* (360) function_name ::= NK_ID */ + { 409, -1 }, /* (361) table_alias ::= NK_ID */ + { 383, -1 }, /* (362) column_alias ::= NK_ID */ + { 331, -1 }, /* (363) user_name ::= NK_ID */ + { 338, -1 }, /* (364) topic_name ::= NK_ID */ + { 398, -1 }, /* (365) stream_name ::= NK_ID */ + { 393, -1 }, /* (366) cgroup_name ::= NK_ID */ + { 386, -1 }, /* (367) index_name ::= NK_ID */ + { 410, -1 }, /* (368) expr_or_subquery ::= expression */ + { 403, -1 }, /* (369) expression ::= literal */ + { 403, -1 }, /* (370) expression ::= pseudo_column */ + { 403, -1 }, /* (371) expression ::= column_reference */ + { 403, -1 }, /* (372) expression ::= function_expression */ + { 403, -1 }, /* (373) expression ::= case_when_expression */ + { 403, -3 }, /* (374) expression ::= NK_LP expression NK_RP */ + { 403, -2 }, /* (375) expression ::= NK_PLUS expr_or_subquery */ + { 403, -2 }, /* (376) expression ::= NK_MINUS expr_or_subquery */ + { 403, -3 }, /* (377) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ + { 403, -3 }, /* (378) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ + { 403, -3 }, /* (379) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ + { 403, -3 }, /* (380) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ + { 403, -3 }, /* (381) expression ::= expr_or_subquery NK_REM expr_or_subquery */ + { 403, -3 }, /* (382) expression ::= column_reference NK_ARROW NK_STRING */ + { 403, -3 }, /* (383) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ + { 403, -3 }, /* (384) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ + { 365, -1 }, /* (385) expression_list ::= expr_or_subquery */ + { 365, -3 }, /* (386) expression_list ::= expression_list NK_COMMA expr_or_subquery */ + { 412, -1 }, /* (387) 
column_reference ::= column_name */ + { 412, -3 }, /* (388) column_reference ::= table_name NK_DOT column_name */ + { 411, -1 }, /* (389) pseudo_column ::= ROWTS */ + { 411, -1 }, /* (390) pseudo_column ::= TBNAME */ + { 411, -3 }, /* (391) pseudo_column ::= table_name NK_DOT TBNAME */ + { 411, -1 }, /* (392) pseudo_column ::= QSTART */ + { 411, -1 }, /* (393) pseudo_column ::= QEND */ + { 411, -1 }, /* (394) pseudo_column ::= QDURATION */ + { 411, -1 }, /* (395) pseudo_column ::= WSTART */ + { 411, -1 }, /* (396) pseudo_column ::= WEND */ + { 411, -1 }, /* (397) pseudo_column ::= WDURATION */ + { 411, -1 }, /* (398) pseudo_column ::= IROWTS */ + { 411, -1 }, /* (399) pseudo_column ::= ISFILLED */ + { 411, -1 }, /* (400) pseudo_column ::= QTAGS */ + { 413, -4 }, /* (401) function_expression ::= function_name NK_LP expression_list NK_RP */ + { 413, -4 }, /* (402) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ + { 413, -6 }, /* (403) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ + { 413, -1 }, /* (404) function_expression ::= literal_func */ + { 407, -3 }, /* (405) literal_func ::= noarg_func NK_LP NK_RP */ + { 407, -1 }, /* (406) literal_func ::= NOW */ + { 417, -1 }, /* (407) noarg_func ::= NOW */ + { 417, -1 }, /* (408) noarg_func ::= TODAY */ + { 417, -1 }, /* (409) noarg_func ::= TIMEZONE */ + { 417, -1 }, /* (410) noarg_func ::= DATABASE */ + { 417, -1 }, /* (411) noarg_func ::= CLIENT_VERSION */ + { 417, -1 }, /* (412) noarg_func ::= SERVER_VERSION */ + { 417, -1 }, /* (413) noarg_func ::= SERVER_STATUS */ + { 417, -1 }, /* (414) noarg_func ::= CURRENT_USER */ + { 417, -1 }, /* (415) noarg_func ::= USER */ + { 415, -1 }, /* (416) star_func ::= COUNT */ + { 415, -1 }, /* (417) star_func ::= FIRST */ + { 415, -1 }, /* (418) star_func ::= LAST */ + { 415, -1 }, /* (419) star_func ::= LAST_ROW */ + { 416, -1 }, /* (420) star_func_para_list ::= NK_STAR */ + { 416, -1 }, /* (421) star_func_para_list ::= other_para_list */ + { 418, -1 }, /* (422) other_para_list ::= star_func_para */ + { 418, -3 }, /* (423) other_para_list ::= other_para_list NK_COMMA star_func_para */ + { 419, -1 }, /* (424) star_func_para ::= expr_or_subquery */ + { 419, -3 }, /* (425) star_func_para ::= table_name NK_DOT NK_STAR */ + { 414, -4 }, /* (426) case_when_expression ::= CASE when_then_list case_when_else_opt END */ + { 414, -5 }, /* (427) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ + { 420, -1 }, /* (428) when_then_list ::= when_then_expr */ + { 420, -2 }, /* (429) when_then_list ::= when_then_list when_then_expr */ + { 423, -4 }, /* (430) when_then_expr ::= WHEN common_expression THEN common_expression */ + { 421, 0 }, /* (431) case_when_else_opt ::= */ + { 421, -2 }, /* (432) case_when_else_opt ::= ELSE common_expression */ + { 424, -3 }, /* (433) predicate ::= expr_or_subquery compare_op expr_or_subquery */ + { 424, -5 }, /* (434) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ + { 424, -6 }, /* (435) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ + { 424, -3 }, /* (436) predicate ::= expr_or_subquery IS NULL */ + { 424, -4 }, /* (437) predicate ::= expr_or_subquery IS NOT NULL */ + { 424, -3 }, /* (438) predicate ::= expr_or_subquery in_op in_predicate_value */ + { 425, -1 }, /* (439) compare_op ::= NK_LT */ + { 425, -1 }, /* (440) compare_op ::= NK_GT */ + { 425, -1 }, /* (441) compare_op ::= NK_LE */ + { 425, -1 }, /* (442) compare_op ::= NK_GE 
*/ + { 425, -1 }, /* (443) compare_op ::= NK_NE */ + { 425, -1 }, /* (444) compare_op ::= NK_EQ */ + { 425, -1 }, /* (445) compare_op ::= LIKE */ + { 425, -2 }, /* (446) compare_op ::= NOT LIKE */ + { 425, -1 }, /* (447) compare_op ::= MATCH */ + { 425, -1 }, /* (448) compare_op ::= NMATCH */ + { 425, -1 }, /* (449) compare_op ::= CONTAINS */ + { 426, -1 }, /* (450) in_op ::= IN */ + { 426, -2 }, /* (451) in_op ::= NOT IN */ + { 427, -3 }, /* (452) in_predicate_value ::= NK_LP literal_list NK_RP */ + { 428, -1 }, /* (453) boolean_value_expression ::= boolean_primary */ + { 428, -2 }, /* (454) boolean_value_expression ::= NOT boolean_primary */ + { 428, -3 }, /* (455) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + { 428, -3 }, /* (456) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + { 429, -1 }, /* (457) boolean_primary ::= predicate */ + { 429, -3 }, /* (458) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ + { 422, -1 }, /* (459) common_expression ::= expr_or_subquery */ + { 422, -1 }, /* (460) common_expression ::= boolean_value_expression */ + { 430, 0 }, /* (461) from_clause_opt ::= */ + { 430, -2 }, /* (462) from_clause_opt ::= FROM table_reference_list */ + { 431, -1 }, /* (463) table_reference_list ::= table_reference */ + { 431, -3 }, /* (464) table_reference_list ::= table_reference_list NK_COMMA table_reference */ + { 432, -1 }, /* (465) table_reference ::= table_primary */ + { 432, -1 }, /* (466) table_reference ::= joined_table */ + { 433, -2 }, /* (467) table_primary ::= table_name alias_opt */ + { 433, -4 }, /* (468) table_primary ::= db_name NK_DOT table_name alias_opt */ + { 433, -2 }, /* (469) table_primary ::= subquery alias_opt */ + { 433, -1 }, /* (470) table_primary ::= parenthesized_joined_table */ + { 435, 0 }, /* (471) alias_opt ::= */ + { 435, -1 }, /* (472) alias_opt ::= table_alias */ + { 435, -2 }, /* (473) alias_opt ::= AS table_alias */ + { 437, -3 }, /* (474) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + { 437, -3 }, /* (475) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ + { 434, -6 }, /* (476) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + { 438, 0 }, /* (477) join_type ::= */ + { 438, -1 }, /* (478) join_type ::= INNER */ + { 440, -12 }, /* (479) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + { 441, 0 }, /* (480) set_quantifier_opt ::= */ + { 441, -1 }, /* (481) set_quantifier_opt ::= DISTINCT */ + { 441, -1 }, /* (482) set_quantifier_opt ::= ALL */ + { 442, -1 }, /* (483) select_list ::= select_item */ + { 442, -3 }, /* (484) select_list ::= select_list NK_COMMA select_item */ + { 450, -1 }, /* (485) select_item ::= NK_STAR */ + { 450, -1 }, /* (486) select_item ::= common_expression */ + { 450, -2 }, /* (487) select_item ::= common_expression column_alias */ + { 450, -3 }, /* (488) select_item ::= common_expression AS column_alias */ + { 450, -3 }, /* (489) select_item ::= table_name NK_DOT NK_STAR */ + { 405, 0 }, /* (490) where_clause_opt ::= */ + { 405, -2 }, /* (491) where_clause_opt ::= WHERE search_condition */ + { 443, 0 }, /* (492) partition_by_clause_opt ::= */ + { 443, -3 }, /* (493) partition_by_clause_opt ::= PARTITION BY partition_list */ + { 451, -1 }, /* (494) partition_list ::= partition_item */ + { 451, -3 
}, /* (495) partition_list ::= partition_list NK_COMMA partition_item */ + { 452, -1 }, /* (496) partition_item ::= expr_or_subquery */ + { 452, -2 }, /* (497) partition_item ::= expr_or_subquery column_alias */ + { 452, -3 }, /* (498) partition_item ::= expr_or_subquery AS column_alias */ + { 447, 0 }, /* (499) twindow_clause_opt ::= */ + { 447, -6 }, /* (500) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + { 447, -4 }, /* (501) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ + { 447, -6 }, /* (502) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + { 447, -8 }, /* (503) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + { 447, -7 }, /* (504) twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ + { 388, 0 }, /* (505) sliding_opt ::= */ + { 388, -4 }, /* (506) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + { 446, 0 }, /* (507) fill_opt ::= */ + { 446, -4 }, /* (508) fill_opt ::= FILL NK_LP fill_mode NK_RP */ + { 446, -6 }, /* (509) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + { 453, -1 }, /* (510) fill_mode ::= NONE */ + { 453, -1 }, /* (511) fill_mode ::= PREV */ + { 453, -1 }, /* (512) fill_mode ::= NULL */ + { 453, -1 }, /* (513) fill_mode ::= LINEAR */ + { 453, -1 }, /* (514) fill_mode ::= NEXT */ + { 448, 0 }, /* (515) group_by_clause_opt ::= */ + { 448, -3 }, /* (516) group_by_clause_opt ::= GROUP BY group_by_list */ + { 454, -1 }, /* (517) group_by_list ::= expr_or_subquery */ + { 454, -3 }, /* (518) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ + { 449, 0 }, /* (519) having_clause_opt ::= */ + { 449, -2 }, /* (520) having_clause_opt ::= HAVING search_condition */ + { 444, 0 }, /* (521) range_opt ::= */ + { 444, -6 }, /* (522) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ + { 445, 0 }, /* (523) every_opt ::= */ + { 445, -4 }, /* (524) every_opt ::= EVERY NK_LP duration_literal NK_RP */ + { 455, -4 }, /* (525) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 456, -1 }, /* (526) query_simple ::= query_specification */ + { 456, -1 }, /* (527) query_simple ::= union_query_expression */ + { 460, -4 }, /* (528) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ + { 460, -3 }, /* (529) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ + { 461, -1 }, /* (530) query_simple_or_subquery ::= query_simple */ + { 461, -1 }, /* (531) query_simple_or_subquery ::= subquery */ + { 392, -1 }, /* (532) query_or_subquery ::= query_expression */ + { 392, -1 }, /* (533) query_or_subquery ::= subquery */ + { 457, 0 }, /* (534) order_by_clause_opt ::= */ + { 457, -3 }, /* (535) order_by_clause_opt ::= ORDER BY sort_specification_list */ + { 458, 0 }, /* (536) slimit_clause_opt ::= */ + { 458, -2 }, /* (537) slimit_clause_opt ::= SLIMIT NK_INTEGER */ + { 458, -4 }, /* (538) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + { 458, -4 }, /* (539) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 459, 0 }, /* (540) limit_clause_opt ::= */ + { 459, -2 }, /* (541) limit_clause_opt ::= LIMIT NK_INTEGER */ + { 459, -4 }, /* (542) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ + { 459, -4 }, /* (543) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 436, -3 }, /* 
(544) subquery ::= NK_LP query_expression NK_RP */
+ { 436, -3 }, /* (545) subquery ::= NK_LP subquery NK_RP */
+ { 439, -1 }, /* (546) search_condition ::= common_expression */
+ { 462, -1 }, /* (547) sort_specification_list ::= sort_specification */
+ { 462, -3 }, /* (548) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
+ { 463, -3 }, /* (549) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
+ { 464, 0 }, /* (550) ordering_specification_opt ::= */
+ { 464, -1 }, /* (551) ordering_specification_opt ::= ASC */
+ { 464, -1 }, /* (552) ordering_specification_opt ::= DESC */
+ { 465, 0 }, /* (553) null_ordering_opt ::= */
+ { 465, -2 }, /* (554) null_ordering_opt ::= NULLS FIRST */
+ { 465, -2 }, /* (555) null_ordering_opt ::= NULLS LAST */
+>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -3752,78 +5532,85 @@ static YYACTIONTYPE yy_reduce(
yy_destructor(yypParser,329,&yymsp[0].minor);
break;
case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
-{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy179, &yymsp[-1].minor.yy0, yymsp[0].minor.yy113); }
+{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy233, &yymsp[-1].minor.yy0, yymsp[0].minor.yy27); }
break;
case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy179, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy233, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
break;
case 26: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy179, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy233, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
break;
case 27: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy179, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy233, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
break;
case 28: /* cmd ::= DROP USER user_name */
-{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy179); }
+{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy233); }
break;
case 29: /* sysinfo_opt ::= */
-{ yymsp[1].minor.yy113 = 1; }
+{ yymsp[1].minor.yy27 = 1; }
break;
case 30: /* sysinfo_opt ::= SYSINFO NK_INTEGER */
-{ yymsp[-1].minor.yy113 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
+{ yymsp[-1].minor.yy27 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
break;
case 31: /* cmd ::= GRANT privileges ON priv_level TO user_name */
-{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy159, &yymsp[-2].minor.yy179, &yymsp[0].minor.yy179); }
+{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy129, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233); }
break;
case 32: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */
-{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy159, &yymsp[-2].minor.yy179, &yymsp[0].minor.yy179); }
+{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy129, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233); }
break;
case 33: /* privileges ::= ALL */
-{ yymsp[0].minor.yy159 = PRIVILEGE_TYPE_ALL; }
+{ yymsp[0].minor.yy129 = PRIVILEGE_TYPE_ALL; }
break;
case 34: /*
privileges ::= priv_type_list */ case 36: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==36); -{ yylhsminor.yy159 = yymsp[0].minor.yy159; } - yymsp[0].minor.yy159 = yylhsminor.yy159; +{ yylhsminor.yy129 = yymsp[0].minor.yy129; } + yymsp[0].minor.yy129 = yylhsminor.yy129; break; case 35: /* privileges ::= SUBSCRIBE */ -{ yymsp[0].minor.yy159 = PRIVILEGE_TYPE_SUBSCRIBE; } +{ yymsp[0].minor.yy129 = PRIVILEGE_TYPE_SUBSCRIBE; } break; case 37: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ -{ yylhsminor.yy159 = yymsp[-2].minor.yy159 | yymsp[0].minor.yy159; } - yymsp[-2].minor.yy159 = yylhsminor.yy159; +{ yylhsminor.yy129 = yymsp[-2].minor.yy129 | yymsp[0].minor.yy129; } + yymsp[-2].minor.yy129 = yylhsminor.yy129; break; case 38: /* priv_type ::= READ */ -{ yymsp[0].minor.yy159 = PRIVILEGE_TYPE_READ; } +{ yymsp[0].minor.yy129 = PRIVILEGE_TYPE_READ; } break; case 39: /* priv_type ::= WRITE */ -{ yymsp[0].minor.yy159 = PRIVILEGE_TYPE_WRITE; } +{ yymsp[0].minor.yy129 = PRIVILEGE_TYPE_WRITE; } break; case 40: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ -{ yylhsminor.yy179 = yymsp[-2].minor.yy0; } - yymsp[-2].minor.yy179 = yylhsminor.yy179; +{ yylhsminor.yy233 = yymsp[-2].minor.yy0; } + yymsp[-2].minor.yy233 = yylhsminor.yy233; break; case 41: /* priv_level ::= db_name NK_DOT NK_STAR */ -{ yylhsminor.yy179 = yymsp[-2].minor.yy179; } - yymsp[-2].minor.yy179 = yylhsminor.yy179; +{ yylhsminor.yy233 = yymsp[-2].minor.yy233; } + yymsp[-2].minor.yy233 = yylhsminor.yy233; break; case 42: /* priv_level ::= topic_name */ +<<<<<<< HEAD case 273: /* sma_func_name ::= function_name */ yytestcase(yyruleno==273); case 470: /* alias_opt ::= table_alias */ yytestcase(yyruleno==470); { yylhsminor.yy179 = yymsp[0].minor.yy179; } yymsp[0].minor.yy179 = yylhsminor.yy179; +======= + case 272: /* sma_func_name ::= function_name */ yytestcase(yyruleno==272); + case 472: /* alias_opt ::= table_alias */ yytestcase(yyruleno==472); +{ yylhsminor.yy233 = yymsp[0].minor.yy233; } + yymsp[0].minor.yy233 = yylhsminor.yy233; +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 43: /* cmd ::= CREATE DNODE dnode_endpoint */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy179, NULL); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy233, NULL); } break; case 44: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy179, &yymsp[0].minor.yy0); } +{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy0); } break; case 45: /* cmd ::= DROP DNODE NK_INTEGER force_opt */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy767); } +{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy397); } break; case 46: /* cmd ::= DROP DNODE dnode_endpoint force_opt */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy179, yymsp[0].minor.yy767); } +{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy233, yymsp[0].minor.yy397); } break; case 47: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } @@ -3840,6 +5627,7 @@ static YYACTIONTYPE yy_reduce( case 51: /* dnode_endpoint ::= NK_STRING */ case 52: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==52); case 53: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==53); +<<<<<<< HEAD case 274: /* sma_func_name ::= COUNT */ 
yytestcase(yyruleno==274); case 275: /* sma_func_name ::= FIRST */ yytestcase(yyruleno==275); case 276: /* sma_func_name ::= LAST */ yytestcase(yyruleno==276); @@ -3884,6 +5672,52 @@ static YYACTIONTYPE yy_reduce( case 301: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==301); case 479: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==479); { yymsp[0].minor.yy767 = true; } +======= + case 273: /* sma_func_name ::= COUNT */ yytestcase(yyruleno==273); + case 274: /* sma_func_name ::= FIRST */ yytestcase(yyruleno==274); + case 275: /* sma_func_name ::= LAST */ yytestcase(yyruleno==275); + case 276: /* sma_func_name ::= LAST_ROW */ yytestcase(yyruleno==276); + case 357: /* db_name ::= NK_ID */ yytestcase(yyruleno==357); + case 358: /* table_name ::= NK_ID */ yytestcase(yyruleno==358); + case 359: /* column_name ::= NK_ID */ yytestcase(yyruleno==359); + case 360: /* function_name ::= NK_ID */ yytestcase(yyruleno==360); + case 361: /* table_alias ::= NK_ID */ yytestcase(yyruleno==361); + case 362: /* column_alias ::= NK_ID */ yytestcase(yyruleno==362); + case 363: /* user_name ::= NK_ID */ yytestcase(yyruleno==363); + case 364: /* topic_name ::= NK_ID */ yytestcase(yyruleno==364); + case 365: /* stream_name ::= NK_ID */ yytestcase(yyruleno==365); + case 366: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==366); + case 367: /* index_name ::= NK_ID */ yytestcase(yyruleno==367); + case 407: /* noarg_func ::= NOW */ yytestcase(yyruleno==407); + case 408: /* noarg_func ::= TODAY */ yytestcase(yyruleno==408); + case 409: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==409); + case 410: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==410); + case 411: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==411); + case 412: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==412); + case 413: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==413); + case 414: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==414); + case 415: /* noarg_func ::= USER */ yytestcase(yyruleno==415); + case 416: /* star_func ::= COUNT */ yytestcase(yyruleno==416); + case 417: /* star_func ::= FIRST */ yytestcase(yyruleno==417); + case 418: /* star_func ::= LAST */ yytestcase(yyruleno==418); + case 419: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==419); +{ yylhsminor.yy233 = yymsp[0].minor.yy0; } + yymsp[0].minor.yy233 = yylhsminor.yy233; + break; + case 54: /* force_opt ::= */ + case 73: /* not_exists_opt ::= */ yytestcase(yyruleno==73); + case 75: /* exists_opt ::= */ yytestcase(yyruleno==75); + case 292: /* analyze_opt ::= */ yytestcase(yyruleno==292); + case 299: /* agg_func_opt ::= */ yytestcase(yyruleno==299); + case 480: /* set_quantifier_opt ::= */ yytestcase(yyruleno==480); +{ yymsp[1].minor.yy397 = false; } + break; + case 55: /* force_opt ::= FORCE */ + case 293: /* analyze_opt ::= ANALYZE */ yytestcase(yyruleno==293); + case 300: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==300); + case 481: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==481); +{ yymsp[0].minor.yy397 = true; } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 56: /* cmd ::= ALTER LOCAL NK_STRING */ { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } @@ -3916,23 +5750,24 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } break; case 66: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ -{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, 
yymsp[-2].minor.yy767, &yymsp[-1].minor.yy179, yymsp[0].minor.yy602); } +{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy397, &yymsp[-1].minor.yy233, yymsp[0].minor.yy924); } break; case 67: /* cmd ::= DROP DATABASE exists_opt db_name */ -{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy767, &yymsp[0].minor.yy179); } +{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy397, &yymsp[0].minor.yy233); } break; case 68: /* cmd ::= USE db_name */ -{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy179); } +{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy233); } break; case 69: /* cmd ::= ALTER DATABASE db_name alter_db_options */ -{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy179, yymsp[0].minor.yy602); } +{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy233, yymsp[0].minor.yy924); } break; case 70: /* cmd ::= FLUSH DATABASE db_name */ -{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy179); } +{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy233); } break; case 71: /* cmd ::= TRIM DATABASE db_name speed_opt */ -{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[-1].minor.yy179, yymsp[0].minor.yy820); } +{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[-1].minor.yy233, yymsp[0].minor.yy832); } break; +<<<<<<< HEAD case 72: /* cmd ::= COMPACT DATABASE db_name */ { pCxt->pRootNode = createCompactStmt(pCxt, &yymsp[0].minor.yy179); } break; @@ -4026,27 +5861,126 @@ static YYACTIONTYPE yy_reduce( case 99: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ { yylhsminor.yy602 = setDatabaseOption(pCxt, yymsp[-2].minor.yy602, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy602 = yylhsminor.yy602; +======= + case 72: /* not_exists_opt ::= IF NOT EXISTS */ +{ yymsp[-2].minor.yy397 = true; } + break; + case 74: /* exists_opt ::= IF EXISTS */ +{ yymsp[-1].minor.yy397 = true; } + break; + case 76: /* db_options ::= */ +{ yymsp[1].minor.yy924 = createDefaultDatabaseOptions(pCxt); } + break; + case 77: /* db_options ::= db_options BUFFER NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 78: /* db_options ::= db_options CACHEMODEL NK_STRING */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 79: /* db_options ::= db_options CACHESIZE NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 80: /* db_options ::= db_options COMP NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_COMP, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 81: /* db_options ::= db_options DURATION NK_INTEGER */ + case 82: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==82); +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 83: /* db_options ::= db_options MAXROWS NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = 
yylhsminor.yy924; + break; + case 84: /* db_options ::= db_options MINROWS NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 85: /* db_options ::= db_options KEEP integer_list */ + case 86: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==86); +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_KEEP, yymsp[0].minor.yy776); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 87: /* db_options ::= db_options PAGES NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 88: /* db_options ::= db_options PAGESIZE NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 89: /* db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_TSDB_PAGESIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 90: /* db_options ::= db_options PRECISION NK_STRING */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 91: /* db_options ::= db_options REPLICA NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 92: /* db_options ::= db_options VGROUPS NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 93: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 94: /* db_options ::= db_options RETENTIONS retention_list */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_RETENTIONS, yymsp[0].minor.yy776); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 95: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 96: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_WAL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 97: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 98: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 100: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = 
(yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy602 = setDatabaseOption(pCxt, yymsp[-3].minor.yy602, DB_OPTION_WAL_RETENTION_PERIOD, &t); + yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-3].minor.yy924, DB_OPTION_WAL_RETENTION_PERIOD, &t); } - yymsp[-3].minor.yy602 = yylhsminor.yy602; + yymsp[-3].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 101: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ { yylhsminor.yy602 = setDatabaseOption(pCxt, yymsp[-2].minor.yy602, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy602 = yylhsminor.yy602; +======= + case 100: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 102: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy602 = setDatabaseOption(pCxt, yymsp[-3].minor.yy602, DB_OPTION_WAL_RETENTION_SIZE, &t); + yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-3].minor.yy924, DB_OPTION_WAL_RETENTION_SIZE, &t); } - yymsp[-3].minor.yy602 = yylhsminor.yy602; + yymsp[-3].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 103: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ { yylhsminor.yy602 = setDatabaseOption(pCxt, yymsp[-2].minor.yy602, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy602 = yylhsminor.yy602; @@ -4410,6 +6344,374 @@ static YYACTIONTYPE yy_reduce( case 260: /* tag_item ::= column_name */ yytestcase(yyruleno==260); { yylhsminor.yy602 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy179); } yymsp[0].minor.yy602 = yylhsminor.yy602; +======= + case 102: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 103: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 104: /* db_options ::= db_options STT_TRIGGER NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_STT_TRIGGER, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 105: /* db_options ::= db_options TABLE_PREFIX NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_TABLE_PREFIX, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 106: /* db_options ::= db_options TABLE_SUFFIX NK_INTEGER */ +{ yylhsminor.yy924 = setDatabaseOption(pCxt, yymsp[-2].minor.yy924, DB_OPTION_TABLE_SUFFIX, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 107: /* alter_db_options ::= alter_db_option */ +{ yylhsminor.yy924 = createAlterDatabaseOptions(pCxt); yylhsminor.yy924 = setAlterDatabaseOption(pCxt, yylhsminor.yy924, &yymsp[0].minor.yy257); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 108: /* alter_db_options ::= alter_db_options alter_db_option */ +{ yylhsminor.yy924 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy924, &yymsp[0].minor.yy257); } + 
yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 109: /* alter_db_option ::= BUFFER NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 110: /* alter_db_option ::= CACHEMODEL NK_STRING */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 111: /* alter_db_option ::= CACHESIZE NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 112: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 113: /* alter_db_option ::= KEEP integer_list */ + case 114: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==114); +{ yymsp[-1].minor.yy257.type = DB_OPTION_KEEP; yymsp[-1].minor.yy257.pList = yymsp[0].minor.yy776; } + break; + case 115: /* alter_db_option ::= PAGES NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_PAGES; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 116: /* alter_db_option ::= REPLICA NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 117: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_WAL; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 118: /* alter_db_option ::= STT_TRIGGER NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = DB_OPTION_STT_TRIGGER; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 119: /* integer_list ::= NK_INTEGER */ +{ yylhsminor.yy776 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 120: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ + case 327: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==327); +{ yylhsminor.yy776 = addNodeToList(pCxt, yymsp[-2].minor.yy776, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy776 = yylhsminor.yy776; + break; + case 121: /* variable_list ::= NK_VARIABLE */ +{ yylhsminor.yy776 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 122: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ +{ yylhsminor.yy776 = addNodeToList(pCxt, yymsp[-2].minor.yy776, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy776 = yylhsminor.yy776; + break; + case 123: /* retention_list ::= retention */ + case 145: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==145); + case 148: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==148); + case 155: /* column_def_list ::= column_def */ yytestcase(yyruleno==155); + case 199: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==199); + case 204: /* col_name_list ::= col_name */ yytestcase(yyruleno==204); + case 255: /* tag_list_opt ::= tag_item */ yytestcase(yyruleno==255); + case 269: /* func_list ::= func */ yytestcase(yyruleno==269); + case 355: /* literal_list ::= signed_literal */ yytestcase(yyruleno==355); + case 422: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==422); + case 428: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==428); + case 483: /* select_list ::= select_item */ yytestcase(yyruleno==483); + 
case 494: /* partition_list ::= partition_item */ yytestcase(yyruleno==494); + case 547: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==547); +{ yylhsminor.yy776 = createNodeList(pCxt, yymsp[0].minor.yy924); } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 124: /* retention_list ::= retention_list NK_COMMA retention */ + case 156: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==156); + case 200: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==200); + case 205: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==205); + case 256: /* tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ yytestcase(yyruleno==256); + case 270: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==270); + case 356: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==356); + case 423: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==423); + case 484: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==484); + case 495: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==495); + case 548: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==548); +{ yylhsminor.yy776 = addNodeToList(pCxt, yymsp[-2].minor.yy776, yymsp[0].minor.yy924); } + yymsp[-2].minor.yy776 = yylhsminor.yy776; + break; + case 125: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ +{ yylhsminor.yy924 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 126: /* speed_opt ::= */ + case 301: /* bufsize_opt ::= */ yytestcase(yyruleno==301); +{ yymsp[1].minor.yy832 = 0; } + break; + case 127: /* speed_opt ::= MAX_SPEED NK_INTEGER */ + case 302: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ yytestcase(yyruleno==302); +{ yymsp[-1].minor.yy832 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } + break; + case 128: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ + case 130: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==130); +{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy397, yymsp[-5].minor.yy924, yymsp[-3].minor.yy776, yymsp[-1].minor.yy776, yymsp[0].minor.yy924); } + break; + case 129: /* cmd ::= CREATE TABLE multi_create_clause */ +{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy776); } + break; + case 131: /* cmd ::= DROP TABLE multi_drop_clause */ +{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy776); } + break; + case 132: /* cmd ::= DROP STABLE exists_opt full_table_name */ +{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy397, yymsp[0].minor.yy924); } + break; + case 133: /* cmd ::= ALTER TABLE alter_table_clause */ + case 329: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==329); +{ pCxt->pRootNode = yymsp[0].minor.yy924; } + break; + case 134: /* cmd ::= ALTER STABLE alter_table_clause */ +{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy924); } + break; + case 135: /* alter_table_clause ::= full_table_name alter_table_options */ +{ yylhsminor.yy924 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + 
yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 136: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */ +{ yylhsminor.yy924 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy924, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy233, yymsp[0].minor.yy852); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 137: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ +{ yylhsminor.yy924 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy924, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy233); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 138: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ +{ yylhsminor.yy924 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy924, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy233, yymsp[0].minor.yy852); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 139: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ +{ yylhsminor.yy924 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy924, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy233, &yymsp[0].minor.yy233); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 140: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ +{ yylhsminor.yy924 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy924, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy233, yymsp[0].minor.yy852); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 141: /* alter_table_clause ::= full_table_name DROP TAG column_name */ +{ yylhsminor.yy924 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy924, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy233); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 142: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ +{ yylhsminor.yy924 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy924, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy233, yymsp[0].minor.yy852); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 143: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ +{ yylhsminor.yy924 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy924, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy233, &yymsp[0].minor.yy233); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 144: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */ +{ yylhsminor.yy924 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy924, &yymsp[-2].minor.yy233, yymsp[0].minor.yy924); } + yymsp[-5].minor.yy924 = yylhsminor.yy924; + break; + case 146: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ + case 149: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==149); + case 429: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==429); +{ yylhsminor.yy776 = addNodeToList(pCxt, yymsp[-1].minor.yy776, yymsp[0].minor.yy924); } + yymsp[-1].minor.yy776 = yylhsminor.yy776; + break; + case 147: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */ +{ yylhsminor.yy924 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy397, yymsp[-8].minor.yy924, yymsp[-6].minor.yy924, yymsp[-5].minor.yy776, yymsp[-2].minor.yy776, yymsp[0].minor.yy924); } + yymsp[-9].minor.yy924 = yylhsminor.yy924; + break; + case 150: /* drop_table_clause 
::= exists_opt full_table_name */ +{ yylhsminor.yy924 = createDropTableClause(pCxt, yymsp[-1].minor.yy397, yymsp[0].minor.yy924); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 151: /* specific_cols_opt ::= */ + case 182: /* tags_def_opt ::= */ yytestcase(yyruleno==182); + case 254: /* tag_list_opt ::= */ yytestcase(yyruleno==254); + case 305: /* col_list_opt ::= */ yytestcase(yyruleno==305); + case 307: /* tag_def_or_ref_opt ::= */ yytestcase(yyruleno==307); + case 492: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==492); + case 515: /* group_by_clause_opt ::= */ yytestcase(yyruleno==515); + case 534: /* order_by_clause_opt ::= */ yytestcase(yyruleno==534); +{ yymsp[1].minor.yy776 = NULL; } + break; + case 152: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */ + case 306: /* col_list_opt ::= NK_LP col_name_list NK_RP */ yytestcase(yyruleno==306); +{ yymsp[-2].minor.yy776 = yymsp[-1].minor.yy776; } + break; + case 153: /* full_table_name ::= table_name */ +{ yylhsminor.yy924 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy233, NULL); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 154: /* full_table_name ::= db_name NK_DOT table_name */ +{ yylhsminor.yy924 = createRealTableNode(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233, NULL); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 157: /* column_def ::= column_name type_name */ +{ yylhsminor.yy924 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy233, yymsp[0].minor.yy852, NULL); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 158: /* column_def ::= column_name type_name COMMENT NK_STRING */ +{ yylhsminor.yy924 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy233, yymsp[-2].minor.yy852, &yymsp[0].minor.yy0); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 159: /* type_name ::= BOOL */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_BOOL); } + break; + case 160: /* type_name ::= TINYINT */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_TINYINT); } + break; + case 161: /* type_name ::= SMALLINT */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_SMALLINT); } + break; + case 162: /* type_name ::= INT */ + case 163: /* type_name ::= INTEGER */ yytestcase(yyruleno==163); +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_INT); } + break; + case 164: /* type_name ::= BIGINT */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_BIGINT); } + break; + case 165: /* type_name ::= FLOAT */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_FLOAT); } + break; + case 166: /* type_name ::= DOUBLE */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_DOUBLE); } + break; + case 167: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy852 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } + break; + case 168: /* type_name ::= TIMESTAMP */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } + break; + case 169: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy852 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } + break; + case 170: /* type_name ::= TINYINT UNSIGNED */ +{ yymsp[-1].minor.yy852 = createDataType(TSDB_DATA_TYPE_UTINYINT); } + break; + case 171: /* type_name ::= SMALLINT UNSIGNED */ +{ yymsp[-1].minor.yy852 = createDataType(TSDB_DATA_TYPE_USMALLINT); } + break; + case 172: /* type_name ::= INT UNSIGNED */ +{ yymsp[-1].minor.yy852 = createDataType(TSDB_DATA_TYPE_UINT); } + break; + case 173: /* type_name ::= BIGINT UNSIGNED 
*/ +{ yymsp[-1].minor.yy852 = createDataType(TSDB_DATA_TYPE_UBIGINT); } + break; + case 174: /* type_name ::= JSON */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_JSON); } + break; + case 175: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy852 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } + break; + case 176: /* type_name ::= MEDIUMBLOB */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } + break; + case 177: /* type_name ::= BLOB */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_BLOB); } + break; + case 178: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy852 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } + break; + case 179: /* type_name ::= DECIMAL */ +{ yymsp[0].minor.yy852 = createDataType(TSDB_DATA_TYPE_DECIMAL); } + break; + case 180: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ +{ yymsp[-3].minor.yy852 = createDataType(TSDB_DATA_TYPE_DECIMAL); } + break; + case 181: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ +{ yymsp[-5].minor.yy852 = createDataType(TSDB_DATA_TYPE_DECIMAL); } + break; + case 183: /* tags_def_opt ::= tags_def */ + case 308: /* tag_def_or_ref_opt ::= tags_def */ yytestcase(yyruleno==308); + case 421: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==421); +{ yylhsminor.yy776 = yymsp[0].minor.yy776; } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 184: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */ + case 309: /* tag_def_or_ref_opt ::= TAGS NK_LP col_name_list NK_RP */ yytestcase(yyruleno==309); +{ yymsp[-3].minor.yy776 = yymsp[-1].minor.yy776; } + break; + case 185: /* table_options ::= */ +{ yymsp[1].minor.yy924 = createDefaultTableOptions(pCxt); } + break; + case 186: /* table_options ::= table_options COMMENT NK_STRING */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-2].minor.yy924, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 187: /* table_options ::= table_options MAX_DELAY duration_list */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-2].minor.yy924, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy776); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 188: /* table_options ::= table_options WATERMARK duration_list */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-2].minor.yy924, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy776); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 189: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-4].minor.yy924, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy776); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 190: /* table_options ::= table_options TTL NK_INTEGER */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-2].minor.yy924, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 191: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-4].minor.yy924, TABLE_OPTION_SMA, yymsp[-1].minor.yy776); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 192: /* table_options ::= table_options DELETE_MARK duration_list */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-2].minor.yy924, TABLE_OPTION_DELETE_MARK, yymsp[0].minor.yy776); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 193: /* alter_table_options ::= 
alter_table_option */ +{ yylhsminor.yy924 = createAlterTableOptions(pCxt); yylhsminor.yy924 = setTableOption(pCxt, yylhsminor.yy924, yymsp[0].minor.yy257.type, &yymsp[0].minor.yy257.val); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 194: /* alter_table_options ::= alter_table_options alter_table_option */ +{ yylhsminor.yy924 = setTableOption(pCxt, yymsp[-1].minor.yy924, yymsp[0].minor.yy257.type, &yymsp[0].minor.yy257.val); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 195: /* alter_table_option ::= COMMENT NK_STRING */ +{ yymsp[-1].minor.yy257.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 196: /* alter_table_option ::= TTL NK_INTEGER */ +{ yymsp[-1].minor.yy257.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy257.val = yymsp[0].minor.yy0; } + break; + case 197: /* duration_list ::= duration_literal */ + case 385: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==385); +{ yylhsminor.yy776 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy924)); } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 198: /* duration_list ::= duration_list NK_COMMA duration_literal */ + case 386: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==386); +{ yylhsminor.yy776 = addNodeToList(pCxt, yymsp[-2].minor.yy776, releaseRawExprNode(pCxt, yymsp[0].minor.yy924)); } + yymsp[-2].minor.yy776 = yylhsminor.yy776; + break; + case 201: /* rollup_func_name ::= function_name */ +{ yylhsminor.yy924 = createFunctionNode(pCxt, &yymsp[0].minor.yy233, NULL); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 202: /* rollup_func_name ::= FIRST */ + case 203: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==203); + case 258: /* tag_item ::= QTAGS */ yytestcase(yyruleno==258); +{ yylhsminor.yy924 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 206: /* col_name ::= column_name */ + case 259: /* tag_item ::= column_name */ yytestcase(yyruleno==259); +{ yylhsminor.yy924 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy233); } + yymsp[0].minor.yy924 = yylhsminor.yy924; +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 208: /* cmd ::= SHOW DNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); } @@ -4423,6 +6725,7 @@ static YYACTIONTYPE yy_reduce( case 211: /* cmd ::= SHOW DATABASES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); } break; +<<<<<<< HEAD case 212: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy602, yymsp[0].minor.yy602, OP_TYPE_LIKE); } break; @@ -4431,6 +6734,16 @@ static YYACTIONTYPE yy_reduce( break; case 214: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy602, NULL, OP_TYPE_LIKE); } +======= + case 211: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy924, yymsp[0].minor.yy924, OP_TYPE_LIKE); } + break; + case 212: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy924, yymsp[0].minor.yy924, OP_TYPE_LIKE); } + break; + case 213: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ +{ pCxt->pRootNode = 
createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy924, NULL, OP_TYPE_LIKE); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 215: /* cmd ::= SHOW MNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); } @@ -4441,8 +6754,13 @@ static YYACTIONTYPE yy_reduce( case 217: /* cmd ::= SHOW FUNCTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT); } break; +<<<<<<< HEAD case 218: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy602, yymsp[-1].minor.yy602, OP_TYPE_EQUAL); } +======= + case 217: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy924, yymsp[-1].minor.yy924, OP_TYPE_EQUAL); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 219: /* cmd ::= SHOW STREAMS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); } @@ -4460,6 +6778,7 @@ static YYACTIONTYPE yy_reduce( case 224: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==224); { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); } break; +<<<<<<< HEAD case 225: /* cmd ::= SHOW CREATE DATABASE db_name */ { pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy179); } break; @@ -4468,6 +6787,16 @@ static YYACTIONTYPE yy_reduce( break; case 227: /* cmd ::= SHOW CREATE STABLE full_table_name */ { pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy602); } +======= + case 224: /* cmd ::= SHOW CREATE DATABASE db_name */ +{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy233); } + break; + case 225: /* cmd ::= SHOW CREATE TABLE full_table_name */ +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy924); } + break; + case 226: /* cmd ::= SHOW CREATE STABLE full_table_name */ +{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy924); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 228: /* cmd ::= SHOW QUERIES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); } @@ -4485,8 +6814,13 @@ static YYACTIONTYPE yy_reduce( case 233: /* cmd ::= SHOW LOCAL VARIABLES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT); } break; +<<<<<<< HEAD case 234: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ { pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[-2].minor.yy0), yymsp[0].minor.yy602); } +======= + case 233: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ +{ pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[-2].minor.yy0), yymsp[0].minor.yy924); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 235: /* cmd ::= SHOW BNODES */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT); } @@ -4500,8 +6834,13 @@ static YYACTIONTYPE yy_reduce( case 238: /* cmd ::= SHOW TRANSACTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); } break; +<<<<<<< HEAD case 239: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ { pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy602); } +======= + case 238: /* cmd ::= SHOW TABLE DISTRIBUTED 
full_table_name */ +{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy924); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 240: /* cmd ::= SHOW CONSUMERS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); } @@ -4509,11 +6848,19 @@ static YYACTIONTYPE yy_reduce( case 241: /* cmd ::= SHOW SUBSCRIPTIONS */ { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); } break; +<<<<<<< HEAD case 242: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy602, yymsp[-1].minor.yy602, OP_TYPE_EQUAL); } break; case 243: /* cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ { pCxt->pRootNode = createShowTableTagsStmt(pCxt, yymsp[-1].minor.yy602, yymsp[0].minor.yy602, yymsp[-3].minor.yy874); } +======= + case 241: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ +{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy924, yymsp[-1].minor.yy924, OP_TYPE_EQUAL); } + break; + case 242: /* cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ +{ pCxt->pRootNode = createShowTableTagsStmt(pCxt, yymsp[-1].minor.yy924, yymsp[0].minor.yy924, yymsp[-3].minor.yy776); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 244: /* cmd ::= SHOW VNODES NK_INTEGER */ { pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0), NULL); } @@ -4521,12 +6868,18 @@ static YYACTIONTYPE yy_reduce( case 245: /* cmd ::= SHOW VNODES NK_STRING */ { pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, createValueNode(pCxt, TSDB_DATA_TYPE_VARCHAR, &yymsp[0].minor.yy0)); } break; +<<<<<<< HEAD case 246: /* cmd ::= SHOW db_name_cond_opt ALIVE */ { pCxt->pRootNode = createShowAliveStmt(pCxt, yymsp[-1].minor.yy602, QUERY_NODE_SHOW_DB_ALIVE_STMT); } +======= + case 245: /* cmd ::= SHOW db_name_cond_opt ALIVE */ +{ pCxt->pRootNode = createShowAliveStmt(pCxt, yymsp[-1].minor.yy924, QUERY_NODE_SHOW_DB_ALIVE_STMT); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 247: /* cmd ::= SHOW CLUSTER ALIVE */ { pCxt->pRootNode = createShowAliveStmt(pCxt, NULL, QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT); } break; +<<<<<<< HEAD case 248: /* db_name_cond_opt ::= */ case 253: /* from_db_opt ::= */ yytestcase(yyruleno==253); { yymsp[1].minor.yy602 = createDefaultDatabaseCondValue(pCxt); } @@ -4640,10 +6993,126 @@ static YYACTIONTYPE yy_reduce( case 289: /* cmd ::= DESC full_table_name */ case 290: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==290); { pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy602); } +======= + case 247: /* db_name_cond_opt ::= */ + case 252: /* from_db_opt ::= */ yytestcase(yyruleno==252); +{ yymsp[1].minor.yy924 = createDefaultDatabaseCondValue(pCxt); } + break; + case 248: /* db_name_cond_opt ::= db_name NK_DOT */ +{ yylhsminor.yy924 = createIdentifierValueNode(pCxt, &yymsp[-1].minor.yy233); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 249: /* like_pattern_opt ::= */ + case 317: /* subtable_opt ::= */ yytestcase(yyruleno==317); + case 431: /* case_when_else_opt ::= */ yytestcase(yyruleno==431); + case 461: /* from_clause_opt ::= */ yytestcase(yyruleno==461); + case 490: /* where_clause_opt ::= */ yytestcase(yyruleno==490); + case 499: /* twindow_clause_opt ::= */ yytestcase(yyruleno==499); + case 505: /* sliding_opt ::= */ yytestcase(yyruleno==505); + case 
507: /* fill_opt ::= */ yytestcase(yyruleno==507); + case 519: /* having_clause_opt ::= */ yytestcase(yyruleno==519); + case 521: /* range_opt ::= */ yytestcase(yyruleno==521); + case 523: /* every_opt ::= */ yytestcase(yyruleno==523); + case 536: /* slimit_clause_opt ::= */ yytestcase(yyruleno==536); + case 540: /* limit_clause_opt ::= */ yytestcase(yyruleno==540); +{ yymsp[1].minor.yy924 = NULL; } + break; + case 250: /* like_pattern_opt ::= LIKE NK_STRING */ +{ yymsp[-1].minor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + break; + case 251: /* table_name_cond ::= table_name */ +{ yylhsminor.yy924 = createIdentifierValueNode(pCxt, &yymsp[0].minor.yy233); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 253: /* from_db_opt ::= FROM db_name */ +{ yymsp[-1].minor.yy924 = createIdentifierValueNode(pCxt, &yymsp[0].minor.yy233); } + break; + case 257: /* tag_item ::= TBNAME */ +{ yylhsminor.yy924 = setProjectionAlias(pCxt, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL), &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 260: /* tag_item ::= column_name column_alias */ +{ yylhsminor.yy924 = setProjectionAlias(pCxt, createColumnNode(pCxt, NULL, &yymsp[-1].minor.yy233), &yymsp[0].minor.yy233); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 261: /* tag_item ::= column_name AS column_alias */ +{ yylhsminor.yy924 = setProjectionAlias(pCxt, createColumnNode(pCxt, NULL, &yymsp[-2].minor.yy233), &yymsp[0].minor.yy233); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 262: /* cmd ::= CREATE SMA INDEX not_exists_opt full_index_name ON full_table_name index_options */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy397, yymsp[-3].minor.yy924, yymsp[-1].minor.yy924, NULL, yymsp[0].minor.yy924); } + break; + case 263: /* cmd ::= CREATE INDEX not_exists_opt full_index_name ON full_table_name NK_LP col_name_list NK_RP */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_NORMAL, yymsp[-6].minor.yy397, yymsp[-5].minor.yy924, yymsp[-3].minor.yy924, yymsp[-1].minor.yy776, NULL); } + break; + case 264: /* cmd ::= DROP INDEX exists_opt full_index_name */ +{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy397, yymsp[0].minor.yy924); } + break; + case 265: /* full_index_name ::= index_name */ +{ yylhsminor.yy924 = createRealTableNodeForIndexName(pCxt, NULL, &yymsp[0].minor.yy233); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 266: /* full_index_name ::= db_name NK_DOT index_name */ +{ yylhsminor.yy924 = createRealTableNodeForIndexName(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 267: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ +{ yymsp[-9].minor.yy924 = createIndexOption(pCxt, yymsp[-7].minor.yy776, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), NULL, yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + break; + case 268: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ +{ yymsp[-11].minor.yy924 = createIndexOption(pCxt, yymsp[-9].minor.yy776, releaseRawExprNode(pCxt, yymsp[-5].minor.yy924), releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + break; + case 271: /* func ::= sma_func_name NK_LP expression_list NK_RP */ +{ yylhsminor.yy924 = 
createFunctionNode(pCxt, &yymsp[-3].minor.yy233, yymsp[-1].minor.yy776); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 277: /* sma_stream_opt ::= */ + case 310: /* stream_options ::= */ yytestcase(yyruleno==310); +{ yymsp[1].minor.yy924 = createStreamOptions(pCxt); } + break; + case 278: /* sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal */ + case 314: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==314); +{ ((SStreamOptions*)yymsp[-2].minor.yy924)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy924); yylhsminor.yy924 = yymsp[-2].minor.yy924; } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 279: /* sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal */ +{ ((SStreamOptions*)yymsp[-2].minor.yy924)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy924); yylhsminor.yy924 = yymsp[-2].minor.yy924; } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 280: /* sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ +{ ((SStreamOptions*)yymsp[-2].minor.yy924)->pDeleteMark = releaseRawExprNode(pCxt, yymsp[0].minor.yy924); yylhsminor.yy924 = yymsp[-2].minor.yy924; } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 281: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */ +{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy397, &yymsp[-2].minor.yy233, yymsp[0].minor.yy924); } + break; + case 282: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */ +{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy397, &yymsp[-3].minor.yy233, &yymsp[0].minor.yy233, false); } + break; + case 283: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */ +{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy397, &yymsp[-5].minor.yy233, &yymsp[0].minor.yy233, true); } + break; + case 284: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */ +{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy397, &yymsp[-3].minor.yy233, yymsp[0].minor.yy924, false); } + break; + case 285: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */ +{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy397, &yymsp[-5].minor.yy233, yymsp[0].minor.yy924, true); } + break; + case 286: /* cmd ::= DROP TOPIC exists_opt topic_name */ +{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy397, &yymsp[0].minor.yy233); } + break; + case 287: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ +{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy397, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233); } + break; + case 288: /* cmd ::= DESC full_table_name */ + case 289: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==289); +{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy924); } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; case 291: /* cmd ::= RESET QUERY CACHE */ { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); } break; +<<<<<<< HEAD case 292: /* cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ { pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy767, yymsp[-1].minor.yy602, yymsp[0].minor.yy602); } break; @@ -4792,13 +7261,164 @@ static YYACTIONTYPE yy_reduce( { yymsp[-1].minor.yy602 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } break; case 341: /* signed ::= NK_MINUS 
NK_INTEGER */ +======= + case 291: /* cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ +{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy397, yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + break; + case 294: /* explain_options ::= */ +{ yymsp[1].minor.yy924 = createDefaultExplainOptions(pCxt); } + break; + case 295: /* explain_options ::= explain_options VERBOSE NK_BOOL */ +{ yylhsminor.yy924 = setExplainVerbose(pCxt, yymsp[-2].minor.yy924, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 296: /* explain_options ::= explain_options RATIO NK_FLOAT */ +{ yylhsminor.yy924 = setExplainRatio(pCxt, yymsp[-2].minor.yy924, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 297: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */ +{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy397, yymsp[-8].minor.yy397, &yymsp[-5].minor.yy233, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy852, yymsp[0].minor.yy832); } + break; + case 298: /* cmd ::= DROP FUNCTION exists_opt function_name */ +{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy397, &yymsp[0].minor.yy233); } + break; + case 303: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */ +{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-9].minor.yy397, &yymsp[-8].minor.yy233, yymsp[-5].minor.yy924, yymsp[-7].minor.yy924, yymsp[-3].minor.yy776, yymsp[-2].minor.yy924, yymsp[0].minor.yy924, yymsp[-4].minor.yy776); } + break; + case 304: /* cmd ::= DROP STREAM exists_opt stream_name */ +{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy397, &yymsp[0].minor.yy233); } + break; + case 311: /* stream_options ::= stream_options TRIGGER AT_ONCE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy924)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy924 = yymsp[-2].minor.yy924; } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 312: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ +{ ((SStreamOptions*)yymsp[-2].minor.yy924)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy924 = yymsp[-2].minor.yy924; } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 313: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ +{ ((SStreamOptions*)yymsp[-3].minor.yy924)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy924)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy924); yylhsminor.yy924 = yymsp[-3].minor.yy924; } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 315: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ +{ ((SStreamOptions*)yymsp[-3].minor.yy924)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy924 = yymsp[-3].minor.yy924; } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 316: /* stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ +{ ((SStreamOptions*)yymsp[-2].minor.yy924)->fillHistory = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy924 = yymsp[-2].minor.yy924; } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 318: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ + case 506: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==506); + case 524: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==524); +{ 
yymsp[-3].minor.yy924 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy924); } + break; + case 319: /* cmd ::= KILL CONNECTION NK_INTEGER */ +{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } + break; + case 320: /* cmd ::= KILL QUERY NK_STRING */ +{ pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); } + break; + case 321: /* cmd ::= KILL TRANSACTION NK_INTEGER */ +{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } + break; + case 322: /* cmd ::= BALANCE VGROUP */ +{ pCxt->pRootNode = createBalanceVgroupStmt(pCxt); } + break; + case 323: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ +{ pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } + break; + case 324: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ +{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy776); } + break; + case 325: /* cmd ::= SPLIT VGROUP NK_INTEGER */ +{ pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } + break; + case 326: /* dnode_list ::= DNODE NK_INTEGER */ +{ yymsp[-1].minor.yy776 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } + break; + case 328: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ +{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + break; + case 330: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ +{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy924, yymsp[-2].minor.yy776, yymsp[0].minor.yy924); } + break; + case 331: /* cmd ::= INSERT INTO full_table_name query_or_subquery */ +{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy924, NULL, yymsp[0].minor.yy924); } + break; + case 332: /* literal ::= NK_INTEGER */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 333: /* literal ::= NK_FLOAT */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 334: /* literal ::= NK_STRING */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 335: /* literal ::= NK_BOOL */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 336: /* literal ::= TIMESTAMP NK_STRING */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 337: /* literal ::= duration_literal */ + case 347: /* signed_literal ::= signed */ yytestcase(yyruleno==347); + case 368: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==368); + case 369: /* expression ::= literal */ yytestcase(yyruleno==369); + case 370: /* expression ::= pseudo_column */ yytestcase(yyruleno==370); + case 371: /* expression ::= column_reference */ yytestcase(yyruleno==371); + case 372: /* expression ::= function_expression */ yytestcase(yyruleno==372); + case 373: 
/* expression ::= case_when_expression */ yytestcase(yyruleno==373); + case 404: /* function_expression ::= literal_func */ yytestcase(yyruleno==404); + case 453: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==453); + case 457: /* boolean_primary ::= predicate */ yytestcase(yyruleno==457); + case 459: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==459); + case 460: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==460); + case 463: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==463); + case 465: /* table_reference ::= table_primary */ yytestcase(yyruleno==465); + case 466: /* table_reference ::= joined_table */ yytestcase(yyruleno==466); + case 470: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==470); + case 526: /* query_simple ::= query_specification */ yytestcase(yyruleno==526); + case 527: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==527); + case 530: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==530); + case 532: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==532); +{ yylhsminor.yy924 = yymsp[0].minor.yy924; } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 338: /* literal ::= NULL */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 339: /* literal ::= NK_QUESTION */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 340: /* duration_literal ::= NK_VARIABLE */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 341: /* signed ::= NK_INTEGER */ +{ yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 342: /* signed ::= NK_PLUS NK_INTEGER */ +{ yymsp[-1].minor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } + break; + case 343: /* signed ::= NK_MINUS NK_INTEGER */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy602 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); + yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); } - yymsp[-1].minor.yy602 = yylhsminor.yy602; + yymsp[-1].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 342: /* signed ::= NK_FLOAT */ { yylhsminor.yy602 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } yymsp[0].minor.yy602 = yylhsminor.yy602; @@ -4807,11 +7427,22 @@ static YYACTIONTYPE yy_reduce( { yymsp[-1].minor.yy602 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } break; case 344: /* signed ::= NK_MINUS NK_FLOAT */ +======= + case 344: /* signed ::= NK_FLOAT */ +{ yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 345: /* signed ::= NK_PLUS NK_FLOAT */ +{ yymsp[-1].minor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } + break; + case 346: /* signed ::= NK_MINUS NK_FLOAT */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { SToken t = 
yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy602 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); + yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); } +<<<<<<< HEAD yymsp[-1].minor.yy602 = yylhsminor.yy602; break; case 346: /* signed_literal ::= NK_STRING */ @@ -4851,80 +7482,158 @@ static YYACTIONTYPE yy_reduce( yymsp[-2].minor.yy602 = yylhsminor.yy602; break; case 373: /* expression ::= NK_PLUS expr_or_subquery */ +======= + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 348: /* signed_literal ::= NK_STRING */ +{ yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 349: /* signed_literal ::= NK_BOOL */ +{ yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 350: /* signed_literal ::= TIMESTAMP NK_STRING */ +{ yymsp[-1].minor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } + break; + case 351: /* signed_literal ::= duration_literal */ + case 353: /* signed_literal ::= literal_func */ yytestcase(yyruleno==353); + case 424: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==424); + case 486: /* select_item ::= common_expression */ yytestcase(yyruleno==486); + case 496: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==496); + case 531: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==531); + case 533: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==533); + case 546: /* search_condition ::= common_expression */ yytestcase(yyruleno==546); +{ yylhsminor.yy924 = releaseRawExprNode(pCxt, yymsp[0].minor.yy924); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 352: /* signed_literal ::= NULL */ +{ yylhsminor.yy924 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 354: /* signed_literal ::= NK_QUESTION */ +{ yylhsminor.yy924 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 374: /* expression ::= NK_LP expression NK_RP */ + case 458: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==458); + case 545: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==545); +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy924)); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 375: /* expression ::= NK_PLUS expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy602)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy924)); } - yymsp[-1].minor.yy602 = yylhsminor.yy602; + yymsp[-1].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 374: /* expression ::= NK_MINUS expr_or_subquery */ +======= + case 376: /* expression ::= NK_MINUS expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, 
&yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy602), NULL)); + SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy924), NULL)); } - yymsp[-1].minor.yy602 = yylhsminor.yy602; + yymsp[-1].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 375: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ +======= + case 377: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 376: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ +======= + case 378: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 377: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ +======= + case 379: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 378: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ +======= + case 380: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ 
+>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 379: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ +======= + case 381: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 380: /* expression ::= column_reference NK_ARROW NK_STRING */ +======= + case 382: /* expression ::= column_reference NK_ARROW NK_STRING */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 381: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ +======= + case 383: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, 
yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 382: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ +======= + case 384: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } +<<<<<<< HEAD yymsp[-2].minor.yy602 = yylhsminor.yy602; break; case 385: /* column_reference ::= column_name */ @@ -4992,43 +7701,129 @@ static YYACTIONTYPE yy_reduce( break; case 431: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ case 436: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==436); +======= + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 387: /* column_reference ::= column_name */ +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy233, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy233)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 388: /* column_reference ::= table_name NK_DOT column_name */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233, createColumnNode(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy233)); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 389: /* pseudo_column ::= ROWTS */ + case 390: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==390); + case 392: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==392); + case 393: /* pseudo_column ::= QEND */ yytestcase(yyruleno==393); + case 394: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==394); + case 395: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==395); + case 396: /* pseudo_column ::= WEND */ yytestcase(yyruleno==396); + case 397: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==397); + case 398: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==398); + case 399: /* pseudo_column ::= ISFILLED */ yytestcase(yyruleno==399); + case 400: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==400); + case 406: /* literal_func ::= NOW */ yytestcase(yyruleno==406); +{ yylhsminor.yy924 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 391: /* pseudo_column ::= table_name NK_DOT TBNAME */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy233)))); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 401: /* function_expression ::= function_name NK_LP expression_list NK_RP */ + case 402: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==402); +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy233, 
&yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy233, yymsp[-1].minor.yy776)); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 403: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), yymsp[-1].minor.yy852)); } + yymsp[-5].minor.yy924 = yylhsminor.yy924; + break; + case 405: /* literal_func ::= noarg_func NK_LP NK_RP */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy233, NULL)); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 420: /* star_func_para_list ::= NK_STAR */ +{ yylhsminor.yy776 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 425: /* star_func_para ::= table_name NK_DOT NK_STAR */ + case 489: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==489); +{ yylhsminor.yy924 = createColumnNode(pCxt, &yymsp[-2].minor.yy233, &yymsp[0].minor.yy0); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 426: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, NULL, yymsp[-2].minor.yy776, yymsp[-1].minor.yy924)); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 427: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), yymsp[-2].minor.yy776, yymsp[-1].minor.yy924)); } + yymsp[-4].minor.yy924 = yylhsminor.yy924; + break; + case 430: /* when_then_expr ::= WHEN common_expression THEN common_expression */ +{ yymsp[-3].minor.yy924 = createWhenThenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924)); } + break; + case 432: /* case_when_else_opt ::= ELSE common_expression */ +{ yymsp[-1].minor.yy924 = releaseRawExprNode(pCxt, yymsp[0].minor.yy924); } + break; + case 433: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ + case 438: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==438); +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy290, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy856, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 432: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ +======= + case 434: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ +>>>>>>> 
677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy602), releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy924), releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-4].minor.yy602 = yylhsminor.yy602; + yymsp[-4].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 433: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ +======= + case 435: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy602), releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy924), releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-5].minor.yy602 = yylhsminor.yy602; + yymsp[-5].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 434: /* predicate ::= expr_or_subquery IS NULL */ +======= + case 436: /* predicate ::= expr_or_subquery IS NULL */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), NULL)); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 435: /* predicate ::= expr_or_subquery IS NOT NULL */ +======= + case 437: /* predicate ::= expr_or_subquery IS NOT NULL */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy602), NULL)); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), NULL)); } - yymsp[-3].minor.yy602 = yylhsminor.yy602; + yymsp[-3].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 437: /* compare_op ::= NK_LT */ { 
yymsp[0].minor.yy290 = OP_TYPE_LOWER_THAN; } break; @@ -5073,28 +7868,83 @@ static YYACTIONTYPE yy_reduce( yymsp[-2].minor.yy602 = yylhsminor.yy602; break; case 452: /* boolean_value_expression ::= NOT boolean_primary */ +======= + case 439: /* compare_op ::= NK_LT */ +{ yymsp[0].minor.yy856 = OP_TYPE_LOWER_THAN; } + break; + case 440: /* compare_op ::= NK_GT */ +{ yymsp[0].minor.yy856 = OP_TYPE_GREATER_THAN; } + break; + case 441: /* compare_op ::= NK_LE */ +{ yymsp[0].minor.yy856 = OP_TYPE_LOWER_EQUAL; } + break; + case 442: /* compare_op ::= NK_GE */ +{ yymsp[0].minor.yy856 = OP_TYPE_GREATER_EQUAL; } + break; + case 443: /* compare_op ::= NK_NE */ +{ yymsp[0].minor.yy856 = OP_TYPE_NOT_EQUAL; } + break; + case 444: /* compare_op ::= NK_EQ */ +{ yymsp[0].minor.yy856 = OP_TYPE_EQUAL; } + break; + case 445: /* compare_op ::= LIKE */ +{ yymsp[0].minor.yy856 = OP_TYPE_LIKE; } + break; + case 446: /* compare_op ::= NOT LIKE */ +{ yymsp[-1].minor.yy856 = OP_TYPE_NOT_LIKE; } + break; + case 447: /* compare_op ::= MATCH */ +{ yymsp[0].minor.yy856 = OP_TYPE_MATCH; } + break; + case 448: /* compare_op ::= NMATCH */ +{ yymsp[0].minor.yy856 = OP_TYPE_NMATCH; } + break; + case 449: /* compare_op ::= CONTAINS */ +{ yymsp[0].minor.yy856 = OP_TYPE_JSON_CONTAINS; } + break; + case 450: /* in_op ::= IN */ +{ yymsp[0].minor.yy856 = OP_TYPE_IN; } + break; + case 451: /* in_op ::= NOT IN */ +{ yymsp[-1].minor.yy856 = OP_TYPE_NOT_IN; } + break; + case 452: /* in_predicate_value ::= NK_LP literal_list NK_RP */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy776)); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 454: /* boolean_value_expression ::= NOT boolean_primary */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy602), NULL)); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy924), NULL)); } - yymsp[-1].minor.yy602 = yylhsminor.yy602; + yymsp[-1].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 453: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ +======= + case 455: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 454: /* boolean_value_expression ::= boolean_value_expression 
AND boolean_value_expression */ +======= + case 456: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy602); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy602); - yylhsminor.yy602 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy602), releaseRawExprNode(pCxt, yymsp[0].minor.yy602))); + SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy924); + SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy924); + yylhsminor.yy924 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } - yymsp[-2].minor.yy602 = yylhsminor.yy602; + yymsp[-2].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 460: /* from_clause_opt ::= FROM table_reference_list */ case 489: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==489); case 518: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==518); @@ -5137,18 +7987,63 @@ static YYACTIONTYPE yy_reduce( { yymsp[0].minor.yy42 = JOIN_TYPE_INNER; } break; case 477: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ +======= + case 462: /* from_clause_opt ::= FROM table_reference_list */ + case 491: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==491); + case 520: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==520); +{ yymsp[-1].minor.yy924 = yymsp[0].minor.yy924; } + break; + case 464: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ +{ yylhsminor.yy924 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy924, yymsp[0].minor.yy924, NULL); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 467: /* table_primary ::= table_name alias_opt */ +{ yylhsminor.yy924 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy233, &yymsp[0].minor.yy233); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 468: /* table_primary ::= db_name NK_DOT table_name alias_opt */ +{ yylhsminor.yy924 = createRealTableNode(pCxt, &yymsp[-3].minor.yy233, &yymsp[-1].minor.yy233, &yymsp[0].minor.yy233); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 469: /* table_primary ::= subquery alias_opt */ +{ yylhsminor.yy924 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy924), &yymsp[0].minor.yy233); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 471: /* alias_opt ::= */ +{ yymsp[1].minor.yy233 = nil_token; } + break; + case 473: /* alias_opt ::= AS table_alias */ +{ yymsp[-1].minor.yy233 = yymsp[0].minor.yy233; } + break; + case 474: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 475: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==475); +{ yymsp[-2].minor.yy924 = yymsp[-1].minor.yy924; } + break; + case 476: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ +{ yylhsminor.yy924 = createJoinTableNode(pCxt, yymsp[-4].minor.yy428, yymsp[-5].minor.yy924, yymsp[-2].minor.yy924, yymsp[0].minor.yy924); } + yymsp[-5].minor.yy924 = yylhsminor.yy924; + break; + 
case 477: /* join_type ::= */ +{ yymsp[1].minor.yy428 = JOIN_TYPE_INNER; } + break; + case 478: /* join_type ::= INNER */ +{ yymsp[0].minor.yy428 = JOIN_TYPE_INNER; } + break; + case 479: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - yymsp[-11].minor.yy602 = createSelectStmt(pCxt, yymsp[-10].minor.yy767, yymsp[-9].minor.yy874, yymsp[-8].minor.yy602); - yymsp[-11].minor.yy602 = addWhereClause(pCxt, yymsp[-11].minor.yy602, yymsp[-7].minor.yy602); - yymsp[-11].minor.yy602 = addPartitionByClause(pCxt, yymsp[-11].minor.yy602, yymsp[-6].minor.yy874); - yymsp[-11].minor.yy602 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy602, yymsp[-2].minor.yy602); - yymsp[-11].minor.yy602 = addGroupByClause(pCxt, yymsp[-11].minor.yy602, yymsp[-1].minor.yy874); - yymsp[-11].minor.yy602 = addHavingClause(pCxt, yymsp[-11].minor.yy602, yymsp[0].minor.yy602); - yymsp[-11].minor.yy602 = addRangeClause(pCxt, yymsp[-11].minor.yy602, yymsp[-5].minor.yy602); - yymsp[-11].minor.yy602 = addEveryClause(pCxt, yymsp[-11].minor.yy602, yymsp[-4].minor.yy602); - yymsp[-11].minor.yy602 = addFillClause(pCxt, yymsp[-11].minor.yy602, yymsp[-3].minor.yy602); + yymsp[-11].minor.yy924 = createSelectStmt(pCxt, yymsp[-10].minor.yy397, yymsp[-9].minor.yy776, yymsp[-8].minor.yy924); + yymsp[-11].minor.yy924 = addWhereClause(pCxt, yymsp[-11].minor.yy924, yymsp[-7].minor.yy924); + yymsp[-11].minor.yy924 = addPartitionByClause(pCxt, yymsp[-11].minor.yy924, yymsp[-6].minor.yy776); + yymsp[-11].minor.yy924 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy924, yymsp[-2].minor.yy924); + yymsp[-11].minor.yy924 = addGroupByClause(pCxt, yymsp[-11].minor.yy924, yymsp[-1].minor.yy776); + yymsp[-11].minor.yy924 = addHavingClause(pCxt, yymsp[-11].minor.yy924, yymsp[0].minor.yy924); + yymsp[-11].minor.yy924 = addRangeClause(pCxt, yymsp[-11].minor.yy924, yymsp[-5].minor.yy924); + yymsp[-11].minor.yy924 = addEveryClause(pCxt, yymsp[-11].minor.yy924, yymsp[-4].minor.yy924); + yymsp[-11].minor.yy924 = addFillClause(pCxt, yymsp[-11].minor.yy924, yymsp[-3].minor.yy924); } break; +<<<<<<< HEAD case 480: /* set_quantifier_opt ::= ALL */ { yymsp[0].minor.yy767 = false; } break; @@ -5219,13 +8114,86 @@ static YYACTIONTYPE yy_reduce( { yymsp[-5].minor.yy602 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy602), releaseRawExprNode(pCxt, yymsp[-1].minor.yy602)); } break; case 523: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ +======= + case 482: /* set_quantifier_opt ::= ALL */ +{ yymsp[0].minor.yy397 = false; } + break; + case 485: /* select_item ::= NK_STAR */ +{ yylhsminor.yy924 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy924 = yylhsminor.yy924; + break; + case 487: /* select_item ::= common_expression column_alias */ + case 497: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==497); +{ yylhsminor.yy924 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy924), &yymsp[0].minor.yy233); } + yymsp[-1].minor.yy924 = yylhsminor.yy924; + break; + case 488: /* select_item ::= common_expression AS column_alias */ + case 498: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==498); +{ yylhsminor.yy924 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), 
&yymsp[0].minor.yy233); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 493: /* partition_by_clause_opt ::= PARTITION BY partition_list */ + case 516: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==516); + case 535: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==535); +{ yymsp[-2].minor.yy776 = yymsp[0].minor.yy776; } + break; + case 500: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ +{ yymsp[-5].minor.yy924 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), releaseRawExprNode(pCxt, yymsp[-1].minor.yy924)); } + break; + case 501: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ +{ yymsp[-3].minor.yy924 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy924)); } + break; + case 502: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-5].minor.yy924 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), NULL, yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + break; + case 503: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ +{ yymsp[-7].minor.yy924 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy924), releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), yymsp[-1].minor.yy924, yymsp[0].minor.yy924); } + break; + case 504: /* twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ +{ yymsp[-6].minor.yy924 = createEventWindowNode(pCxt, yymsp[-3].minor.yy924, yymsp[0].minor.yy924); } + break; + case 508: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ +{ yymsp[-3].minor.yy924 = createFillNode(pCxt, yymsp[-1].minor.yy646, NULL); } + break; + case 509: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ +{ yymsp[-5].minor.yy924 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy776)); } + break; + case 510: /* fill_mode ::= NONE */ +{ yymsp[0].minor.yy646 = FILL_MODE_NONE; } + break; + case 511: /* fill_mode ::= PREV */ +{ yymsp[0].minor.yy646 = FILL_MODE_PREV; } + break; + case 512: /* fill_mode ::= NULL */ +{ yymsp[0].minor.yy646 = FILL_MODE_NULL; } + break; + case 513: /* fill_mode ::= LINEAR */ +{ yymsp[0].minor.yy646 = FILL_MODE_LINEAR; } + break; + case 514: /* fill_mode ::= NEXT */ +{ yymsp[0].minor.yy646 = FILL_MODE_NEXT; } + break; + case 517: /* group_by_list ::= expr_or_subquery */ +{ yylhsminor.yy776 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } + yymsp[0].minor.yy776 = yylhsminor.yy776; + break; + case 518: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ +{ yylhsminor.yy776 = addNodeToList(pCxt, yymsp[-2].minor.yy776, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy924))); } + yymsp[-2].minor.yy776 = yylhsminor.yy776; + break; + case 522: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ +{ yymsp[-5].minor.yy924 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy924), releaseRawExprNode(pCxt, yymsp[-1].minor.yy924)); } + break; + case 525: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 { - yylhsminor.yy602 = addOrderByClause(pCxt, yymsp[-3].minor.yy602, yymsp[-2].minor.yy874); - yylhsminor.yy602 = 
addSlimitClause(pCxt, yylhsminor.yy602, yymsp[-1].minor.yy602); - yylhsminor.yy602 = addLimitClause(pCxt, yylhsminor.yy602, yymsp[0].minor.yy602); + yylhsminor.yy924 = addOrderByClause(pCxt, yymsp[-3].minor.yy924, yymsp[-2].minor.yy776); + yylhsminor.yy924 = addSlimitClause(pCxt, yylhsminor.yy924, yymsp[-1].minor.yy924); + yylhsminor.yy924 = addLimitClause(pCxt, yylhsminor.yy924, yymsp[0].minor.yy924); } - yymsp[-3].minor.yy602 = yylhsminor.yy602; + yymsp[-3].minor.yy924 = yylhsminor.yy924; break; +<<<<<<< HEAD case 526: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ { yylhsminor.yy602 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy602, yymsp[0].minor.yy602); } yymsp[-3].minor.yy602 = yylhsminor.yy602; @@ -5271,6 +8239,53 @@ static YYACTIONTYPE yy_reduce( break; case 553: /* null_ordering_opt ::= NULLS LAST */ { yymsp[-1].minor.yy487 = NULL_ORDER_LAST; } +======= + case 528: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ +{ yylhsminor.yy924 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy924, yymsp[0].minor.yy924); } + yymsp[-3].minor.yy924 = yylhsminor.yy924; + break; + case 529: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ +{ yylhsminor.yy924 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy924, yymsp[0].minor.yy924); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 537: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 541: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==541); +{ yymsp[-1].minor.yy924 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } + break; + case 538: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 542: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==542); +{ yymsp[-3].minor.yy924 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } + break; + case 539: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 543: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==543); +{ yymsp[-3].minor.yy924 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } + break; + case 544: /* subquery ::= NK_LP query_expression NK_RP */ +{ yylhsminor.yy924 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy924); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 549: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ +{ yylhsminor.yy924 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy924), yymsp[-1].minor.yy870, yymsp[0].minor.yy89); } + yymsp[-2].minor.yy924 = yylhsminor.yy924; + break; + case 550: /* ordering_specification_opt ::= */ +{ yymsp[1].minor.yy870 = ORDER_ASC; } + break; + case 551: /* ordering_specification_opt ::= ASC */ +{ yymsp[0].minor.yy870 = ORDER_ASC; } + break; + case 552: /* ordering_specification_opt ::= DESC */ +{ yymsp[0].minor.yy870 = ORDER_DESC; } + break; + case 553: /* null_ordering_opt ::= */ +{ yymsp[1].minor.yy89 = NULL_ORDER_DEFAULT; } + break; + case 554: /* null_ordering_opt ::= NULLS FIRST */ +{ yymsp[-1].minor.yy89 = NULL_ORDER_FIRST; } + break; + case 555: /* null_ordering_opt ::= NULLS LAST */ +{ yymsp[-1].minor.yy89 = NULL_ORDER_LAST; } +>>>>>>> 677a27a09744fa59ddd2e723052ec68e32229566 break; default: break; diff --git a/source/libs/parser/test/mockCatalog.cpp 
b/source/libs/parser/test/mockCatalog.cpp index ae702ec02f1570ab9d92976abdb18059cd727175..c3f6c3ac72c70b799500b1e70cc31b72e585d575 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -102,6 +102,10 @@ void generateInformationSchema(MockCatalogService* mcs) { .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN) .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN) .done(); + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_COLS, TSDB_SYSTEM_TABLE, 2) + .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN) + .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN) + .done(); mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_PRIVILEGES, TSDB_SYSTEM_TABLE, 2) .addColumn("user_name", TSDB_DATA_TYPE_BINARY, TSDB_USER_LEN) .addColumn("privilege", TSDB_DATA_TYPE_BINARY, 10) diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 1656940a943d7f62da684216ff66988e45677b67..51de121d1e5cb6c56206ced0ac1bd3f8c222d963 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -872,9 +872,12 @@ TEST_F(ParserInitialCTest, createStream) { "AS SELECT _WSTART wstart, COUNT(*) cnt FROM st1 PARTITION BY TBNAME tname, tag1 id INTERVAL(10S)"); clearCreateStreamReq(); - setCreateStreamReq("s1", "test", "create stream s1 into st1 as select max(c1), c2 from t1 interval(10s)", "st1", - STREAM_CREATE_STABLE_FALSE); - run("CREATE STREAM s1 INTO st1 AS SELECT MAX(c1), c2 FROM t1 INTERVAL(10S)"); + // st1 already exists + setCreateStreamReq( + "s1", "test", + "create stream s1 into st1 tags(tag2) as select max(c1), c2 from t1 partition by tbname tag2 interval(10s)", + "st1", STREAM_CREATE_STABLE_FALSE); + run("CREATE STREAM s1 INTO st1 TAGS(tag2) AS SELECT MAX(c1), c2 FROM t1 PARTITION BY TBNAME tag2 INTERVAL(10S)"); clearCreateStreamReq(); } diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp index de2ce554593749ff420cc13312c84414d0e99f73..8d13d7cf0e75bd5672ed0be1dc096fbb7a4868ea 100644 --- a/source/libs/parser/test/parTestMain.cpp +++ b/source/libs/parser/test/parTestMain.cpp @@ -86,6 +86,7 @@ static void parseArg(int argc, char* argv[]) { {"dump", no_argument, NULL, 'd'}, {"async", required_argument, NULL, 'a'}, {"skipSql", required_argument, NULL, 's'}, + {"limitSql", required_argument, NULL, 'i'}, {"log", required_argument, NULL, 'l'}, {0, 0, 0, 0} }; @@ -101,6 +102,9 @@ static void parseArg(int argc, char* argv[]) { case 's': setSkipSqlNum(optarg); break; + case 'i': + setLimitSqlNum(optarg); + break; case 'l': setLogLevel(optarg); break; diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp index dbae302e426fb0c842c175ae17b71a1f1767e65d..f18dd4f17f67b9d35990d6c326b11b26c3033a47 100644 --- a/source/libs/parser/test/parTestUtil.cpp +++ b/source/libs/parser/test/parTestUtil.cpp @@ -49,9 +49,11 @@ bool g_dump = false; bool g_testAsyncApis = true; int32_t g_logLevel = 131; int32_t g_skipSql = 0; +int32_t g_limitSql = 0; -void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? true : false; } -void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } +void setAsyncFlag(const char* pArg) { g_testAsyncApis = stoi(pArg) > 0 ? 
true : false; } +void setSkipSqlNum(const char* pArg) { g_skipSql = stoi(pArg); } +void setLimitSqlNum(const char* pArg) { g_limitSql = stoi(pArg); } struct TerminateFlag : public exception { const char* what() const throw() { return "success and terminate"; } @@ -63,22 +65,27 @@ int32_t getLogLevel() { return g_logLevel; } class ParserTestBaseImpl { public: - ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase), sqlNo_(0) {} + ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase), sqlNo_(0), sqlNum_(0) {} void login(const std::string& user) { caseEnv_.user_ = user; } void useDb(const string& acctId, const string& db) { caseEnv_.acctId_ = acctId; caseEnv_.db_ = db; - caseEnv_.nsql_ = g_skipSql; + caseEnv_.numOfSkipSql_ = g_skipSql; + caseEnv_.numOfLimitSql_ = g_limitSql; } void run(const string& sql, int32_t expect, ParserStage checkStage) { ++sqlNo_; - if (caseEnv_.nsql_ > 0) { - --(caseEnv_.nsql_); + if (caseEnv_.numOfSkipSql_ > 0) { + --(caseEnv_.numOfSkipSql_); return; } + if (caseEnv_.numOfLimitSql_ > 0 && caseEnv_.numOfLimitSql_ == sqlNum_) { + return; + } + ++sqlNum_; runInternalFuncs(sql, expect, checkStage); runApis(sql, expect, checkStage); @@ -94,9 +101,10 @@ class ParserTestBaseImpl { string acctId_; string user_; string db_; - int32_t nsql_; + int32_t numOfSkipSql_; + int32_t numOfLimitSql_; - caseEnv() : user_("wangxiaoyu"), nsql_(0) {} + caseEnv() : user_("wangxiaoyu"), numOfSkipSql_(0) {} }; struct stmtEnv { @@ -532,6 +540,7 @@ class ParserTestBaseImpl { stmtRes res_; ParserTestBase* pBase_; int32_t sqlNo_; + int32_t sqlNum_; }; ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {} diff --git a/source/libs/parser/test/parTestUtil.h b/source/libs/parser/test/parTestUtil.h index 3e5fab592729a9d98010900be1ef6dbbedc0d07f..5d9680c477d90e0268e3383ff089013006735a8d 100644 --- a/source/libs/parser/test/parTestUtil.h +++ b/source/libs/parser/test/parTestUtil.h @@ -65,10 +65,11 @@ class ParserDdlTest : public ParserTestBase { extern bool g_dump; -extern void setAsyncFlag(const char* pFlag); -extern void setLogLevel(const char* pLogLevel); +extern void setAsyncFlag(const char* pArg); +extern void setLogLevel(const char* pArg); extern int32_t getLogLevel(); -extern void setSkipSqlNum(const char* pNum); +extern void setSkipSqlNum(const char* pArg); +extern void setLimitSqlNum(const char* pArg); } // namespace ParserTest diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 559182bf7be2faaf4caa39eaa84ac1f5b045d522..fef5bd654e04bee0618aa8d25fd2e2c6d46e1b39 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1359,6 +1359,7 @@ static int32_t createSetOpLogicNode(SLogicPlanContext* pCxt, SSetOperator* pSetO } if (TSDB_CODE_SUCCESS == code) { + pSetOp->precision = pSetOperator->precision; *pLogicNode = (SLogicNode*)pSetOp; } else { nodesDestroyNode((SNode*)pSetOp); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 120a8537417562a4002268ba4f897a3811b0eaa4..18aaabf448ae439af7560be5feb7bb54e0981603 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -1080,29 +1080,29 @@ static bool sortPriKeyOptMayBeOptimized(SLogicNode* pNode) { return false; } SSortLogicNode* pSort = (SSortLogicNode*)pNode; - if (pSort->groupSort || !sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys) || 1 != LIST_LENGTH(pSort->node.pChildren)) { + if 
(!sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys) || 1 != LIST_LENGTH(pSort->node.pChildren)) { return false; } return true; } -static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNotOptimize, +static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool groupSort, bool* pNotOptimize, SNodeList** pSequencingNodes) { switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_SCAN: { SScanLogicNode* pScan = (SScanLogicNode*)pNode; - if (NULL != pScan->pGroupTags || TSDB_SYSTEM_TABLE == pScan->tableType) { + if ((!groupSort && NULL != pScan->pGroupTags) || TSDB_SYSTEM_TABLE == pScan->tableType) { *pNotOptimize = true; return TSDB_CODE_SUCCESS; } return nodesListMakeAppend(pSequencingNodes, (SNode*)pNode); } case QUERY_NODE_LOGIC_PLAN_JOIN: { - int32_t code = sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), + int32_t code = sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), groupSort, pNotOptimize, pSequencingNodes); if (TSDB_CODE_SUCCESS == code) { - code = sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 1), pNotOptimize, - pSequencingNodes); + code = sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 1), groupSort, + pNotOptimize, pSequencingNodes); } return code; } @@ -1121,13 +1121,13 @@ static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNot return TSDB_CODE_SUCCESS; } - return sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), pNotOptimize, - pSequencingNodes); + return sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), groupSort, + pNotOptimize, pSequencingNodes); } -static int32_t sortPriKeyOptGetSequencingNodes(SLogicNode* pNode, SNodeList** pSequencingNodes) { +static int32_t sortPriKeyOptGetSequencingNodes(SLogicNode* pNode, bool groupSort, SNodeList** pSequencingNodes) { bool notOptimize = false; - int32_t code = sortPriKeyOptGetSequencingNodesImpl(pNode, ¬Optimize, pSequencingNodes); + int32_t code = sortPriKeyOptGetSequencingNodesImpl(pNode, groupSort, ¬Optimize, pSequencingNodes); if (TSDB_CODE_SUCCESS != code || notOptimize) { NODES_CLEAR_LIST(*pSequencingNodes); } @@ -1175,8 +1175,8 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS static int32_t sortPrimaryKeyOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SSortLogicNode* pSort) { SNodeList* pSequencingNodes = NULL; - int32_t code = - sortPriKeyOptGetSequencingNodes((SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0), &pSequencingNodes); + int32_t code = sortPriKeyOptGetSequencingNodes((SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0), + pSort->groupSort, &pSequencingNodes); if (TSDB_CODE_SUCCESS == code && NULL != pSequencingNodes) { code = sortPriKeyOptApply(pCxt, pLogicSubplan, pSort, pSequencingNodes); } @@ -1334,11 +1334,12 @@ static int32_t smaIndexOptApplyIndex(SLogicSubplan* pLogicSubplan, SScanLogicNod if (TSDB_CODE_SUCCESS == code) { code = replaceLogicNode(pLogicSubplan, pScan->node.pParent, pSmaScan); } + if (TSDB_CODE_SUCCESS == code) { + nodesDestroyNode((SNode*)pScan->node.pParent); + } return code; } -static void smaIndexOptDestroySmaIndex(void* p) { taosMemoryFree(((STableIndexInfo*)p)->expr); } - static int32_t smaIndexOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SScanLogicNode* pScan) { int32_t code = TSDB_CODE_SUCCESS; int32_t nindexes = 
taosArrayGetSize(pScan->pSmaIndexes); @@ -1348,8 +1349,6 @@ static int32_t smaIndexOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogi code = smaIndexOptCouldApplyIndex(pScan, pIndex, &pSmaCols); if (TSDB_CODE_SUCCESS == code && NULL != pSmaCols) { code = smaIndexOptApplyIndex(pLogicSubplan, pScan, pIndex, pSmaCols); - taosArrayDestroyEx(pScan->pSmaIndexes, smaIndexOptDestroySmaIndex); - pScan->pSmaIndexes = NULL; pCxt->optimized = true; break; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 7d6238193d7e6411cffd9bd8b092afa01e651b7a..f83704be8783ac0f65c02823a3ace1b1031685b7 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -609,7 +609,8 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pScan->accountId = pCxt->pPlanCxt->acctId; pScan->sysInfo = pCxt->pPlanCxt->sysInfo; if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLES) || - 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TAGS)) { + 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TAGS) || + 0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_COLS)) { vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); } else { pSubplan->execNode.nodeId = MNODE_HANDLE; diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index 66b8e48eb1a41d5669e008d3296359f208e0a19f..787ef7501da7e467e67975d20563ad7f875ecced 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -228,9 +228,14 @@ typedef struct SQWorkerMgmt { case QW_PHASE_POST_FETCH: \ ctx->inFetch = 0; \ break; \ - default: \ + case QW_PHASE_PRE_QUERY: \ + case QW_PHASE_POST_QUERY: \ + case QW_PHASE_PRE_CQUERY: \ + case QW_PHASE_POST_CQUERY: \ atomic_store_8(&(ctx)->phase, _value); \ break; \ + default: \ + break; \ } \ } while (0) diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 952b87f9d734ed3bdaf2c1d2ea675b575ece3de1..2f712e6eba50a6dd3e8c4768397eaf57ec505bff 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -201,6 +201,15 @@ _return: QW_RET(code); } +bool qwTaskNotInExec(SQWTaskCtx *ctx) { + qTaskInfo_t taskHandle = ctx->taskHandle; + if (NULL == taskHandle || !qTaskIsExecuting(taskHandle)) { + return true; + } + + return false; +} + int32_t qwGenerateSchHbRsp(SQWorker *mgmt, SQWSchStatus *sch, SQWHbInfo *hbInfo) { int32_t taskNum = 0; @@ -507,8 +516,10 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp } if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) { - QW_ERR_JRET(qwDropTask(QW_FPARAMS())); - QW_ERR_JRET(ctx->rspCode); + if (QW_PHASE_POST_FETCH != phase || qwTaskNotInExec(ctx)) { + QW_ERR_JRET(qwDropTask(QW_FPARAMS())); + QW_ERR_JRET(ctx->rspCode); + } } if (ctx->rspCode) { @@ -539,7 +550,9 @@ _return: if (ctx) { QW_UPDATE_RSP_CODE(ctx, code); - QW_SET_PHASE(ctx, phase); + if (QW_PHASE_POST_CQUERY != phase) { + QW_SET_PHASE(ctx, phase); + } QW_UNLOCK(QW_WRITE, &ctx->lock); qwReleaseTaskCtx(mgmt, ctx); @@ -746,7 +759,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { QW_LOCK(QW_WRITE, &ctx->lock); if (qComplete || (queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code) { // Note: query is not running anymore - QW_SET_PHASE(ctx, 0); + QW_SET_PHASE(ctx, QW_PHASE_POST_CQUERY); QW_UNLOCK(QW_WRITE, &ctx->lock); break; } diff --git a/source/libs/scalar/src/filter.c 
b/source/libs/scalar/src/filter.c index d0c27560cae9496cf05de9cfb7868da7e35f8ed6..74d555af7773cf10d7e542ac2df277bce476bac6 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -90,7 +90,7 @@ rangeCompFunc gRangeCompare[] = {filterRangeCompee, filterRangeCompei, filterRan int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { if (optr2) { - assert(optr2 == OP_TYPE_LOWER_THAN || optr2 == OP_TYPE_LOWER_EQUAL); + ASSERT(optr2 == OP_TYPE_LOWER_THAN || optr2 == OP_TYPE_LOWER_EQUAL); if (optr == OP_TYPE_GREATER_THAN) { if (optr2 == OP_TYPE_LOWER_THAN) { @@ -705,7 +705,7 @@ int32_t filterAddRangeImpl(void *h, SFilterRange *ra, int32_t optr) { int32_t filterAddRange(void *h, SFilterRange *ra, int32_t optr) { SFilterRangeCtx *ctx = (SFilterRangeCtx *)h; int64_t tmp = 0; - + if (FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) { SIMPLE_COPY_VALUES(&ra->s, getDataMin(ctx->type, &tmp)); // FILTER_CLR_FLAG(ra->sflag, RA_NULL); @@ -723,7 +723,7 @@ int32_t filterAddRangeCtx(void *dst, void *src, int32_t optr) { SFilterRangeCtx *dctx = (SFilterRangeCtx *)dst; SFilterRangeCtx *sctx = (SFilterRangeCtx *)src; - assert(optr == LOGIC_COND_TYPE_OR); + ASSERT(optr == LOGIC_COND_TYPE_OR); if (sctx->rs == NULL) { return TSDB_CODE_SUCCESS; @@ -778,7 +778,10 @@ int32_t filterFinishRange(void *h) { while (r && r->next) { int64_t tmp = 1; - operateVal(&tmp, &r->ra.e, &tmp, OP_TYPE_ADD, ctx->type); + int32_t code = operateVal(&tmp, &r->ra.e, &tmp, OP_TYPE_ADD, ctx->type); + if (code != 0) { + return TSDB_CODE_APP_ERROR; + } if (ctx->pCompareFunc(&tmp, &r->next->ra.s) == 0) { rn = r->next; SIMPLE_COPY_VALUES((char *)&r->next->ra.s, (char *)&r->ra.s); @@ -1122,7 +1125,7 @@ int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, if (u->right.type == FLD_TYPE_VALUE) { SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u); - assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)); + ASSERT(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)); } else { int32_t paramNum = scalarGetOperatorParamNum(optr); if (1 != paramNum) { @@ -1132,7 +1135,7 @@ int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, } SFilterField *col = FILTER_UNIT_LEFT_FIELD(info, u); - assert(FILTER_GET_FLAG(col->flag, FLD_TYPE_COLUMN)); + ASSERT(FILTER_GET_FLAG(col->flag, FLD_TYPE_COLUMN)); info->units[info->unitNum].compare.type = FILTER_GET_COL_FIELD_TYPE(col); info->units[info->unitNum].compare.precision = FILTER_GET_COL_FIELD_PRECISION(col); @@ -1292,29 +1295,29 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan if (optr == LOGIC_COND_TYPE_AND) { if (ctx->isnull) { - assert(ctx->notnull == false && ctx->isrange == false); + ASSERT(ctx->notnull == false && ctx->isrange == false); filterAddUnit(dst, OP_TYPE_IS_NULL, &left, NULL, &uidx); filterAddUnitToGroup(g, uidx); return TSDB_CODE_SUCCESS; } if (ctx->notnull) { - assert(ctx->isnull == false && ctx->isrange == false); + ASSERT(ctx->isnull == false && ctx->isrange == false); filterAddUnit(dst, OP_TYPE_IS_NOT_NULL, &left, NULL, &uidx); filterAddUnitToGroup(g, uidx); return TSDB_CODE_SUCCESS; } if (!ctx->isrange) { - assert(ctx->isnull || ctx->notnull); + ASSERT(ctx->isnull || ctx->notnull); return TSDB_CODE_SUCCESS; } - assert(ctx->rs && ctx->rs->next == NULL); + ASSERT(ctx->rs && ctx->rs->next == NULL); SFilterRange *ra = &ctx->rs->ra; - assert(!((FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)))); + ASSERT(!((FILTER_GET_FLAG(ra->sflag, 
RANGE_FLG_NULL)) && (FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)))); if ((!FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (!FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL))) { __compar_fn_t func = getComparFunc(type, 0); @@ -1368,7 +1371,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan SFilterGroup ng = {0}; g = &ng; - assert(ctx->isnull || ctx->notnull || ctx->isrange); + ASSERT(ctx->isnull || ctx->notnull || ctx->isrange); if (ctx->isnull) { filterAddUnit(dst, OP_TYPE_IS_NULL, &left, NULL, &uidx); @@ -1377,7 +1380,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan } if (ctx->notnull) { - assert(!ctx->isrange); + ASSERT(!ctx->isrange); memset(g, 0, sizeof(*g)); filterAddUnit(dst, OP_TYPE_IS_NOT_NULL, &left, NULL, &uidx); @@ -1386,7 +1389,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan } if (!ctx->isrange) { - assert(ctx->isnull || ctx->notnull); + ASSERT(ctx->isnull || ctx->notnull); g->unitNum = 0; return TSDB_CODE_SUCCESS; } @@ -1444,7 +1447,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan filterAddUnitToGroup(g, uidx); } - assert(g->unitNum > 0); + ASSERT(g->unitNum > 0); taosArrayPush(res, g); @@ -1900,7 +1903,7 @@ void filterFreeInfo(SFilterInfo *info) { } int32_t filterHandleValueExtInfo(SFilterUnit *unit, char extInfo) { - assert(extInfo > 0 || extInfo < 0); + ASSERT(extInfo > 0 || extInfo < 0); uint8_t optr = FILTER_UNIT_OPTR(unit); switch (optr) { @@ -1916,7 +1919,8 @@ int32_t filterHandleValueExtInfo(SFilterUnit *unit, char extInfo) { unit->compare.optr = FILTER_DUMMY_EMPTY_OPTR; break; default: - assert(0); + fltError("unsupported operator type"); + return TSDB_CODE_APP_ERROR; } return TSDB_CODE_SUCCESS; @@ -1926,13 +1930,13 @@ int32_t fltInitValFieldData(SFilterInfo *info) { for (uint32_t i = 0; i < info->unitNum; ++i) { SFilterUnit *unit = &info->units[i]; if (unit->right.type != FLD_TYPE_VALUE) { - assert(unit->compare.optr == FILTER_DUMMY_EMPTY_OPTR || scalarGetOperatorParamNum(unit->compare.optr) == 1); + ASSERT(unit->compare.optr == FILTER_DUMMY_EMPTY_OPTR || scalarGetOperatorParamNum(unit->compare.optr) == 1); continue; } SFilterField *right = FILTER_UNIT_RIGHT_FIELD(info, unit); - assert(FILTER_GET_FLAG(right->flag, FLD_TYPE_VALUE)); + ASSERT(FILTER_GET_FLAG(right->flag, FLD_TYPE_VALUE)); uint32_t type = FILTER_UNIT_DATA_TYPE(unit); int8_t precision = FILTER_UNIT_DATA_PRECISION(unit); @@ -1940,7 +1944,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) { SValueNode *var = (SValueNode *)fi->desc; if (var == NULL) { - assert(fi->data != NULL); + ASSERT(fi->data != NULL); continue; } @@ -2068,7 +2072,8 @@ bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right) } default: - assert(false); + fltError("unsupported operator type"); + return false; } return true; @@ -2101,7 +2106,7 @@ int32_t filterAddUnitRange(SFilterInfo *info, SFilterUnit *u, SFilterRangeCtx *c FILTER_SET_FLAG(ra.sflag, RANGE_FLG_NULL); break; case OP_TYPE_NOT_EQUAL: - assert(type == TSDB_DATA_TYPE_BOOL); + ASSERT(type == TSDB_DATA_TYPE_BOOL); if (GET_INT8_VAL(val)) { SIMPLE_COPY_VALUES(&ra.s, &tmp); SIMPLE_COPY_VALUES(&ra.e, &tmp); @@ -2116,7 +2121,8 @@ int32_t filterAddUnitRange(SFilterInfo *info, SFilterUnit *u, SFilterRangeCtx *c SIMPLE_COPY_VALUES(&ra.e, val); break; default: - assert(0); + fltError("unsupported operator type"); + return TSDB_CODE_APP_ERROR; } filterAddRange(ctx, &ra, optr); @@ -2368,8 +2374,8 @@ int32_t 
filterMergeTwoGroupsImpl(SFilterInfo *info, SFilterRangeCtx **ctx, int32 filterReuseRangeCtx(*ctx, type, 0); } - assert(gRes2->colInfo[cidx].type == RANGE_TYPE_MR_CTX); - assert(gRes1->colInfo[cidx].type == RANGE_TYPE_MR_CTX); + ASSERT(gRes2->colInfo[cidx].type == RANGE_TYPE_MR_CTX); + ASSERT(gRes1->colInfo[cidx].type == RANGE_TYPE_MR_CTX); filterCopyRangeCtx(*ctx, gRes2->colInfo[cidx].info); filterSourceRangeFromCtx(*ctx, gRes1->colInfo[cidx].info, optr, empty, all); @@ -2405,7 +2411,7 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx **gRes1, SFilter continue; } - assert(idx1 == idx2); + ASSERT(idx1 == idx2); ++merNum; @@ -2455,15 +2461,15 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx **gRes1, SFilter } } - assert(merNum > 0); + ASSERT(merNum > 0); SFilterColInfo *colInfo = NULL; - assert(merNum == equal1 || merNum == equal2); + ASSERT(merNum == equal1 || merNum == equal2); filterFreeGroupCtx(*gRes2); *gRes2 = NULL; - assert(colCtxs && taosArrayGetSize(colCtxs) > 0); + ASSERT(colCtxs && taosArrayGetSize(colCtxs) > 0); int32_t ctxSize = (int32_t)taosArrayGetSize(colCtxs); SFilterColCtx *pctx = NULL; @@ -2520,7 +2526,7 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t *gR if (pColNum > 0) { for (int32_t m = 0; m <= pEnd; ++m) { for (int32_t n = cStart; n <= cEnd; ++n) { - assert(m < n); + ASSERT(m < n); filterMergeTwoGroups(info, &gRes[m], &gRes[n], &all); FLT_CHK_JMP(all); @@ -2541,7 +2547,7 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t *gR for (int32_t m = cStart; m < cEnd; ++m) { for (int32_t n = m + 1; n <= cEnd; ++n) { - assert(m < n); + ASSERT(m < n); filterMergeTwoGroups(info, &gRes[m], &gRes[n], &all); FLT_CHK_JMP(all); @@ -2636,7 +2642,7 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t gResNum for (uint32_t m = 0; m < res->colNum; ++m) { colInfo = &res->colInfo[res->colIdx[m]]; if (FILTER_NO_MERGE_DATA_TYPE(colInfo->dataType)) { - assert(colInfo->type == RANGE_TYPE_UNIT); + ASSERT(colInfo->type == RANGE_TYPE_UNIT); int32_t usize = (int32_t)taosArrayGetSize((SArray *)colInfo->info); for (int32_t n = 0; n < usize; ++n) { @@ -2649,7 +2655,7 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx **gRes, int32_t gResNum continue; } - assert(colInfo->type == RANGE_TYPE_MR_CTX); + ASSERT(colInfo->type == RANGE_TYPE_MR_CTX); filterAddGroupUnitFromCtx(info, &oinfo, colInfo->info, res->colIdx[m], &ng, optr, group); } @@ -2690,7 +2696,7 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_ continue; } - assert(idxNum[i] == gResNum); + ASSERT(idxNum[i] == gResNum); if (idxs == NULL) { idxs = taosMemoryCalloc(info->fields[FLD_TYPE_COLUMN].num, sizeof(*idxs)); @@ -2714,7 +2720,7 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_ continue; } - assert(res->colIdx[n] == idxs[m]); + ASSERT(res->colIdx[n] == idxs[m]); SFilterColInfo *colInfo = &res->colInfo[res->colIdx[n]]; if (info->colRange[m] == NULL) { @@ -2723,7 +2729,7 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx **gRes, int32_ info->colRange[m]->colId = FILTER_GET_COL_FIELD_ID(fi); } - assert(colInfo->type == RANGE_TYPE_MR_CTX); + ASSERT(colInfo->type == RANGE_TYPE_MR_CTX); bool all = false; filterSourceRangeFromCtx(info->colRange[m], colInfo->info, LOGIC_COND_TYPE_OR, NULL, &all); @@ -2971,7 +2977,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 unitIdx = pGroupIdx; --info->blkGroupNum; - 
assert(empty || all); + ASSERT(empty || all); if (empty) { FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY); @@ -3077,7 +3083,7 @@ int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, SColumn goto _return; } - assert(info->unitNum > 1); + ASSERT(info->unitNum > 1); *all = filterExecuteBasedOnStatisImpl(info, numOfRows, p, statis, numOfCols); goto _return; @@ -3175,6 +3181,7 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, SColumnInfoData *pRe void *colData = colDataGetData(pData, i); if (colData == NULL || colDataIsNull_s(pData, i)) { all = false; + p[i] = 0; continue; } diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index debc210f0f18d833b760426ad31f09d91e0aebb4..c1e958b055f3442f8338c3dcdcae68bd318f0091 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -327,7 +327,10 @@ int32_t sclInitParam(SNode *node, SScalarParam *param, SScalarCtx *ctx, int32_t case QUERY_NODE_VALUE: { SValueNode *valueNode = (SValueNode *)node; - ASSERT(param->columnData == NULL); + if (param->columnData != NULL) { + sclError("columnData should be NULL"); + SCL_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } param->numOfRows = 1; int32_t code = sclCreateColumnInfoData(&valueNode->node.resType, 1, param); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 1de8a35308b4cf35982ee9e028b67ba1f7db9960..411b9b7ab504176f77c691cce1b83452ec2267c0 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -361,7 +361,6 @@ static int32_t doLengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarP SColumnInfoData *pInputData = pInput->columnData; SColumnInfoData *pOutputData = pOutput->columnData; - ASSERT(pOutputData->info.type == TSDB_DATA_TYPE_BIGINT); int64_t *out = (int64_t *)pOutputData->pData; for (int32_t i = 0; i < pInput->numOfRows; ++i) { @@ -1729,37 +1728,31 @@ bool getTimePseudoFuncEnv(SFunctionNode *UNUSED_PARAM(pFunc), SFuncExecEnv *pEnv } int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - ASSERT(inputNum == 1); colDataAppendInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 0)); return TSDB_CODE_SUCCESS; } int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - ASSERT(inputNum == 1); colDataAppendInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 1)); return TSDB_CODE_SUCCESS; } int32_t winDurFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - ASSERT(inputNum == 1); colDataAppendInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 2)); return TSDB_CODE_SUCCESS; } int32_t winStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - ASSERT(inputNum == 1); colDataAppendInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 3)); return TSDB_CODE_SUCCESS; } int32_t winEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - ASSERT(inputNum == 1); colDataAppendInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 4)); return TSDB_CODE_SUCCESS; } int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { - ASSERT(inputNum == 1); char* p = colDataGetVarData(pInput->columnData, 0); colDataAppendNItems(pOutput->columnData, pOutput->numOfRows, p, 
pInput->numOfRows); @@ -2598,7 +2591,7 @@ static bool checkStateOp(int8_t op, SColumnInfoData *pCol, int32_t index, SScala break; } default: { - ASSERT(0); + return false; } } return false; @@ -2771,7 +2764,9 @@ static bool getHistogramBinDesc(SHistoFuncBin **bins, int32_t *binNum, char *bin intervals[0] = -INFINITY; intervals[numOfBins - 1] = INFINITY; // in case of desc bin orders, -inf/inf should be swapped - ASSERT(numOfBins >= 4); + if (numOfBins < 4) { + return false; + } if (intervals[1] > intervals[numOfBins - 2]) { TSWAP(intervals[0], intervals[numOfBins - 1]); } diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 8f2fe87a53a68a6267a8085a60cfb99303bcd059..e9f345215e5c7eb5792a7141484ca244be560c75 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -389,18 +389,18 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { func = varToUnsigned; } else if (IS_FLOAT_TYPE(pCtx->outType)) { func = varToFloat; - } else if (pCtx->outType == TSDB_DATA_TYPE_BINARY) { // nchar -> binary - ASSERT(pCtx->inType == TSDB_DATA_TYPE_NCHAR); + } else if (pCtx->outType == TSDB_DATA_TYPE_VARCHAR && + pCtx->inType == TSDB_DATA_TYPE_NCHAR) { // nchar -> binary func = ncharToVar; vton = true; - } else if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) { // binary -> nchar - ASSERT(pCtx->inType == TSDB_DATA_TYPE_VARCHAR); + } else if (pCtx->outType == TSDB_DATA_TYPE_NCHAR && + pCtx->inType == TSDB_DATA_TYPE_VARCHAR) { // binary -> nchar func = varToNchar; vton = true; } else if (TSDB_DATA_TYPE_TIMESTAMP == pCtx->outType) { func = varToTimestamp; } else { - sclError("invalid convert outType:%d", pCtx->outType); + sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType); return TSDB_CODE_APP_ERROR; } @@ -416,12 +416,10 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { char *data = colDataGetVarData(pCtx->pIn->columnData, i); int32_t convertType = pCtx->inType; if (pCtx->inType == TSDB_DATA_TYPE_JSON) { - if (*data == TSDB_DATA_TYPE_NULL) { - ASSERT(0); - } else if (*data == TSDB_DATA_TYPE_NCHAR) { + if (*data == TSDB_DATA_TYPE_NCHAR) { data += CHAR_BYTES; convertType = TSDB_DATA_TYPE_NCHAR; - } else if (tTagIsJson(data)) { + } else if (tTagIsJson(data) || *data == TSDB_DATA_TYPE_NULL) { terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR; return terrno; } else { @@ -447,7 +445,11 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { tmp[varDataLen(data)] = 0; } else if (TSDB_DATA_TYPE_NCHAR == convertType) { // we need to convert it to native char string, and then perform the string to numeric data - ASSERT(varDataLen(data) <= bufSize); + if (varDataLen(data) > bufSize) { + sclError("castConvert convert buffer size too small"); + taosMemoryFreeClear(tmp); + return TSDB_CODE_APP_ERROR; + } int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(data), varDataLen(data), tmp); if (len < 0) { @@ -557,27 +559,17 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t *fp = filterGetCompFunc(type, optr); if (IS_NUMERIC_TYPE(type)) { - if (typeLeft == TSDB_DATA_TYPE_NCHAR) { - ASSERT(0); - // convertNcharToDouble(*pLeftData, pLeftOut); - // *pLeftData = pLeftOut; - } else if (typeLeft == TSDB_DATA_TYPE_BINARY) { - ASSERT(0); - // convertBinaryToDouble(*pLeftData, pLeftOut); - // *pLeftData = pLeftOut; + if (typeLeft == TSDB_DATA_TYPE_NCHAR || + typeLeft == TSDB_DATA_TYPE_VARCHAR) { + return false; } else if (typeLeft != type) { 
convertNumberToNumber(*pLeftData, pLeftOut, typeLeft, type); *pLeftData = pLeftOut; } - if (typeRight == TSDB_DATA_TYPE_NCHAR) { - ASSERT(0); - // convertNcharToDouble(*pRightData, pRightOut); - // *pRightData = pRightOut; - } else if (typeRight == TSDB_DATA_TYPE_BINARY) { - ASSERT(0); - // convertBinaryToDouble(*pRightData, pRightOut); - // *pRightData = pRightOut; + if (typeRight == TSDB_DATA_TYPE_NCHAR || + typeRight == TSDB_DATA_TYPE_VARCHAR) { + return false; } else if (typeRight != type) { convertNumberToNumber(*pRightData, pRightOut, typeRight, type); *pRightData = pRightOut; @@ -592,7 +584,7 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t *freeRight = true; } } else { - ASSERT(0); + return false; } return true; @@ -683,7 +675,10 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, } if (overflow) { - ASSERT(1 == pIn->numOfRows); + if (1 != pIn->numOfRows) { + sclError("invalid numOfRows %d", pIn->numOfRows); + return TSDB_CODE_APP_ERROR; + } pOut->numOfRows = 0; @@ -1938,7 +1933,6 @@ _bin_scalar_fn_t getBinScalarOperatorFn(int32_t binFunctionId) { case OP_TYPE_JSON_CONTAINS: return vectorJsonContains; default: - ASSERT(0); return NULL; } } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 2b22a4ed2937b52c591f41661952cd01a667ae79..2f991288ffd0201be79ed3392befcd5da669294e 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -69,8 +69,7 @@ _err: } void streamMetaClose(SStreamMeta* pMeta) { - tdbCommit(pMeta->db, pMeta->txn); - tdbPostCommit(pMeta->db, pMeta->txn); + tdbAbort(pMeta->db, pMeta->txn); tdbTbClose(pMeta->pTaskDb); tdbTbClose(pMeta->pCheckpointDb); tdbClose(pMeta->db); @@ -88,6 +87,7 @@ void streamMetaClose(SStreamMeta* pMeta) { /*streamMetaReleaseTask(pMeta, pTask);*/ } taosHashCleanup(pMeta->pTasks); + taosHashCleanup(pMeta->pRecoverStatus); taosMemoryFree(pMeta->path); taosMemoryFree(pMeta); } @@ -207,6 +207,7 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) { if (ppTask) { SStreamTask* pTask = *ppTask; taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t)); + tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), pMeta->txn); /*if (pTask->timer) { * taosTmrStop(pTask->timer);*/ /*pTask->timer = NULL;*/ diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index 1ce4a35dff6441a9ec289d1cb94eb24743122762..be12c72d004711e6c126792b5a7cfe4053ad1f18 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -85,9 +85,7 @@ static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t w watermark = TMAX(originInt / adjInterval, 1) * adjInterval; } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) { watermark = MAX_NUM_SCALABLE_BF * adjInterval; - }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) { - watermark = MIN_NUM_SCALABLE_BF * adjInterval; - }*/ // Todo(liuyao) save window info to tdb + } return watermark; } diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 3bf4a8d1cda5e55a76a5286c0c9837ff2e3823fd..7e08e195c1d255783da14967a26a4386b31e0b6b 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -32,11 +32,9 @@ typedef struct SyncRequestVoteReply SyncRequestVoteReply; typedef struct SyncAppendEntries SyncAppendEntries; typedef struct SyncAppendEntriesReply SyncAppendEntriesReply; typedef struct SSyncEnv SSyncEnv; -typedef struct SRaftStore 
SRaftStore; typedef struct SVotesGranted SVotesGranted; typedef struct SVotesRespond SVotesRespond; typedef struct SSyncIndexMgr SSyncIndexMgr; -typedef struct SRaftCfg SRaftCfg; typedef struct SSyncRespMgr SSyncRespMgr; typedef struct SSyncSnapshotSender SSyncSnapshotSender; typedef struct SSyncSnapshotReceiver SSyncSnapshotReceiver; @@ -70,6 +68,11 @@ typedef struct SRaftId { SyncGroupId vgId; } SRaftId; +typedef struct SRaftStore { + SyncTerm currentTerm; + SRaftId voteFor; +} SRaftStore; + typedef struct SSyncHbTimerData { int64_t syncNodeRid; SSyncTimer* pTimer; @@ -112,8 +115,8 @@ typedef struct SSyncNode { // sync io SSyncLogBuffer* pLogBuf; - SWal* pWal; - const SMsgCb* msgcb; + SWal* pWal; + const SMsgCb* msgcb; int32_t (*syncSendMSg)(const SEpSet* pEpSet, SRpcMsg* pMsg); int32_t (*syncEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg); int32_t (*syncEqCtrlMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg); @@ -139,8 +142,8 @@ typedef struct SSyncNode { int64_t rid; // tla+ server vars - ESyncState state; - SRaftStore* pRaftStore; + ESyncState state; + SRaftStore raftStore; // tla+ candidate vars SVotesGranted* pVotesGranted; @@ -228,7 +231,8 @@ int32_t syncNodeStart(SSyncNode* pSyncNode); int32_t syncNodeStartStandBy(SSyncNode* pSyncNode); void syncNodeClose(SSyncNode* pSyncNode); void syncNodePreClose(SSyncNode* pSyncNode); -int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_t *seq); +void syncNodePostClose(SSyncNode* pSyncNode); +int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_t* seq); int32_t syncNodeRestore(SSyncNode* pSyncNode); void syncHbTimerDataFree(SSyncHbTimerData* pData); diff --git a/source/libs/sync/inc/syncMessage.h b/source/libs/sync/inc/syncMessage.h index 3bd94dbab5811fc071d7444ab864d3087f785d53..c3566c7c820b97c4484f15ce59239d423db9d480 100644 --- a/source/libs/sync/inc/syncMessage.h +++ b/source/libs/sync/inc/syncMessage.h @@ -247,8 +247,8 @@ typedef struct SyncLocalCmd { SRaftId destId; int32_t cmd; - SyncTerm sdNewTerm; // step down new term - SyncIndex fcIndex; // follower commit index + SyncTerm currentTerm; // step down new term + SyncIndex commitIndex; // follower commit index } SyncLocalCmd; int32_t syncBuildTimeout(SRpcMsg* pMsg, ESyncTimeoutType ttype, uint64_t logicClock, int32_t ms, SSyncNode* pNode); @@ -258,8 +258,8 @@ int32_t syncBuildRequestVote(SRpcMsg* pMsg, int32_t vgId); int32_t syncBuildRequestVoteReply(SRpcMsg* pMsg, int32_t vgId); int32_t syncBuildAppendEntries(SRpcMsg* pMsg, int32_t dataLen, int32_t vgId); int32_t syncBuildAppendEntriesReply(SRpcMsg* pMsg, int32_t vgId); -int32_t syncBuildAppendEntriesFromRaftLog(SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevLogTerm, - SRpcMsg* pRpcMsg); +int32_t syncBuildAppendEntriesFromRaftEntry(SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevLogTerm, + SRpcMsg* pRpcMsg); int32_t syncBuildHeartbeat(SRpcMsg* pMsg, int32_t vgId); int32_t syncBuildHeartbeatReply(SRpcMsg* pMsg, int32_t vgId); int32_t syncBuildPreSnapshot(SRpcMsg* pMsg, int32_t vgId); diff --git a/source/libs/sync/inc/syncPipeline.h b/source/libs/sync/inc/syncPipeline.h index a0a0691694776f2830d4f80ec2e96ec8bf782921..504a9f0bd77158a2e73ed1b3d1da317cbf1ddcd6 100644 --- a/source/libs/sync/inc/syncPipeline.h +++ b/source/libs/sync/inc/syncPipeline.h @@ -78,14 +78,14 @@ static FORCE_INLINE int32_t syncLogGetNextRetryBackoff(SSyncLogReplMgr* pMgr) { SyncTerm syncLogReplMgrGetPrevLogTerm(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index); int32_t 
syncLogReplMgrReplicateOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode); -int32_t syncLogBufferReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, - SRaftId* pDestId, bool* pBarrier); -int32_t syncLogReplMgrReplicateAttemptedOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode); -int32_t syncLogReplMgrReplicateProbeOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index); +int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, + SRaftId* pDestId, bool* pBarrier); +int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode); +int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index); int32_t syncLogReplMgrProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); -int32_t syncLogReplMgrProcessReplyInRecoveryMode(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); -int32_t syncLogReplMgrProcessReplyInNormalMode(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); +int32_t syncLogReplMgrProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); +int32_t syncLogReplMgrProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg); int32_t syncLogReplMgrProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg); int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode); @@ -98,6 +98,7 @@ int32_t syncLogBufferReInit(SSyncLogBuffer* pBuf, SSyncNode* pNode); // access int64_t syncLogBufferGetEndIndex(SSyncLogBuffer* pBuf); +SyncTerm syncLogBufferGetLastMatchTerm(SSyncLogBuffer* pBuf); int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry); int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevTerm); int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* pMatchTerm); diff --git a/source/libs/sync/inc/syncRaftStore.h b/source/libs/sync/inc/syncRaftStore.h index bb6405f6b2844476273c3ecf428e159125b0688c..21a8fc64a811a832bc3893223b4c1940a6638f54 100644 --- a/source/libs/sync/inc/syncRaftStore.h +++ b/source/libs/sync/inc/syncRaftStore.h @@ -24,27 +24,16 @@ extern "C" { #define RAFT_STORE_BLOCK_SIZE 512 #define RAFT_STORE_PATH_LEN (TSDB_FILENAME_LEN * 2) +#define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0}) -#define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0}) - -typedef struct SRaftStore { - SyncTerm currentTerm; - SRaftId voteFor; - TdFilePtr pFile; - char path[RAFT_STORE_PATH_LEN]; -} SRaftStore; - -SRaftStore *raftStoreOpen(const char *path); -int32_t raftStoreClose(SRaftStore *pRaftStore); -int32_t raftStorePersist(SRaftStore *pRaftStore); -int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len); -int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len); - -bool raftStoreHasVoted(SRaftStore *pRaftStore); -void raftStoreVote(SRaftStore *pRaftStore, SRaftId *pRaftId); -void raftStoreClearVote(SRaftStore *pRaftStore); -void raftStoreNextTerm(SRaftStore *pRaftStore); -void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term); +int32_t raftStoreReadFile(SSyncNode *pNode); +int32_t raftStoreWriteFile(SSyncNode *pNode); + +bool raftStoreHasVoted(SSyncNode *pNode); +void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId); +void raftStoreClearVote(SSyncNode *pNode); +void raftStoreNextTerm(SSyncNode *pNode); +void 
raftStoreSetTerm(SSyncNode *pNode, SyncTerm term); #ifdef __cplusplus } diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h index 974a8f968e99759c6a2124226129f853c2aff3b2..5277e7818fd8a152551eef51c2fdb5e858d724a5 100644 --- a/source/libs/sync/inc/syncSnapshot.h +++ b/source/libs/sync/inc/syncSnapshot.h @@ -57,7 +57,6 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender); bool snapshotSenderIsStart(SSyncSnapshotSender *pSender); int32_t snapshotSenderStart(SSyncSnapshotSender *pSender); void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish); -int32_t snapshotSend(SSyncSnapshotSender *pSender); int32_t snapshotReSend(SSyncSnapshotSender *pSender); typedef struct SSyncSnapshotReceiver { @@ -82,7 +81,6 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg); void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver); bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver); -void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver); // on message int32_t syncNodeOnSnapshot(SSyncNode *ths, const SRpcMsg *pMsg); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 1dc6905b88cbf93dc31a4b9dc73f27567868876b..835e5d248e345cbbb3206e35d67ddd20717009db 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -89,45 +89,7 @@ // /\ UNCHANGED <> // -int32_t syncNodeFollowerCommit(SSyncNode* ths, SyncIndex newCommitIndex) { - if (ths->state != TAOS_SYNC_STATE_FOLLOWER) { - sNTrace(ths, "can not do follower commit"); - return -1; - } - - // maybe update commit index, leader notice me - if (newCommitIndex > ths->commitIndex) { - // has commit entry in local - if (newCommitIndex <= ths->pLogStore->syncLogLastIndex(ths->pLogStore)) { - // advance commit index to sanpshot first - SSnapshot snapshot; - ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot); - if (snapshot.lastApplyIndex >= 0 && snapshot.lastApplyIndex > ths->commitIndex) { - SyncIndex commitBegin = ths->commitIndex; - SyncIndex commitEnd = snapshot.lastApplyIndex; - ths->commitIndex = snapshot.lastApplyIndex; - sNTrace(ths, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin, commitEnd); - } - - SyncIndex beginIndex = ths->commitIndex + 1; - SyncIndex endIndex = newCommitIndex; - - // update commit index - ths->commitIndex = newCommitIndex; - - // call back Wal - int32_t code = ths->pLogStore->syncLogUpdateCommitIndex(ths->pLogStore, ths->commitIndex); - ASSERT(code == 0); - - code = syncNodeDoCommit(ths, beginIndex, endIndex, ths->state); - ASSERT(code == 0); - } - } - - return 0; -} - -SSyncRaftEntry* syncLogAppendEntriesToRaftEntry(const SyncAppendEntries* pMsg) { +SSyncRaftEntry* syncBuildRaftEntryFromAppendEntries(const SyncAppendEntries* pMsg) { SSyncRaftEntry* pEntry = taosMemoryMalloc(pMsg->dataLen); if (pEntry == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -158,17 +120,17 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { // prepare response msg pReply->srcId = ths->myRaftId; pReply->destId = pMsg->srcId; - pReply->term = ths->pRaftStore->currentTerm; + pReply->term = ths->raftStore.currentTerm; pReply->success = false; pReply->matchIndex = SYNC_INDEX_INVALID; pReply->lastSendIndex = pMsg->prevLogIndex + 1; pReply->startTime = ths->startTime; - if (pMsg->term < ths->pRaftStore->currentTerm) { + if (pMsg->term < 
ths->raftStore.currentTerm) { goto _SEND_RESPONSE; } - if (pMsg->term > ths->pRaftStore->currentTerm) { + if (pMsg->term > ths->raftStore.currentTerm) { pReply->term = pMsg->term; } @@ -181,7 +143,7 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { goto _IGNORE; } - SSyncRaftEntry* pEntry = syncLogAppendEntriesToRaftEntry(pMsg); + SSyncRaftEntry* pEntry = syncBuildRaftEntryFromAppendEntries(pMsg); if (pEntry == NULL) { sError("vgId:%d, failed to get raft entry from append entries since %s", ths->vgId, terrstr()); @@ -206,12 +168,13 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { accepted = true; _SEND_RESPONSE: + pEntry = NULL; pReply->matchIndex = syncLogBufferProceed(ths->pLogBuf, ths, &pReply->lastMatchTerm); bool matched = (pReply->matchIndex >= pReply->lastSendIndex); if (accepted && matched) { pReply->success = true; // update commit index only after matching - (void)syncNodeUpdateCommitIndex(ths, pMsg->commitIndex); + (void)syncNodeUpdateCommitIndex(ths, TMIN(pMsg->commitIndex, pReply->lastSendIndex)); } // ack, i.e. send response @@ -230,256 +193,3 @@ _IGNORE: rpcFreeCont(rpcRsp.pCont); return 0; } - -int32_t syncNodeOnAppendEntriesOld(SSyncNode* ths, const SRpcMsg* pRpcMsg) { - SyncAppendEntries* pMsg = pRpcMsg->pCont; - SRpcMsg rpcRsp = {0}; - - // if already drop replica, do not process - if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { - syncLogRecvAppendEntries(ths, pMsg, "not in my config"); - goto _IGNORE; - } - - // prepare response msg - int32_t code = syncBuildAppendEntriesReply(&rpcRsp, ths->vgId); - if (code != 0) { - syncLogRecvAppendEntries(ths, pMsg, "build rsp error"); - goto _IGNORE; - } - - SyncAppendEntriesReply* pReply = rpcRsp.pCont; - pReply->srcId = ths->myRaftId; - pReply->destId = pMsg->srcId; - pReply->term = ths->pRaftStore->currentTerm; - pReply->success = false; - // pReply->matchIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); - pReply->matchIndex = SYNC_INDEX_INVALID; - pReply->lastSendIndex = pMsg->prevLogIndex + 1; - pReply->startTime = ths->startTime; - - if (pMsg->term < ths->pRaftStore->currentTerm) { - syncLogRecvAppendEntries(ths, pMsg, "reject, small term"); - goto _SEND_RESPONSE; - } - - if (pMsg->term > ths->pRaftStore->currentTerm) { - pReply->term = pMsg->term; - } - - syncNodeStepDown(ths, pMsg->term); - syncNodeResetElectTimer(ths); - - SyncIndex startIndex = ths->pLogStore->syncLogBeginIndex(ths->pLogStore); - SyncIndex lastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); - - if (pMsg->prevLogIndex > lastIndex) { - syncLogRecvAppendEntries(ths, pMsg, "reject, index not match"); - goto _SEND_RESPONSE; - } - - if (pMsg->prevLogIndex >= startIndex) { - SyncTerm myPreLogTerm = syncNodeGetPreTerm(ths, pMsg->prevLogIndex + 1); - // ASSERT(myPreLogTerm != SYNC_TERM_INVALID); - if (myPreLogTerm == SYNC_TERM_INVALID) { - syncLogRecvAppendEntries(ths, pMsg, "reject, pre-term invalid"); - goto _SEND_RESPONSE; - } - - if (myPreLogTerm != pMsg->prevLogTerm) { - syncLogRecvAppendEntries(ths, pMsg, "reject, pre-term not match"); - goto _SEND_RESPONSE; - } - } - - // accept - pReply->success = true; - bool hasAppendEntries = pMsg->dataLen > 0; - if (hasAppendEntries) { - SSyncRaftEntry* pAppendEntry = syncEntryBuildFromAppendEntries(pMsg); - ASSERT(pAppendEntry != NULL); - - SyncIndex appendIndex = pMsg->prevLogIndex + 1; - - LRUHandle* hLocal = NULL; - LRUHandle* hAppend = NULL; - - int32_t code = 0; - SSyncRaftEntry* pLocalEntry = NULL; - SLRUCache* pCache = 
ths->pLogStore->pCache; - hLocal = taosLRUCacheLookup(pCache, &appendIndex, sizeof(appendIndex)); - if (hLocal) { - pLocalEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, hLocal); - code = 0; - - ths->pLogStore->cacheHit++; - sNTrace(ths, "hit cache index:%" PRId64 ", bytes:%u, %p", appendIndex, pLocalEntry->bytes, pLocalEntry); - - } else { - ths->pLogStore->cacheMiss++; - sNTrace(ths, "miss cache index:%" PRId64, appendIndex); - - code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, appendIndex, &pLocalEntry); - } - - if (code == 0) { - // get local entry success - - if (pLocalEntry->term == pAppendEntry->term) { - // do nothing - sNTrace(ths, "log match, do nothing, index:%" PRId64, appendIndex); - - } else { - // truncate - code = ths->pLogStore->syncLogTruncate(ths->pLogStore, appendIndex); - if (code != 0) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "ignore, truncate error, append-index:%" PRId64, appendIndex); - syncLogRecvAppendEntries(ths, pMsg, logBuf); - - if (hLocal) { - taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false); - } else { - syncEntryDestroy(pLocalEntry); - } - - if (hAppend) { - taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false); - } else { - syncEntryDestroy(pAppendEntry); - } - - goto _IGNORE; - } - - ASSERT(pAppendEntry->index == appendIndex); - - // append - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "ignore, append error, append-index:%" PRId64, appendIndex); - syncLogRecvAppendEntries(ths, pMsg, logBuf); - - if (hLocal) { - taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false); - } else { - syncEntryDestroy(pLocalEntry); - } - - if (hAppend) { - taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false); - } else { - syncEntryDestroy(pAppendEntry); - } - - goto _IGNORE; - } - - syncCacheEntry(ths->pLogStore, pAppendEntry, &hAppend); - } - - } else { - if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { - // log not exist - - // truncate - code = ths->pLogStore->syncLogTruncate(ths->pLogStore, appendIndex); - if (code != 0) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "ignore, log not exist, truncate error, append-index:%" PRId64, appendIndex); - syncLogRecvAppendEntries(ths, pMsg, logBuf); - - syncEntryDestroy(pLocalEntry); - syncEntryDestroy(pAppendEntry); - goto _IGNORE; - } - - // append - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "ignore, log not exist, append error, append-index:%" PRId64, appendIndex); - syncLogRecvAppendEntries(ths, pMsg, logBuf); - - if (hLocal) { - taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false); - } else { - syncEntryDestroy(pLocalEntry); - } - - if (hAppend) { - taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false); - } else { - syncEntryDestroy(pAppendEntry); - } - - goto _IGNORE; - } - - syncCacheEntry(ths->pLogStore, pAppendEntry, &hAppend); - - } else { - // get local entry success - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "ignore, get local entry error, append-index:%" PRId64 " err:%d", appendIndex, - terrno); - syncLogRecvAppendEntries(ths, pMsg, logBuf); - - if (hLocal) { - taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false); - } else { - syncEntryDestroy(pLocalEntry); - } - - if (hAppend) { - taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false); - } else { - syncEntryDestroy(pAppendEntry); - } - - goto _IGNORE; - } - } - - // update 
match index - pReply->matchIndex = pAppendEntry->index; - - if (hLocal) { - taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false); - } else { - syncEntryDestroy(pLocalEntry); - } - - if (hAppend) { - taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false); - } else { - syncEntryDestroy(pAppendEntry); - } - - } else { - // no append entries, do nothing - // maybe has extra entries, no harm - - // update match index - pReply->matchIndex = pMsg->prevLogIndex; - } - - // maybe update commit index, leader notice me - syncNodeFollowerCommit(ths, pMsg->commitIndex); - - syncLogRecvAppendEntries(ths, pMsg, "accept"); - goto _SEND_RESPONSE; - -_IGNORE: - rpcFreeCont(rpcRsp.pCont); - return 0; - -_SEND_RESPONSE: - // msg event log - syncLogSendAppendEntriesReply(ths, pReply, ""); - - // send response - syncNodeSendMsgById(&pReply->destId, ths, &rpcRsp); - return 0; -} diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index b83be2bebb41556d68ecd1596bb3065aacc20633..44a29da3ea0e54d4e9932183a67d298a9c6239ed 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -50,19 +50,19 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } // drop stale response - if (pMsg->term < ths->pRaftStore->currentTerm) { + if (pMsg->term < ths->raftStore.currentTerm) { syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response"); return 0; } if (ths->state == TAOS_SYNC_STATE_LEADER) { - if (pMsg->term > ths->pRaftStore->currentTerm) { + if (pMsg->term > ths->raftStore.currentTerm) { syncLogRecvAppendEntriesReply(ths, pMsg, "error term"); syncNodeStepDown(ths, pMsg->term); return -1; } - ASSERT(pMsg->term == ths->pRaftStore->currentTerm); + ASSERT(pMsg->term == ths->raftStore.currentTerm); sTrace("vgId:%d, received append entries reply. 
srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); @@ -89,63 +89,3 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } return 0; } - -int32_t syncNodeOnAppendEntriesReplyOld(SSyncNode* ths, SyncAppendEntriesReply* pMsg) { - int32_t ret = 0; - - // if already drop replica, do not process - if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { - syncLogRecvAppendEntriesReply(ths, pMsg, "not in my config"); - return 0; - } - - // drop stale response - if (pMsg->term < ths->pRaftStore->currentTerm) { - syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response"); - return 0; - } - - if (ths->state == TAOS_SYNC_STATE_LEADER) { - if (pMsg->term > ths->pRaftStore->currentTerm) { - syncLogRecvAppendEntriesReply(ths, pMsg, "error term"); - syncNodeStepDown(ths, pMsg->term); - return -1; - } - - ASSERT(pMsg->term == ths->pRaftStore->currentTerm); - - if (pMsg->success) { - SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); - if (pMsg->matchIndex > oldMatchIndex) { - syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex); - syncMaybeAdvanceCommitIndex(ths); - - // maybe update minMatchIndex - ths->minMatchIndex = syncMinMatchIndex(ths); - } - syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1); - - } else { - SyncIndex nextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId)); - if (nextIndex > SYNC_INDEX_BEGIN) { - --nextIndex; - } - syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), nextIndex); - } - - // send next append entries - SPeerState* pState = syncNodeGetPeerState(ths, &(pMsg->srcId)); - ASSERT(pState != NULL); - - if (pMsg->lastSendIndex == pState->lastSendIndex) { - int64_t timeNow = taosGetTimestampMs(); - int64_t elapsed = timeNow - pState->lastSendTime; - sNTrace(ths, "sync-append-entries rtt elapsed:%" PRId64 ", index:%" PRId64, elapsed, pState->lastSendIndex); - - syncNodeReplicateOne(ths, &(pMsg->srcId), true); - } - } - - syncLogRecvAppendEntriesReply(ths, pMsg, "process"); - return 0; -} diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 5fdcbeb91c119bfebb21805b12fc1faa15eb2cab..67ed1e0701eebefff06870af66611acdbd3bb681 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -43,147 +43,6 @@ // IN commitIndex' = [commitIndex EXCEPT ![i] = newCommitIndex] // /\ UNCHANGED <> // -void syncOneReplicaAdvance(SSyncNode* pSyncNode) { - if (pSyncNode == NULL) { - sError("pSyncNode is NULL"); - return; - } - - if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { - sNError(pSyncNode, "not leader, can not advance commit index"); - return; - } - - if (pSyncNode->replicaNum != 1) { - sNError(pSyncNode, "not one replica, can not advance commit index"); - return; - } - - // advance commit index to snapshot first - SSnapshot snapshot; - pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); - if (snapshot.lastApplyIndex > 0 && snapshot.lastApplyIndex > pSyncNode->commitIndex) { - SyncIndex commitBegin = pSyncNode->commitIndex; - SyncIndex commitEnd = snapshot.lastApplyIndex; - pSyncNode->commitIndex = snapshot.lastApplyIndex; - sNTrace(pSyncNode, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin, commitEnd); - } - - // advance commit index as large as possible - SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode); - if (lastIndex > pSyncNode->commitIndex) { - sNTrace(pSyncNode, "commit by wal from index:%" 
PRId64 " to index:%" PRId64, pSyncNode->commitIndex + 1, lastIndex); - pSyncNode->commitIndex = lastIndex; - } - - // call back Wal - SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore); - if (pSyncNode->commitIndex > walCommitVer) { - pSyncNode->pLogStore->syncLogUpdateCommitIndex(pSyncNode->pLogStore, pSyncNode->commitIndex); - } -} - -void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { - ASSERTS(false, "deprecated"); - if (pSyncNode == NULL) { - sError("pSyncNode is NULL"); - return; - } - - if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { - sNError(pSyncNode, "not leader, can not advance commit index"); - return; - } - - // advance commit index to sanpshot first - SSnapshot snapshot; - pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot); - if (snapshot.lastApplyIndex > 0 && snapshot.lastApplyIndex > pSyncNode->commitIndex) { - SyncIndex commitBegin = pSyncNode->commitIndex; - SyncIndex commitEnd = snapshot.lastApplyIndex; - pSyncNode->commitIndex = snapshot.lastApplyIndex; - sNTrace(pSyncNode, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin, commitEnd); - } - - // update commit index - SyncIndex newCommitIndex = pSyncNode->commitIndex; - for (SyncIndex index = syncNodeGetLastIndex(pSyncNode); index > pSyncNode->commitIndex; --index) { - bool agree = syncAgree(pSyncNode, index); - - if (agree) { - // term - SSyncRaftEntry* pEntry = NULL; - SLRUCache* pCache = pSyncNode->pLogStore->pCache; - LRUHandle* h = taosLRUCacheLookup(pCache, &index, sizeof(index)); - if (h) { - pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h); - - pSyncNode->pLogStore->cacheHit++; - sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", index, pEntry->bytes, pEntry); - - } else { - pSyncNode->pLogStore->cacheMiss++; - sNTrace(pSyncNode, "miss cache index:%" PRId64, index); - - int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, index, &pEntry); - if (code != 0) { - sNError(pSyncNode, "advance commit index error, read wal index:%" PRId64, index); - return; - } - } - // cannot commit, even if quorum agree. need check term! 
- if (pEntry->term <= pSyncNode->pRaftStore->currentTerm) { - // update commit index - newCommitIndex = index; - - if (h) { - taosLRUCacheRelease(pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - - break; - } else { - sNTrace(pSyncNode, "can not commit due to term not equal, index:%" PRId64 ", term:%" PRIu64, pEntry->index, - pEntry->term); - } - - if (h) { - taosLRUCacheRelease(pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - } - } - - // advance commit index as large as possible - SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore); - if (walCommitVer > newCommitIndex) { - newCommitIndex = walCommitVer; - } - - // maybe execute fsm - if (newCommitIndex > pSyncNode->commitIndex) { - SyncIndex beginIndex = pSyncNode->commitIndex + 1; - SyncIndex endIndex = newCommitIndex; - - // update commit index - pSyncNode->commitIndex = newCommitIndex; - - // call back Wal - pSyncNode->pLogStore->syncLogUpdateCommitIndex(pSyncNode->pLogStore, pSyncNode->commitIndex); - - // execute fsm - if (pSyncNode != NULL && pSyncNode->pFsm != NULL) { - int32_t code = syncNodeDoCommit(pSyncNode, beginIndex, endIndex, pSyncNode->state); - if (code != 0) { - sNError(pSyncNode, "advance commit index error, do commit begin:%" PRId64 ", end:%" PRId64, beginIndex, - endIndex); - return; - } - } - } -} bool syncAgreeIndex(SSyncNode* pSyncNode, SRaftId* pRaftId, SyncIndex index) { // I am leader, I agree @@ -209,83 +68,7 @@ static inline int64_t syncNodeAbs64(int64_t a, int64_t b) { return c; } -int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) { - return pSyncNode->quorum; - -#if 0 - int32_t quorum = 1; // self - - int64_t timeNow = taosGetTimestampMs(); - for (int i = 0; i < pSyncNode->peersNum; ++i) { - int64_t peerStartTime = syncIndexMgrGetStartTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]); - int64_t peerRecvTime = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]); - SyncIndex peerMatchIndex = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId)[i]); - - int64_t recvTimeDiff = TABS(peerRecvTime - timeNow); - int64_t startTimeDiff = TABS(peerStartTime - pSyncNode->startTime); - int64_t logDiff = TABS(peerMatchIndex - syncNodeGetLastIndex(pSyncNode)); - - /* - int64_t recvTimeDiff = syncNodeAbs64(peerRecvTime, timeNow); - int64_t startTimeDiff = syncNodeAbs64(peerStartTime, pSyncNode->startTime); - int64_t logDiff = syncNodeAbs64(peerMatchIndex, syncNodeGetLastIndex(pSyncNode)); - */ - - int32_t addQuorum = 0; - - if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) { - if (startTimeDiff < SYNC_MAX_START_TIME_RANGE_MS) { - addQuorum = 1; - } else { - if (logDiff < SYNC_ADD_QUORUM_COUNT) { - addQuorum = 1; - } else { - addQuorum = 0; - } - } - } else { - addQuorum = 0; - } - - /* - if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) { - addQuorum = 1; - } else { - addQuorum = 0; - } - - if (startTimeDiff > SYNC_MAX_START_TIME_RANGE_MS) { - addQuorum = 0; - } - */ - - quorum += addQuorum; - } - - ASSERT(quorum <= pSyncNode->replicaNum); - - if (quorum < pSyncNode->quorum) { - quorum = pSyncNode->quorum; - } - - return quorum; -#endif -} - -/* -bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) { - int agreeCount = 0; - for (int i = 0; i < pSyncNode->replicaNum; ++i) { - if (syncAgreeIndex(pSyncNode, &(pSyncNode->replicasId[i]), index)) { - ++agreeCount; - } - if (agreeCount >= syncNodeDynamicQuorum(pSyncNode)) { - return true; - } - } - return false; -} -*/ +int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) { return 
pSyncNode->quorum; } bool syncNodeAgreedUpon(SSyncNode* pNode, SyncIndex index) { int count = 0; @@ -328,7 +111,7 @@ int64_t syncNodeCheckCommitIndex(SSyncNode* ths, SyncIndex indexLikely) { SyncIndex commitIndex = indexLikely; syncNodeUpdateCommitIndex(ths, commitIndex); sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index: %" PRId64 "", ths->vgId, ths->state, - ths->pRaftStore->currentTerm, commitIndex); + ths->raftStore.currentTerm, commitIndex); } return ths->commitIndex; } diff --git a/source/libs/sync/src/syncElection.c b/source/libs/sync/src/syncElection.c index bcc95c5f103ec7bf0daef7664c80726338dfce83..682ace83ecfa99e4781f70915048cf62a5e2d76f 100644 --- a/source/libs/sync/src/syncElection.c +++ b/source/libs/sync/src/syncElection.c @@ -43,21 +43,29 @@ static int32_t syncNodeRequestVotePeers(SSyncNode* pNode) { for (int i = 0; i < pNode->peersNum; ++i) { SRpcMsg rpcMsg = {0}; ret = syncBuildRequestVote(&rpcMsg, pNode->vgId); - ASSERT(ret == 0); + if (ret < 0) { + sError("vgId:%d, failed to build request-vote msg since %s", pNode->vgId, terrstr()); + continue; + } SyncRequestVote* pMsg = rpcMsg.pCont; pMsg->srcId = pNode->myRaftId; pMsg->destId = pNode->peersId[i]; - pMsg->term = pNode->pRaftStore->currentTerm; + pMsg->term = pNode->raftStore.currentTerm; ret = syncNodeGetLastIndexTerm(pNode, &pMsg->lastLogIndex, &pMsg->lastLogTerm); - ASSERT(ret == 0); + if (ret < 0) { + sError("vgId:%d, failed to get index and term of last log since %s", pNode->vgId, terrstr()); + continue; + } ret = syncNodeSendMsgById(&pNode->peersId[i], pNode, &rpcMsg); - ASSERT(ret == 0); + if (ret < 0) { + sError("vgId:%d, failed to send msg to peerId:%" PRId64, pNode->vgId, pNode->peersId[i].addr); + continue; + } } - - return ret; + return 0; } int32_t syncNodeElect(SSyncNode* pSyncNode) { @@ -75,10 +83,10 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) { } // start election - raftStoreNextTerm(pSyncNode->pRaftStore); - raftStoreClearVote(pSyncNode->pRaftStore); - voteGrantedReset(pSyncNode->pVotesGranted, pSyncNode->pRaftStore->currentTerm); - votesRespondReset(pSyncNode->pVotesRespond, pSyncNode->pRaftStore->currentTerm); + raftStoreNextTerm(pSyncNode); + raftStoreClearVote(pSyncNode); + voteGrantedReset(pSyncNode->pVotesGranted, pSyncNode->raftStore.currentTerm); + votesRespondReset(pSyncNode->pVotesRespond, pSyncNode->raftStore.currentTerm); syncNodeVoteForSelf(pSyncNode); if (voteGrantedMajority(pSyncNode->pVotesGranted)) { diff --git a/source/libs/sync/src/syncEnv.c b/source/libs/sync/src/syncEnv.c index 0d6d0f93f102800b1e9d13e0aa65061983db7332..1fa67cfa4d7a9b5831219cae699f474403ff243a 100644 --- a/source/libs/sync/src/syncEnv.c +++ b/source/libs/sync/src/syncEnv.c @@ -114,7 +114,7 @@ void syncHbTimerDataRemove(int64_t rid) { taosRemoveRef(gHbDataRefId, rid); } SSyncHbTimerData *syncHbTimerDataAcquire(int64_t rid) { SSyncHbTimerData *pData = taosAcquireRef(gHbDataRefId, rid); - if (pData == NULL) { + if (pData == NULL && rid > 0) { sInfo("failed to acquire hb-timer-data from refId:%" PRId64, rid); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index bf220178a422970c69fd3998f8dd735ebccfdb17..77b87a885ba53166cbdfe9f2d792da9421803fc1 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -124,6 +124,14 @@ void syncPreStop(int64_t rid) { } } +void syncPostStop(int64_t rid) { + SSyncNode* pSyncNode = syncNodeAcquire(rid); + if (pSyncNode != NULL) { + syncNodePostClose(pSyncNode); + 
syncNodeRelease(pSyncNode); + } +} + static bool syncNodeCheckNewConfig(SSyncNode* pSyncNode, const SSyncCfg* pCfg) { if (!syncNodeInConfig(pSyncNode, pCfg)) return false; return abs(pCfg->replicaNum - pSyncNode->replicaNum) <= 1; @@ -284,8 +292,6 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) { goto _DEL_WAL; } else { - lastApplyIndex -= SYNC_VNODE_LOG_RETENTION; - SyncIndex beginIndex = pSyncNode->pLogStore->syncLogBeginIndex(pSyncNode->pLogStore); SyncIndex endIndex = pSyncNode->pLogStore->syncLogEndIndex(pSyncNode->pLogStore); bool isEmpty = pSyncNode->pLogStore->syncLogIsEmpty(pSyncNode->pLogStore); @@ -300,6 +306,8 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) { if (pSyncNode->replicaNum > 1) { // multi replicas + lastApplyIndex = TMAX(lastApplyIndex - SYNC_VNODE_LOG_RETENTION, beginIndex - 1); + if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) { pSyncNode->minMatchIndex = syncMinMatchIndex(pSyncNode); @@ -460,7 +468,7 @@ bool syncNodeIsReadyForRead(SSyncNode* pSyncNode) { } if (code == 0 && pEntry != NULL) { - if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == pSyncNode->pRaftStore->currentTerm) { + if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == pSyncNode->raftStore.currentTerm) { ready = true; } @@ -578,78 +586,6 @@ SSyncState syncGetState(int64_t rid) { return state; } -#if 0 -int32_t syncGetSnapshotByIndex(int64_t rid, SyncIndex index, SSnapshot* pSnapshot) { - if (index < SYNC_INDEX_BEGIN) { - return -1; - } - - SSyncNode* pSyncNode = syncNodeAcquire(rid); - if (pSyncNode == NULL) { - return -1; - } - ASSERT(rid == pSyncNode->rid); - - SSyncRaftEntry* pEntry = NULL; - int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, index, &pEntry); - if (code != 0) { - if (pEntry != NULL) { - syncEntryDestroy(pEntry); - } - syncNodeRelease(pSyncNode); - return -1; - } - ASSERT(pEntry != NULL); - - pSnapshot->data = NULL; - pSnapshot->lastApplyIndex = index; - pSnapshot->lastApplyTerm = pEntry->term; - pSnapshot->lastConfigIndex = syncNodeGetSnapshotConfigIndex(pSyncNode, index); - - syncEntryDestroy(pEntry); - syncNodeRelease(pSyncNode); - return 0; -} - -int32_t syncGetSnapshotMeta(int64_t rid, struct SSnapshotMeta* sMeta) { - SSyncNode* pSyncNode = syncNodeAcquire(rid); - if (pSyncNode == NULL) { - return -1; - } - ASSERT(rid == pSyncNode->rid); - sMeta->lastConfigIndex = pSyncNode->raftCfg.lastConfigIndex; - - sTrace("vgId:%d, get snapshot meta, lastConfigIndex:%" PRId64, pSyncNode->vgId, pSyncNode->raftCfg.lastConfigIndex); - - syncNodeRelease(pSyncNode); - return 0; -} - -int32_t syncGetSnapshotMetaByIndex(int64_t rid, SyncIndex snapshotIndex, struct SSnapshotMeta* sMeta) { - SSyncNode* pSyncNode = syncNodeAcquire(rid); - if (pSyncNode == NULL) { - return -1; - } - ASSERT(rid == pSyncNode->rid); - - ASSERT(pSyncNode->raftCfg.configIndexCount >= 1); - SyncIndex lastIndex = (pSyncNode->raftCfg.configIndexArr)[0]; - - for (int32_t i = 0; i < pSyncNode->raftCfg.configIndexCount; ++i) { - if ((pSyncNode->raftCfg.configIndexArr)[i] > lastIndex && - (pSyncNode->raftCfg.configIndexArr)[i] <= snapshotIndex) { - lastIndex = (pSyncNode->raftCfg.configIndexArr)[i]; - } - } - sMeta->lastConfigIndex = lastIndex; - sTrace("vgId:%d, get snapshot meta by index:%" PRId64 " lcindex:%" PRId64, pSyncNode->vgId, snapshotIndex, - sMeta->lastConfigIndex); - - syncNodeRelease(pSyncNode); - return 0; -} -#endif - SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex) { 
ASSERT(pSyncNode->raftCfg.configIndexCount >= 1); SyncIndex lastIndex = (pSyncNode->raftCfg.configIndexArr)[0]; @@ -728,7 +664,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ int32_t code = syncNodeOnClientRequest(pSyncNode, pMsg, &retIndex); if (code == 0) { pMsg->info.conn.applyIndex = retIndex; - pMsg->info.conn.applyTerm = pSyncNode->pRaftStore->currentTerm; + pMsg->info.conn.applyTerm = pSyncNode->raftStore.currentTerm; sTrace("vgId:%d, propose optimized msg, index:%" PRId64 " type:%s", pSyncNode->vgId, retIndex, TMSG_INFO(pMsg->msgType)); return 1; @@ -887,14 +823,25 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { // init by SSyncInfo pSyncNode->vgId = pSyncInfo->vgId; SSyncCfg* pCfg = &pSyncNode->raftCfg.cfg; + bool updated = false; sInfo("vgId:%d, start to open sync node, replica:%d selfIndex:%d", pSyncNode->vgId, pCfg->replicaNum, pCfg->myIndex); for (int32_t i = 0; i < pCfg->replicaNum; ++i) { SNodeInfo* pNode = &pCfg->nodeInfo[i]; - (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort); + if (tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort)) { + updated = true; + } sInfo("vgId:%d, index:%d ep:%s:%u dnode:%d cluster:%" PRId64, pSyncNode->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId, pNode->clusterId); } + if (updated) { + sInfo("vgId:%d, save config info since dnode info changed", pSyncNode->vgId); + if (syncWriteCfgFile(pSyncNode) != 0) { + sError("vgId:%d, failed to write sync cfg file on dnode info updated", pSyncNode->vgId); + goto _error; + } + } + pSyncNode->pWal = pSyncInfo->pWal; pSyncNode->msgcb = pSyncInfo->msgcb; pSyncNode->syncSendMSg = pSyncInfo->syncSendMSg; @@ -975,8 +922,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { // init TLA+ server vars pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER; - pSyncNode->pRaftStore = raftStoreOpen(pSyncNode->raftStorePath); - if (pSyncNode->pRaftStore == NULL) { + if (raftStoreReadFile(pSyncNode) != 0) { sError("vgId:%d, failed to open raft store at path %s", pSyncNode->vgId, pSyncNode->raftStorePath); goto _error; } @@ -1022,10 +968,14 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { } } pSyncNode->commitIndex = commitIndex; + sInfo("vgId:%d, sync node commitIndex initialized as %" PRId64, pSyncNode->vgId, pSyncNode->commitIndex); + // restore log store on need if (syncNodeLogStoreRestoreOnNeed(pSyncNode) < 0) { + sError("vgId:%d, failed to restore log store since %s.", pSyncNode->vgId, terrstr()); goto _error; } + // timer ms init pSyncNode->pingBaseLine = PING_TIMER_MS; pSyncNode->electBaseLine = tsElectInterval; @@ -1088,10 +1038,16 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { pSyncNode->changing = false; // replication mgr - syncNodeLogReplMgrInit(pSyncNode); + if (syncNodeLogReplMgrInit(pSyncNode) < 0) { + sError("vgId:%d, failed to init repl mgr since %s.", pSyncNode->vgId, terrstr()); + goto _error; + } // peer state - syncNodePeerStateInit(pSyncNode); + if (syncNodePeerStateInit(pSyncNode) < 0) { + sError("vgId:%d, failed to init peer stat since %s.", pSyncNode->vgId, terrstr()); + goto _error; + } // // min match index @@ -1162,9 +1118,10 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) { } ASSERT(endIndex == lastVer + 1); - commitIndex = TMAX(pSyncNode->commitIndex, commitIndex); + pSyncNode->commitIndex = TMAX(pSyncNode->commitIndex, commitIndex); + sInfo("vgId:%d, restore sync until commitIndex:%" PRId64, pSyncNode->vgId, pSyncNode->commitIndex); - if 
(syncLogBufferCommit(pSyncNode->pLogBuf, pSyncNode, commitIndex) < 0) { + if (syncLogBufferCommit(pSyncNode->pLogBuf, pSyncNode, pSyncNode->commitIndex) < 0) { return -1; } @@ -1174,7 +1131,7 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) { int32_t syncNodeStart(SSyncNode* pSyncNode) { // start raft if (pSyncNode->replicaNum == 1) { - raftStoreNextTerm(pSyncNode->pRaftStore); + raftStoreNextTerm(pSyncNode); syncNodeBecomeLeader(pSyncNode, "one replica start"); // Raft 3.6.2 Committing entries from previous terms @@ -1185,27 +1142,10 @@ int32_t syncNodeStart(SSyncNode* pSyncNode) { int32_t ret = 0; ret = syncNodeStartPingTimer(pSyncNode); - ASSERT(ret == 0); - return ret; -} - -void syncNodeStartOld(SSyncNode* pSyncNode) { - // start raft - if (pSyncNode->replicaNum == 1) { - raftStoreNextTerm(pSyncNode->pRaftStore); - syncNodeBecomeLeader(pSyncNode, "one replica start"); - - // Raft 3.6.2 Committing entries from previous terms - syncNodeAppendNoop(pSyncNode); - syncMaybeAdvanceCommitIndex(pSyncNode); - - } else { - syncNodeBecomeFollower(pSyncNode, "first start"); + if (ret != 0) { + sError("vgId:%d, failed to start ping timer since %s", pSyncNode->vgId, terrstr()); } - - int32_t ret = 0; - ret = syncNodeStartPingTimer(pSyncNode); - ASSERT(ret == 0); + return ret; } int32_t syncNodeStartStandBy(SSyncNode* pSyncNode) { @@ -1216,11 +1156,16 @@ int32_t syncNodeStartStandBy(SSyncNode* pSyncNode) { // reset elect timer, long enough int32_t electMS = TIMER_MAX_MS; int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS); - ASSERT(ret == 0); + if (ret < 0) { + sError("vgId:%d, failed to restart elect timer since %s", pSyncNode->vgId, terrstr()); + return -1; + } - ret = 0; ret = syncNodeStartPingTimer(pSyncNode); - ASSERT(ret == 0); + if (ret < 0) { + sError("vgId:%d, failed to start ping timer since %s", pSyncNode->vgId, terrstr()); + return -1; + } return ret; } @@ -1236,9 +1181,10 @@ void syncNodePreClose(SSyncNode* pSyncNode) { } } +#if 0 if (pSyncNode->pNewNodeReceiver != NULL) { if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { - snapshotReceiverForceStop(pSyncNode->pNewNodeReceiver); + snapshotReceiverStop(pSyncNode->pNewNodeReceiver); } sDebug("vgId:%d, snapshot receiver destroy while preclose sync node, data:%p", pSyncNode->vgId, @@ -1246,12 +1192,29 @@ void syncNodePreClose(SSyncNode* pSyncNode) { snapshotReceiverDestroy(pSyncNode->pNewNodeReceiver); pSyncNode->pNewNodeReceiver = NULL; } +#endif // stop elect timer syncNodeStopElectTimer(pSyncNode); // stop heartbeat timer syncNodeStopHeartbeatTimer(pSyncNode); + + // clean rsp + syncRespCleanRsp(pSyncNode->pSyncRespMgr); +} + +void syncNodePostClose(SSyncNode* pSyncNode) { + if (pSyncNode->pNewNodeReceiver != NULL) { + if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { + snapshotReceiverStop(pSyncNode->pNewNodeReceiver); + } + + sDebug("vgId:%d, snapshot receiver destroy while preclose sync node, data:%p", pSyncNode->vgId, + pSyncNode->pNewNodeReceiver); + snapshotReceiverDestroy(pSyncNode->pNewNodeReceiver); + pSyncNode->pNewNodeReceiver = NULL; + } } void syncHbTimerDataFree(SSyncHbTimerData* pData) { taosMemoryFree(pData); } @@ -1260,10 +1223,6 @@ void syncNodeClose(SSyncNode* pSyncNode) { if (pSyncNode == NULL) return; sNInfo(pSyncNode, "sync close, node:%p", pSyncNode); - int32_t ret = raftStoreClose(pSyncNode->pRaftStore); - ASSERT(ret == 0); - pSyncNode->pRaftStore = NULL; - syncNodeLogReplMgrDestroy(pSyncNode); syncRespMgrDestroy(pSyncNode->pSyncRespMgr); pSyncNode->pSyncRespMgr = NULL; @@ -1299,7 
+1258,7 @@ void syncNodeClose(SSyncNode* pSyncNode) { if (pSyncNode->pNewNodeReceiver != NULL) { if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { - snapshotReceiverForceStop(pSyncNode->pNewNodeReceiver); + snapshotReceiverStop(pSyncNode->pNewNodeReceiver); } sDebug("vgId:%d, snapshot receiver destroy while close, data:%p", pSyncNode->vgId, pSyncNode->pNewNodeReceiver); @@ -1454,16 +1413,21 @@ int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pNode, SRpcMsg } } + int32_t code = -1; if (pNode->syncSendMSg != NULL && epSet != NULL) { syncUtilMsgHtoN(pMsg->pCont); pMsg->info.noResp = 1; - return pNode->syncSendMSg(epSet, pMsg); - } else { - sError("vgId:%d, sync send msg by id error, fp:%p epset:%p", pNode->vgId, pNode->syncSendMSg, epSet); + code = pNode->syncSendMSg(epSet, pMsg); + } + + if (code < 0) { + sError("vgId:%d, sync send msg by id error, epset:%p dnode:%d addr:%" PRId64 " err:0x%x", pNode->vgId, epSet, + DID(destRaftId), destRaftId->addr, terrno); rpcFreeCont(pMsg->pCont); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; - return -1; } + + return code; } inline bool syncNodeInConfig(SSyncNode* pNode, const SSyncCfg* pCfg) { @@ -1675,45 +1639,44 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde _END: // log end config change - sNInfo(pSyncNode, "end do config change, from %d to %d", pSyncNode->vgId, oldConfig.replicaNum, - pNewConfig->replicaNum); + sNInfo(pSyncNode, "end do config change, from %d to %d", oldConfig.replicaNum, pNewConfig->replicaNum); } // raft state change -------------- void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) { - if (term > pSyncNode->pRaftStore->currentTerm) { - raftStoreSetTerm(pSyncNode->pRaftStore, term); + if (term > pSyncNode->raftStore.currentTerm) { + raftStoreSetTerm(pSyncNode, term); char tmpBuf[64]; snprintf(tmpBuf, sizeof(tmpBuf), "update term to %" PRId64, term); syncNodeBecomeFollower(pSyncNode, tmpBuf); - raftStoreClearVote(pSyncNode->pRaftStore); + raftStoreClearVote(pSyncNode); } } void syncNodeUpdateTermWithoutStepDown(SSyncNode* pSyncNode, SyncTerm term) { - if (term > pSyncNode->pRaftStore->currentTerm) { - raftStoreSetTerm(pSyncNode->pRaftStore, term); + if (term > pSyncNode->raftStore.currentTerm) { + raftStoreSetTerm(pSyncNode, term); } } void syncNodeStepDown(SSyncNode* pSyncNode, SyncTerm newTerm) { - if (pSyncNode->pRaftStore->currentTerm > newTerm) { + if (pSyncNode->raftStore.currentTerm > newTerm) { sNTrace(pSyncNode, "step down, ignore, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, - pSyncNode->pRaftStore->currentTerm); + pSyncNode->raftStore.currentTerm); return; } do { sNTrace(pSyncNode, "step down, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, - pSyncNode->pRaftStore->currentTerm); + pSyncNode->raftStore.currentTerm); } while (0); - if (pSyncNode->pRaftStore->currentTerm < newTerm) { - raftStoreSetTerm(pSyncNode->pRaftStore, newTerm); + if (pSyncNode->raftStore.currentTerm < newTerm) { + raftStoreSetTerm(pSyncNode, newTerm); char tmpBuf[64]; snprintf(tmpBuf, sizeof(tmpBuf), "step down, update term to %" PRId64, newTerm); syncNodeBecomeFollower(pSyncNode, tmpBuf); - raftStoreClearVote(pSyncNode->pRaftStore); + raftStoreClearVote(pSyncNode); } else { if (pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER) { @@ -1791,12 +1754,6 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { pSyncNode->leaderCache = pSyncNode->myRaftId; for (int32_t i = 0; i < pSyncNode->pNextIndex->replicaNum; ++i) { - // maybe overwrite myself, no harm - 
// just do it! - - // pSyncNode->pNextIndex->index[i] = pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore) + 1; - - // maybe wal is deleted SyncIndex lastIndex; SyncTerm lastTerm; int32_t code = syncNodeGetLastIndexTerm(pSyncNode, &lastIndex, &lastTerm); @@ -1829,7 +1786,7 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { // close receiver if (pSyncNode != NULL && pSyncNode->pNewNodeReceiver != NULL && snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) { - snapshotReceiverForceStop(pSyncNode->pNewNodeReceiver); + snapshotReceiverStop(pSyncNode->pNewNodeReceiver); } // stop elect timer @@ -1858,7 +1815,11 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { void syncNodeCandidate2Leader(SSyncNode* pSyncNode) { ASSERT(pSyncNode->state == TAOS_SYNC_STATE_CANDIDATE); - ASSERT(voteGrantedMajority(pSyncNode->pVotesGranted)); + bool granted = voteGrantedMajority(pSyncNode->pVotesGranted); + if (!granted) { + sError("vgId:%d, not granted by majority.", pSyncNode->vgId); + return; + } syncNodeBecomeLeader(pSyncNode, "candidate to leader"); sNTrace(pSyncNode, "state change syncNodeCandidate2Leader"); @@ -1871,21 +1832,7 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) { SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); ASSERT(lastIndex >= 0); sInfo("vgId:%d, become leader. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64 "", - pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, lastIndex); -} - -void syncNodeCandidate2LeaderOld(SSyncNode* pSyncNode) { - ASSERT(pSyncNode->state == TAOS_SYNC_STATE_CANDIDATE); - ASSERT(voteGrantedMajority(pSyncNode->pVotesGranted)); - syncNodeBecomeLeader(pSyncNode, "candidate to leader"); - - // Raft 3.6.2 Committing entries from previous terms - syncNodeAppendNoop(pSyncNode); - syncMaybeAdvanceCommitIndex(pSyncNode); - - if (pSyncNode->replicaNum > 1) { - syncNodeReplicate(pSyncNode); - } + pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); } bool syncNodeIsMnode(SSyncNode* pSyncNode) { return (pSyncNode->vgId == 1); } @@ -1904,7 +1851,7 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) { pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE; SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become candidate from follower. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64, - pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "follower to candidate"); } @@ -1914,7 +1861,7 @@ void syncNodeLeader2Follower(SSyncNode* pSyncNode) { syncNodeBecomeFollower(pSyncNode, "leader to follower"); SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become follower from leader. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64, - pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "leader to follower"); } @@ -1924,7 +1871,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) { syncNodeBecomeFollower(pSyncNode, "candidate to follower"); SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become follower from candidate. 
term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64, - pSyncNode->vgId, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "candidate to follower"); } @@ -1932,15 +1879,16 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) { // just called by syncNodeVoteForSelf // need assert void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId) { - ASSERT(term == pSyncNode->pRaftStore->currentTerm); - ASSERT(!raftStoreHasVoted(pSyncNode->pRaftStore)); + ASSERT(term == pSyncNode->raftStore.currentTerm); + bool voted = raftStoreHasVoted(pSyncNode); + ASSERT(!voted); - raftStoreVote(pSyncNode->pRaftStore, pRaftId); + raftStoreVote(pSyncNode, pRaftId); } // simulate get vote from outside void syncNodeVoteForSelf(SSyncNode* pSyncNode) { - syncNodeVoteForTerm(pSyncNode, pSyncNode->pRaftStore->currentTerm, &pSyncNode->myRaftId); + syncNodeVoteForTerm(pSyncNode, pSyncNode->raftStore.currentTerm, &pSyncNode->myRaftId); SRpcMsg rpcMsg = {0}; int32_t ret = syncBuildRequestVoteReply(&rpcMsg, pSyncNode->vgId); @@ -1949,7 +1897,7 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode) { SyncRequestVoteReply* pMsg = rpcMsg.pCont; pMsg->srcId = pSyncNode->myRaftId; pMsg->destId = pSyncNode->myRaftId; - pMsg->term = pSyncNode->pRaftStore->currentTerm; + pMsg->term = pSyncNode->raftStore.currentTerm; pMsg->voteGranted = true; voteGrantedVote(pSyncNode->pVotesGranted, pMsg); @@ -2239,13 +2187,6 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { return; } - if (pSyncNode->pRaftStore == NULL) { - syncNodeRelease(pSyncNode); - syncHbTimerDataRelease(pData); - sError("vgId:%d, hb timer raft store already stop", pSyncNode->vgId); - return; - } - // sTrace("vgId:%d, eq peer hb timer", pSyncNode->vgId); if (pSyncNode->replicaNum > 1) { @@ -2269,7 +2210,7 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { SyncHeartbeat* pSyncMsg = rpcMsg.pCont; pSyncMsg->srcId = pSyncNode->myRaftId; pSyncMsg->destId = pData->destId; - pSyncMsg->term = pSyncNode->pRaftStore->currentTerm; + pSyncMsg->term = pSyncNode->raftStore.currentTerm; pSyncMsg->commitIndex = pSyncNode->commitIndex; pSyncMsg->minMatchIndex = syncMinMatchIndex(pSyncNode); pSyncMsg->privateTerm = 0; @@ -2315,7 +2256,7 @@ static int32_t syncNodeEqNoop(SSyncNode* pNode) { } SyncIndex index = pNode->pLogStore->syncLogWriteIndex(pNode->pLogStore); - SyncTerm term = pNode->pRaftStore->currentTerm; + SyncTerm term = pNode->raftStore.currentTerm; SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, pNode->vgId); if (pEntry == NULL) return -1; @@ -2361,8 +2302,7 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) { if (syncLogBufferAppend(ths->pLogBuf, ths, pEntry) < 0) { sError("vgId:%d, failed to enqueue sync log buffer, index:%" PRId64, ths->vgId, pEntry->index); terrno = TSDB_CODE_SYN_BUFFER_FULL; - (void)syncLogFsmExecute(ths, ths->pFsm, ths->state, ths->pRaftStore->currentTerm, pEntry, - TSDB_CODE_SYN_BUFFER_FULL); + (void)syncLogFsmExecute(ths, ths->pFsm, ths->state, ths->raftStore.currentTerm, pEntry, TSDB_CODE_SYN_BUFFER_FULL); syncEntryDestroy(pEntry); return -1; } @@ -2435,7 +2375,7 @@ bool syncNodeSnapshotRecving(SSyncNode* pSyncNode) { static int32_t syncNodeAppendNoop(SSyncNode* ths) { SyncIndex index = syncLogBufferGetEndIndex(ths->pLogBuf); - SyncTerm term = ths->pRaftStore->currentTerm; + SyncTerm term = ths->raftStore.currentTerm; 
SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId); if (pEntry == NULL) { @@ -2451,14 +2391,14 @@ static int32_t syncNodeAppendNoopOld(SSyncNode* ths) { int32_t ret = 0; SyncIndex index = ths->pLogStore->syncLogWriteIndex(ths->pLogStore); - SyncTerm term = ths->pRaftStore->currentTerm; + SyncTerm term = ths->raftStore.currentTerm; SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId); ASSERT(pEntry != NULL); LRUHandle* h = NULL; if (ths->state == TAOS_SYNC_STATE_LEADER) { - int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry); + int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry, false); if (code != 0) { sError("append noop error"); return -1; @@ -2493,12 +2433,12 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SyncHeartbeatReply* pMsgReply = rpcMsg.pCont; pMsgReply->destId = pMsg->srcId; pMsgReply->srcId = ths->myRaftId; - pMsgReply->term = ths->pRaftStore->currentTerm; + pMsgReply->term = ths->raftStore.currentTerm; pMsgReply->privateTerm = 8864; // magic number pMsgReply->startTime = ths->startTime; pMsgReply->timeStamp = tsMs; - if (pMsg->term == ths->pRaftStore->currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) { + if (pMsg->term == ths->raftStore.currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) { syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), tsMs); syncNodeResetElectTimer(ths); @@ -2511,8 +2451,9 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SyncLocalCmd* pSyncMsg = rpcMsgLocalCmd.pCont; pSyncMsg->cmd = SYNC_LOCAL_CMD_FOLLOWER_CMT; - pSyncMsg->fcIndex = pMsg->commitIndex; - SyncIndex fcIndex = pSyncMsg->fcIndex; + pSyncMsg->commitIndex = pMsg->commitIndex; + pSyncMsg->currentTerm = pMsg->term; + SyncIndex fcIndex = pSyncMsg->commitIndex; if (ths->syncEqMsg != NULL && ths->msgcb != NULL) { int32_t code = ths->syncEqMsg(ths->msgcb, &rpcMsgLocalCmd); @@ -2526,14 +2467,15 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } } - if (pMsg->term >= ths->pRaftStore->currentTerm && ths->state != TAOS_SYNC_STATE_FOLLOWER) { + if (pMsg->term >= ths->raftStore.currentTerm && ths->state != TAOS_SYNC_STATE_FOLLOWER) { // syncNodeStepDown(ths, pMsg->term); SRpcMsg rpcMsgLocalCmd = {0}; (void)syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId); SyncLocalCmd* pSyncMsg = rpcMsgLocalCmd.pCont; pSyncMsg->cmd = SYNC_LOCAL_CMD_STEP_DOWN; - pSyncMsg->sdNewTerm = pMsg->term; + pSyncMsg->currentTerm = pMsg->term; + pSyncMsg->commitIndex = pMsg->commitIndex; if (ths->syncEqMsg != NULL && ths->msgcb != NULL) { int32_t code = ths->syncEqMsg(ths->msgcb, &rpcMsgLocalCmd); @@ -2541,7 +2483,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { sError("vgId:%d, sync enqueue step-down msg error, code:%d", ths->vgId, code); rpcFreeCont(rpcMsgLocalCmd.pCont); } else { - sTrace("vgId:%d, sync enqueue step-down msg, new-term: %" PRId64, ths->vgId, pSyncMsg->sdNewTerm); + sTrace("vgId:%d, sync enqueue step-down msg, new-term: %" PRId64, ths->vgId, pSyncMsg->currentTerm); } } } @@ -2599,10 +2541,13 @@ int32_t syncNodeOnLocalCmd(SSyncNode* ths, const SRpcMsg* pRpcMsg) { syncLogRecvLocalCmd(ths, pMsg, ""); if (pMsg->cmd == SYNC_LOCAL_CMD_STEP_DOWN) { - syncNodeStepDown(ths, pMsg->sdNewTerm); + syncNodeStepDown(ths, pMsg->currentTerm); } else if (pMsg->cmd == SYNC_LOCAL_CMD_FOLLOWER_CMT) { - (void)syncNodeUpdateCommitIndex(ths, pMsg->fcIndex); + SyncTerm matchTerm = syncLogBufferGetLastMatchTerm(ths->pLogBuf); + if (pMsg->currentTerm == matchTerm) 
{ + (void)syncNodeUpdateCommitIndex(ths, pMsg->commitIndex); + } if (syncLogBufferCommit(ths->pLogBuf, ths, ths->commitIndex) < 0) { sError("vgId:%d, failed to commit raft log since %s. commit index: %" PRId64 "", ths->vgId, terrstr(), ths->commitIndex); @@ -2614,23 +2559,6 @@ int32_t syncNodeOnLocalCmd(SSyncNode* ths, const SRpcMsg* pRpcMsg) { return 0; } -int32_t syncNodeOnLocalCmdOld(SSyncNode* ths, const SRpcMsg* pRpcMsg) { - SyncLocalCmd* pMsg = pRpcMsg->pCont; - syncLogRecvLocalCmd(ths, pMsg, ""); - - if (pMsg->cmd == SYNC_LOCAL_CMD_STEP_DOWN) { - syncNodeStepDown(ths, pMsg->sdNewTerm); - - } else if (pMsg->cmd == SYNC_LOCAL_CMD_FOLLOWER_CMT) { - syncNodeFollowerCommit(ths, pMsg->fcIndex); - - } else { - sError("error local cmd"); - } - - return 0; -} - // TLA+ Spec // ClientRequest(i, v) == // /\ state[i] = Leader @@ -2648,7 +2576,7 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn int32_t code = 0; SyncIndex index = syncLogBufferGetEndIndex(ths->pLogBuf); - SyncTerm term = ths->pRaftStore->currentTerm; + SyncTerm term = ths->raftStore.currentTerm; SSyncRaftEntry* pEntry = NULL; if (pMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { pEntry = syncEntryBuildFromClientRequest(pMsg->pCont, term, index); @@ -2667,106 +2595,12 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn } int32_t code = syncNodeAppend(ths, pEntry); - if (code < 0) { - sNError(ths, "failed to append blocking msg"); - } return code; } else { syncEntryDestroy(pEntry); pEntry = NULL; + return -1; } - - return -1; -} - -int32_t syncNodeOnClientRequestOld(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIndex) { - sNTrace(ths, "on client request"); - - int32_t ret = 0; - int32_t code = 0; - - SyncIndex index = ths->pLogStore->syncLogWriteIndex(ths->pLogStore); - SyncTerm term = ths->pRaftStore->currentTerm; - SSyncRaftEntry* pEntry; - - if (pMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { - pEntry = syncEntryBuildFromClientRequest(pMsg->pCont, term, index); - } else { - pEntry = syncEntryBuildFromRpcMsg(pMsg, term, index); - } - - LRUHandle* h = NULL; - - if (ths->state == TAOS_SYNC_STATE_LEADER) { - // append entry - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry); - if (code != 0) { - if (ths->replicaNum == 1) { - if (h) { - taosLRUCacheRelease(ths->pLogStore->pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - - return -1; - - } else { - // del resp mgr, call FpCommitCb - SFsmCbMeta cbMeta = { - .index = pEntry->index, - .lastConfigIndex = SYNC_INDEX_INVALID, - .isWeak = pEntry->isWeak, - .code = -1, - .state = ths->state, - .seqNum = pEntry->seqNum, - .term = pEntry->term, - .currentTerm = ths->pRaftStore->currentTerm, - .flag = 0, - }; - ths->pFsm->FpCommitCb(ths->pFsm, pMsg, &cbMeta); - - if (h) { - taosLRUCacheRelease(ths->pLogStore->pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - - return -1; - } - } - - syncCacheEntry(ths->pLogStore, pEntry, &h); - - // if mulit replica, start replicate right now - if (ths->replicaNum > 1) { - syncNodeReplicate(ths); - } - - // if only myself, maybe commit right now - if (ths->replicaNum == 1) { - if (syncNodeIsMnode(ths)) { - syncMaybeAdvanceCommitIndex(ths); - } else { - syncOneReplicaAdvance(ths); - } - } - } - - if (pRetIndex != NULL) { - if (ret == 0 && pEntry != NULL) { - *pRetIndex = pEntry->index; - } else { - *pRetIndex = SYNC_INDEX_INVALID; - } - } - - if (h) { - taosLRUCacheRelease(ths->pLogStore->pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - - return 
ret; } const char* syncStr(ESyncState state) { @@ -2798,7 +2632,7 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p return 0; } - if (pEntry->term < ths->pRaftStore->currentTerm) { + if (pEntry->term < ths->raftStore.currentTerm) { sNTrace(ths, "little term:%" PRId64 ", can not do leader transfer", pEntry->term); return 0; } @@ -2836,7 +2670,7 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p if (ths->pFsm->FpLeaderTransferCb != NULL) { SFsmCbMeta cbMeta = { .code = 0, - .currentTerm = ths->pRaftStore->currentTerm, + .currentTerm = ths->raftStore.currentTerm, .flag = 0, .index = pEntry->index, .lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index), @@ -2873,129 +2707,6 @@ bool syncNodeIsOptimizedOneReplica(SSyncNode* ths, SRpcMsg* pMsg) { return (ths->replicaNum == 1 && syncUtilUserCommit(pMsg->msgType) && ths->vgId != 1); } -int32_t syncNodeDoCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, uint64_t flag) { - ASSERT(false); - if (beginIndex > endIndex) { - return 0; - } - - if (ths == NULL) { - return -1; - } - - if (ths->pFsm != NULL && ths->pFsm->FpGetSnapshotInfo != NULL) { - // advance commit index to sanpshot first - SSnapshot snapshot = {0}; - ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot); - if (snapshot.lastApplyIndex >= 0 && snapshot.lastApplyIndex >= beginIndex) { - sNTrace(ths, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, beginIndex, snapshot.lastApplyIndex); - - // update begin index - beginIndex = snapshot.lastApplyIndex + 1; - } - } - - int32_t code = 0; - ESyncState state = flag; - - sNTrace(ths, "commit by wal from index:%" PRId64 " to index:%" PRId64, beginIndex, endIndex); - - // execute fsm - if (ths->pFsm != NULL) { - for (SyncIndex i = beginIndex; i <= endIndex; ++i) { - if (i != SYNC_INDEX_INVALID) { - SSyncRaftEntry* pEntry; - SLRUCache* pCache = ths->pLogStore->pCache; - LRUHandle* h = taosLRUCacheLookup(pCache, &i, sizeof(i)); - if (h) { - pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h); - - ths->pLogStore->cacheHit++; - sNTrace(ths, "hit cache index:%" PRId64 ", bytes:%u, %p", i, pEntry->bytes, pEntry); - - } else { - ths->pLogStore->cacheMiss++; - sNTrace(ths, "miss cache index:%" PRId64, i); - - code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, i, &pEntry); - // ASSERT(code == 0); - // ASSERT(pEntry != NULL); - if (code != 0 || pEntry == NULL) { - sNError(ths, "get log entry error"); - sFatal("vgId:%d, get log entry %" PRId64 " error when commit since %s", ths->vgId, i, terrstr()); - continue; - } - } - - SRpcMsg rpcMsg = {0}; - syncEntry2OriginalRpc(pEntry, &rpcMsg); - - sTrace("do commit index:%" PRId64 ", type:%s", i, TMSG_INFO(pEntry->msgType)); - - // user commit - if ((ths->pFsm->FpCommitCb != NULL) && syncUtilUserCommit(pEntry->originalRpcType)) { - bool internalExecute = true; - if ((ths->replicaNum == 1) && ths->restoreFinish && ths->vgId != 1) { - internalExecute = false; - } - - sNTrace(ths, "user commit index:%" PRId64 ", internal:%d, type:%s", i, internalExecute, - TMSG_INFO(pEntry->msgType)); - - // execute fsm in apply thread, or execute outside syncPropose - if (internalExecute) { - SFsmCbMeta cbMeta = { - .index = pEntry->index, - .lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index), - .isWeak = pEntry->isWeak, - .code = 0, - .state = ths->state, - .seqNum = pEntry->seqNum, - .term = pEntry->term, - .currentTerm = ths->pRaftStore->currentTerm, - .flag = flag, - }; - - 
syncRespMgrGetAndDel(ths->pSyncRespMgr, cbMeta.seqNum, &rpcMsg.info); - ths->pFsm->FpCommitCb(ths->pFsm, &rpcMsg, &cbMeta); - } - } - -#if 0 - // execute in pre-commit - // leader transfer - if (pEntry->originalRpcType == TDMT_SYNC_LEADER_TRANSFER) { - code = syncDoLeaderTransfer(ths, &rpcMsg, pEntry); - ASSERT(code == 0); - } -#endif - - // restore finish - // if only snapshot, a noop entry will be append, so syncLogLastIndex is always ok - if (pEntry->index == ths->pLogStore->syncLogLastIndex(ths->pLogStore)) { - if (ths->restoreFinish == false) { - if (ths->pFsm->FpRestoreFinishCb != NULL) { - ths->pFsm->FpRestoreFinishCb(ths->pFsm); - } - ths->restoreFinish = true; - - int64_t restoreDelay = taosGetTimestampMs() - ths->leaderTime; - sNTrace(ths, "restore finish, index:%" PRId64 ", elapsed:%" PRId64 " ms", pEntry->index, restoreDelay); - } - } - - rpcFreeCont(rpcMsg.pCont); - if (h) { - taosLRUCacheRelease(pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - } - } - } - return 0; -} - bool syncNodeInRaftGroup(SSyncNode* ths, SRaftId* pRaftId) { for (int32_t i = 0; i < ths->replicaNum; ++i) { if (syncUtilSameId(&((ths->replicasId)[i]), pRaftId)) { diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index 467b4e2219690ea447bec7a8fb9876f02a9858d4..7d534c671e6c80453f8a3b47fffebcf8028e062e 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -154,8 +154,8 @@ int32_t syncBuildAppendEntriesReply(SRpcMsg* pMsg, int32_t vgId) { return 0; } -int32_t syncBuildAppendEntriesFromRaftLog(SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevLogTerm, - SRpcMsg* pRpcMsg) { +int32_t syncBuildAppendEntriesFromRaftEntry(SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevLogTerm, + SRpcMsg* pRpcMsg) { uint32_t dataLen = pEntry->bytes; uint32_t bytes = sizeof(SyncAppendEntries) + dataLen; pRpcMsg->contLen = bytes; @@ -176,7 +176,7 @@ int32_t syncBuildAppendEntriesFromRaftLog(SSyncNode* pNode, SSyncRaftEntry* pEnt pMsg->prevLogTerm = prevLogTerm; pMsg->vgId = pNode->vgId; pMsg->srcId = pNode->myRaftId; - pMsg->term = pNode->pRaftStore->currentTerm; + pMsg->term = pNode->raftStore.currentTerm; pMsg->commitIndex = pNode->commitIndex; pMsg->privateTerm = 0; return 0; diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 410986b87a4031d19e8b4e7d96da65ceac7a4839..6cc517fda00c67d6d89dcb2b49d40049a05ea18e 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -31,6 +31,10 @@ static bool syncIsMsgBlock(tmsg_t type) { (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM); } +FORCE_INLINE static int64_t syncGetRetryMaxWaitMs() { + return SYNC_LOG_REPL_RETRY_WAIT_MS * (1 << SYNC_MAX_RETRY_BACKOFF); +} + int64_t syncLogBufferGetEndIndex(SSyncLogBuffer* pBuf) { taosThreadMutexLock(&pBuf->mutex); int64_t index = pBuf->endIndex; @@ -264,20 +268,27 @@ int32_t syncLogBufferReInit(SSyncLogBuffer* pBuf, SSyncNode* pNode) { return ret; } -FORCE_INLINE SyncTerm syncLogBufferGetLastMatchTerm(SSyncLogBuffer* pBuf) { +FORCE_INLINE SyncTerm syncLogBufferGetLastMatchTermWithoutLock(SSyncLogBuffer* pBuf) { SyncIndex index = pBuf->matchIndex; SSyncRaftEntry* pEntry = pBuf->entries[(index + pBuf->size) % pBuf->size].pItem; ASSERT(pEntry != NULL); return pEntry->term; } +SyncTerm syncLogBufferGetLastMatchTerm(SSyncLogBuffer* pBuf) { + taosThreadMutexLock(&pBuf->mutex); + SyncTerm term = syncLogBufferGetLastMatchTermWithoutLock(pBuf); + 
taosThreadMutexUnlock(&pBuf->mutex); + return term; +} + int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevTerm) { taosThreadMutexLock(&pBuf->mutex); syncLogBufferValidate(pBuf); - int32_t ret = -1; - SyncIndex index = pEntry->index; - SyncIndex prevIndex = pEntry->index - 1; - SyncTerm lastMatchTerm = syncLogBufferGetLastMatchTerm(pBuf); + int32_t ret = -1; + SyncIndex index = pEntry->index; + SyncIndex prevIndex = pEntry->index - 1; + SyncTerm lastMatchTerm = syncLogBufferGetLastMatchTermWithoutLock(pBuf); SSyncRaftEntry* pExist = NULL; bool inBuf = true; @@ -329,6 +340,8 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt } // update + ASSERT(pBuf->startIndex < index); + ASSERT(index - pBuf->startIndex < pBuf->size); ASSERT(pBuf->entries[index % pBuf->size].pItem == NULL); SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = prevIndex, .prevLogTerm = prevTerm}; pEntry = NULL; @@ -351,7 +364,11 @@ _out: return ret; } -int32_t syncLogStorePersist(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) { +static inline bool syncLogStoreNeedFlush(SSyncRaftEntry* pEntry, int32_t replicaNum) { + return (replicaNum > 1) && (pEntry->originalRpcType == TDMT_VND_COMMIT); +} + +int32_t syncLogStorePersist(SSyncLogStore* pLogStore, SSyncNode* pNode, SSyncRaftEntry* pEntry) { ASSERT(pEntry->index >= 0); SyncIndex lastVer = pLogStore->syncLogLastIndex(pLogStore); if (lastVer >= pEntry->index && pLogStore->syncLogTruncate(pLogStore, pEntry->index) < 0) { @@ -361,7 +378,8 @@ int32_t syncLogStorePersist(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) { lastVer = pLogStore->syncLogLastIndex(pLogStore); ASSERT(pEntry->index == lastVer + 1); - if (pLogStore->syncLogAppendEntry(pLogStore, pEntry) < 0) { + bool doFsync = syncLogStoreNeedFlush(pEntry, pNode->replicaNum); + if (pLogStore->syncLogAppendEntry(pLogStore, pEntry, doFsync) < 0) { sError("failed to append sync log entry since %s. index:%" PRId64 ", term:%" PRId64 "", terrstr(), pEntry->index, pEntry->term); return -1; @@ -423,7 +441,7 @@ int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* p (void)syncNodeReplicateWithoutLock(pNode); // persist - if (syncLogStorePersist(pLogStore, pEntry) < 0) { + if (syncLogStorePersist(pLogStore, pNode, pEntry) < 0) { sError("vgId:%d, failed to persist sync log entry from buffer since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); goto _out; @@ -456,6 +474,11 @@ int32_t syncLogFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, Syn pNode->vgId, pEntry->index, pEntry->term, TMSG_INFO(pEntry->originalRpcType), applyCode); } + if (pEntry->originalRpcType == TDMT_VND_COMMIT) { + sInfo("vgId:%d, fsm execute vnode commit. 
index: %" PRId64 ", term: %" PRId64 "", pNode->vgId, pEntry->index, + pEntry->term); + } + SRpcMsg rpcMsg = {.code = applyCode}; syncEntry2OriginalRpc(pEntry, &rpcMsg); @@ -491,7 +514,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm SSyncLogStore* pLogStore = pNode->pLogStore; SSyncFSM* pFsm = pNode->pFsm; ESyncState role = pNode->state; - SyncTerm term = pNode->pRaftStore->currentTerm; + SyncTerm term = pNode->raftStore.currentTerm; SyncGroupId vgId = pNode->vgId; int32_t ret = -1; int64_t upperIndex = TMIN(commitIndex, pBuf->matchIndex); @@ -552,7 +575,8 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm ret = 0; _out: // mark as restored if needed - if (!pNode->restoreFinish && pBuf->commitIndex >= pNode->commitIndex) { + if (!pNode->restoreFinish && pBuf->commitIndex >= pNode->commitIndex && pEntry != NULL && + pNode->raftStore.currentTerm <= pEntry->term) { pNode->pFsm->FpRestoreFinishCb(pNode->pFsm); pNode->restoreFinish = true; sInfo("vgId:%d, restore finished. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, @@ -595,9 +619,9 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { return -1; } - int32_t ret = -1; - bool retried = false; - int64_t retryWaitMs = syncLogGetRetryBackoffTimeMs(pMgr); + int32_t ret = -1; + bool retried = false; + int64_t retryWaitMs = syncLogGetRetryBackoffTimeMs(pMgr); int64_t nowMs = taosGetMonoTimestampMs(); int count = 0; int64_t firstIndex = -1; @@ -613,11 +637,17 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { } if (pMgr->states[pos].acked) { + if (pMgr->matchIndex < index && pMgr->states[pos].timeMs + (syncGetRetryMaxWaitMs() << 3) < nowMs) { + syncLogReplMgrReset(pMgr); + sWarn("vgId:%d, reset sync log repl mgr since stagnation. index: %" PRId64 ", peer: %" PRIx64, pNode->vgId, + index, pDestId->addr); + goto _out; + } continue; } bool barrier = false; - if (syncLogBufferReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { + if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { sError("vgId:%d, failed to replicate sync log entry since %s. index: %" PRId64 ", dest: %" PRIx64 "", pNode->vgId, terrstr(), index, pDestId->addr); goto _out; @@ -639,16 +669,17 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { _out: if (retried) { pMgr->retryBackoff = syncLogGetNextRetryBackoff(pMgr); - sInfo("vgId:%d, resent %d sync log entries. dest: %" PRIx64 ", indexes: %" PRId64 " ..., terms: ... %" PRId64 - ", retryWaitMs: %" PRId64 ", repl mgr: [%" PRId64 " %" PRId64 ", %" PRId64 ")", + SSyncLogBuffer* pBuf = pNode->pLogBuf; + sInfo("vgId:%d, resend %d sync log entries. dest: %" PRIx64 ", indexes: %" PRId64 " ..., terms: ... 
%" PRId64 + ", retryWaitMs: %" PRId64 ", mgr: [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 + " %" PRId64 ", %" PRId64 ")", pNode->vgId, count, pDestId->addr, firstIndex, term, retryWaitMs, pMgr->startIndex, pMgr->matchIndex, - pMgr->endIndex); + pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); } return ret; } -int32_t syncLogReplMgrProcessReplyInRecoveryMode(SSyncLogReplMgr* pMgr, SSyncNode* pNode, - SyncAppendEntriesReply* pMsg) { +int32_t syncLogReplMgrProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) { SSyncLogBuffer* pBuf = pNode->pLogBuf; SRaftId destId = pMsg->srcId; ASSERT(pMgr->restored == false); @@ -723,7 +754,7 @@ int32_t syncLogReplMgrProcessReplyInRecoveryMode(SSyncLogReplMgr* pMgr, SSyncNod // attempt to replicate the raft log at index (void)syncLogReplMgrReset(pMgr); - return syncLogReplMgrReplicateProbeOnce(pMgr, pNode, index); + return syncLogReplMgrReplicateProbe(pMgr, pNode, index); } int32_t syncLogReplMgrProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg) { @@ -751,9 +782,9 @@ int32_t syncLogReplMgrProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sync } if (pMgr->restored) { - (void)syncLogReplMgrProcessReplyInNormalMode(pMgr, pNode, pMsg); + (void)syncLogReplMgrProcessReplyAsNormal(pMgr, pNode, pMsg); } else { - (void)syncLogReplMgrProcessReplyInRecoveryMode(pMgr, pNode, pMsg); + (void)syncLogReplMgrProcessReplyAsRecovery(pMgr, pNode, pMsg); } taosThreadMutexUnlock(&pBuf->mutex); return 0; @@ -761,17 +792,17 @@ int32_t syncLogReplMgrProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sync int32_t syncLogReplMgrReplicateOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { if (pMgr->restored) { - (void)syncLogReplMgrReplicateAttemptedOnce(pMgr, pNode); + (void)syncLogReplMgrReplicateAttempt(pMgr, pNode); } else { - (void)syncLogReplMgrReplicateProbeOnce(pMgr, pNode, pNode->pLogBuf->matchIndex); + (void)syncLogReplMgrReplicateProbe(pMgr, pNode, pNode->pLogBuf->matchIndex); } return 0; } -int32_t syncLogReplMgrReplicateProbeOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index) { +int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index) { ASSERT(!pMgr->restored); ASSERT(pMgr->startIndex >= 0); - int64_t retryMaxWaitMs = SYNC_LOG_REPL_RETRY_WAIT_MS * (1 << SYNC_MAX_RETRY_BACKOFF); + int64_t retryMaxWaitMs = syncGetRetryMaxWaitMs(); int64_t nowMs = taosGetMonoTimestampMs(); if (pMgr->endIndex > pMgr->startIndex && @@ -780,10 +811,10 @@ int32_t syncLogReplMgrReplicateProbeOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode } (void)syncLogReplMgrReset(pMgr); - SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; - bool barrier = false; - SyncTerm term = -1; - if (syncLogBufferReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { + SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; + bool barrier = false; + SyncTerm term = -1; + if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { sError("vgId:%d, failed to replicate log entry since %s. index: %" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId, terrstr(), index, pDestId->addr); return -1; @@ -799,25 +830,26 @@ int32_t syncLogReplMgrReplicateProbeOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode pMgr->endIndex = index + 1; SSyncLogBuffer* pBuf = pNode->pLogBuf; - sTrace("vgId:%d, attempted to probe the %d'th peer with msg of index:%" PRId64 " term: %" PRId64 - ". 
pMgr(rs:%d): [%" PRId64 " %" PRId64 ", %" PRId64 "), pBuf: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 - ")", - pNode->vgId, pMgr->peerId, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, - pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); + sInfo("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64 + " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", + pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, + pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); return 0; } -int32_t syncLogReplMgrReplicateAttemptedOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { +int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { ASSERT(pMgr->restored); - SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; - int32_t batchSize = TMAX(1, pMgr->size >> (4 + pMgr->retryBackoff)); - int32_t count = 0; - int64_t nowMs = taosGetMonoTimestampMs(); - int64_t limit = pMgr->size >> 1; + SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; + int32_t batchSize = TMAX(1, pMgr->size >> (4 + pMgr->retryBackoff)); + int32_t count = 0; + int64_t nowMs = taosGetMonoTimestampMs(); + int64_t limit = pMgr->size >> 1; + SyncTerm term = -1; + SyncIndex firstIndex = -1; for (SyncIndex index = pMgr->endIndex; index <= pNode->pLogBuf->matchIndex; index++) { - if (batchSize < count++ || limit <= index - pMgr->startIndex) { + if (batchSize < count || limit <= index - pMgr->startIndex) { break; } if (pMgr->startIndex + 1 < index && pMgr->states[(index - 1) % pMgr->size].barrier) { @@ -827,7 +859,7 @@ int32_t syncLogReplMgrReplicateAttemptedOnce(SSyncLogReplMgr* pMgr, SSyncNode* p SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; bool barrier = false; SyncTerm term = -1; - if (syncLogBufferReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { + if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) { sError("vgId:%d, failed to replicate log entry since %s. index: %" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId, terrstr(), index, pDestId->addr); return -1; @@ -837,6 +869,9 @@ int32_t syncLogReplMgrReplicateAttemptedOnce(SSyncLogReplMgr* pMgr, SSyncNode* p pMgr->states[pos].term = term; pMgr->states[pos].acked = false; + if (firstIndex == -1) firstIndex = index; + count++; + pMgr->endIndex = index + 1; if (barrier) { sInfo("vgId:%d, replicated sync barrier to dest: %" PRIx64 ". index: %" PRId64 ", term: %" PRId64 @@ -850,23 +885,24 @@ int32_t syncLogReplMgrReplicateAttemptedOnce(SSyncLogReplMgr* pMgr, SSyncNode* p syncLogReplMgrRetryOnNeed(pMgr, pNode); SSyncLogBuffer* pBuf = pNode->pLogBuf; - sTrace("vgId:%d, attempted to replicate %d msgs to the %d'th peer. pMgr(rs:%d): [%" PRId64 " %" PRId64 ", %" PRId64 - "), pBuf: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", - pNode->vgId, count, pMgr->peerId, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, - pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); + sTrace("vgId:%d, replicated %d msgs to peer: %" PRIx64 ". 
indexes: %" PRId64 "..., terms: ...%" PRId64 + ", mgr: (rs:%d) [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 + ")", + pNode->vgId, count, pDestId->addr, firstIndex, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, + pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); return 0; } -int32_t syncLogReplMgrProcessReplyInNormalMode(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) { +int32_t syncLogReplMgrProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) { ASSERT(pMgr->restored == true); if (pMgr->startIndex <= pMsg->lastSendIndex && pMsg->lastSendIndex < pMgr->endIndex) { - if (pMgr->startIndex < pMgr->matchIndex && pMgr->retryBackoff > 0) { - int64_t firstSentMs = pMgr->states[pMgr->startIndex % pMgr->size].timeMs; - int64_t lastSentMs = pMgr->states[(pMgr->endIndex - 1) % pMgr->size].timeMs; - int64_t timeDiffMs = lastSentMs - firstSentMs; - if (timeDiffMs > 0 && timeDiffMs < (SYNC_LOG_REPL_RETRY_WAIT_MS << (pMgr->retryBackoff - 1))) { - pMgr->retryBackoff -= 1; - } + if (pMgr->startIndex < pMgr->matchIndex && pMgr->retryBackoff > 0) { + int64_t firstSentMs = pMgr->states[pMgr->startIndex % pMgr->size].timeMs; + int64_t lastSentMs = pMgr->states[(pMgr->endIndex - 1) % pMgr->size].timeMs; + int64_t timeDiffMs = lastSentMs - firstSentMs; + if (timeDiffMs > 0 && timeDiffMs < (SYNC_LOG_REPL_RETRY_WAIT_MS << (pMgr->retryBackoff - 1))) { + pMgr->retryBackoff -= 1; + } } pMgr->states[pMsg->lastSendIndex % pMgr->size].acked = true; pMgr->matchIndex = TMAX(pMgr->matchIndex, pMsg->matchIndex); @@ -876,7 +912,7 @@ int32_t syncLogReplMgrProcessReplyInNormalMode(SSyncLogReplMgr* pMgr, SSyncNode* pMgr->startIndex = pMgr->matchIndex; } - return syncLogReplMgrReplicateAttemptedOnce(pMgr, pNode); + return syncLogReplMgrReplicateAttempt(pMgr, pNode); } SSyncLogReplMgr* syncLogReplMgrCreate() { @@ -909,8 +945,11 @@ int32_t syncNodeLogReplMgrInit(SSyncNode* pNode) { for (int i = 0; i < TSDB_MAX_REPLICA; i++) { ASSERT(pNode->logReplMgrs[i] == NULL); pNode->logReplMgrs[i] = syncLogReplMgrCreate(); + if (pNode->logReplMgrs[i] == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } pNode->logReplMgrs[i]->peerId = i; - ASSERTS(pNode->logReplMgrs[i] != NULL, "Out of memory."); } return 0; } @@ -985,6 +1024,10 @@ void syncLogBufferDestroy(SSyncLogBuffer* pBuf) { int32_t syncLogBufferRollback(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncIndex toIndex) { ASSERT(pBuf->commitIndex < toIndex && toIndex <= pBuf->endIndex); + if (toIndex == pBuf->endIndex) { + return 0; + } + sInfo("vgId:%d, rollback sync log buffer. 
toindex: %" PRId64 ", buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, toIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); @@ -1066,12 +1109,11 @@ SSyncRaftEntry* syncLogBufferGetOneEntry(SSyncLogBuffer* pBuf, SSyncNode* pNode, return pEntry; } -int32_t syncLogBufferReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, - SRaftId* pDestId, bool* pBarrier) { +int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, + SRaftId* pDestId, bool* pBarrier) { SSyncRaftEntry* pEntry = NULL; SRpcMsg msgOut = {0}; bool inBuf = false; - int32_t ret = -1; SyncTerm prevLogTerm = -1; SSyncLogBuffer* pBuf = pNode->pLogBuf; @@ -1097,14 +1139,13 @@ int32_t syncLogBufferReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Syn } if (pTerm) *pTerm = pEntry->term; - int32_t code = syncBuildAppendEntriesFromRaftLog(pNode, pEntry, prevLogTerm, &msgOut); + int32_t code = syncBuildAppendEntriesFromRaftEntry(pNode, pEntry, prevLogTerm, &msgOut); if (code < 0) { sError("vgId:%d, failed to get append entries for index:%" PRId64 "", pNode->vgId, index); goto _err; } (void)syncNodeSendAppendEntries(pNode, pDestId, &msgOut); - ret = 0; sTrace("vgId:%d, replicate one msg index: %" PRId64 " term: %" PRId64 " prevterm: %" PRId64 " to dest: 0x%016" PRIx64, pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index 86ea1f48ccf9416cfedaff87acd58a038f9fb6aa..806949c81e2ad8598a7d2901769c53a0bfa2daa8 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -71,31 +71,23 @@ int32_t syncWriteCfgFile(SSyncNode *pNode) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s.bak", realfile); - pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); - if (pFile == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - sError("vgId:%d, failed to open sync cfg file:%s since %s", pNode->vgId, realfile, terrstr()); - goto _OVER; - } - terrno = TSDB_CODE_OUT_OF_MEMORY; pJson = tjsonCreateObject(); if (pJson == NULL) goto _OVER; if (tjsonAddObject(pJson, "RaftCfg", syncEncodeRaftCfg, pCfg) < 0) goto _OVER; - buffer = tjsonToString(pJson); if (buffer == NULL) goto _OVER; + terrno = 0; + + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) goto _OVER; int32_t len = strlen(buffer); if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER; if (taosFsyncFile(pFile) < 0) goto _OVER; - taosCloseFile(&pFile); - if (taosRenameFile(file, realfile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - sError("vgId:%d, failed to rename sync cfg file:%s to %s since %s", pNode->vgId, file, realfile, terrstr()); - goto _OVER; - } + taosCloseFile(&pFile); + if (taosRenameFile(file, realfile) != 0) goto _OVER; code = 0; sInfo("vgId:%d, succeed to write sync cfg file:%s, len:%d", pNode->vgId, realfile, len); @@ -106,6 +98,7 @@ _OVER: if (pFile != NULL) taosCloseFile(&pFile); if (code != 0) { + if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); sError("vgId:%d, failed to write sync cfg file:%s since %s", pNode->vgId, realfile, terrstr()); } return code; diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index 3f9f397ef5726b2d29449080529dff6a4d9c2ddf..e6569d99741f762c593c557751e800f11b6c150e 100644 --- a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -23,7 +23,7 
@@ // public function static int32_t raftLogRestoreFromSnapshot(struct SSyncLogStore* pLogStore, SyncIndex snapshotIndex); -static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry); +static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forceSync); static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIndex); static bool raftLogExist(struct SSyncLogStore* pLogStore, SyncIndex index); static int32_t raftLogUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index); @@ -192,7 +192,7 @@ SyncTerm raftLogLastTerm(struct SSyncLogStore* pLogStore) { return SYNC_TERM_INVALID; } -static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) { +static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forceSync) { SSyncLogStoreData* pData = pLogStore->data; SWal* pWal = pData->pWal; @@ -219,6 +219,8 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr ASSERT(pEntry->index == index); + walFsync(pWal, forceSync); + sNTrace(pData->pSyncNode, "write index:%" PRId64 ", type:%s, origin type:%s, elapsed:%" PRId64, pEntry->index, TMSG_INFO(pEntry->msgType), TMSG_INFO(pEntry->originalRpcType), tsElapsed); return 0; @@ -312,29 +314,6 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn SSyncLogStoreData* pData = pLogStore->data; SWal* pWal = pData->pWal; - // need not truncate - SyncIndex wallastVer = walGetLastVer(pWal); - if (fromIndex > wallastVer) { - return 0; - } - - // need not truncate - SyncIndex walCommitVer = walGetCommittedVer(pWal); - if (fromIndex <= walCommitVer) { - return 0; - } - - // delete from cache - for (SyncIndex index = fromIndex; index <= wallastVer; ++index) { - SLRUCache* pCache = pData->pSyncNode->pLogStore->pCache; - LRUHandle* h = taosLRUCacheLookup(pCache, &index, sizeof(index)); - if (h) { - sNTrace(pData->pSyncNode, "cache delete index:%" PRId64, index); - - taosLRUCacheRelease(pData->pSyncNode->pLogStore->pCache, h, true); - } - } - int32_t code = walRollback(pWal, fromIndex); if (code != 0) { int32_t err = terrno; diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index b19cda2a446fcb6147eec2cd13400fc22526f049..197d1463fd80791403425adaed75ab31d380e4bb 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -16,156 +16,161 @@ #define _DEFAULT_SOURCE #include "syncRaftStore.h" #include "syncUtil.h" +#include "tjson.h" -// private function -static int32_t raftStoreInit(SRaftStore *pRaftStore); -static bool raftStoreFileExist(char *path); +static int32_t raftStoreDecode(const SJson *pJson, SRaftStore *pStore) { + int32_t code = 0; -// public function -SRaftStore *raftStoreOpen(const char *path) { - int32_t ret; + tjsonGetNumberValue(pJson, "current_term", pStore->currentTerm, code); + if (code < 0) return -1; + tjsonGetNumberValue(pJson, "vote_for_addr", pStore->voteFor.addr, code); + if (code < 0) return -1; + tjsonGetInt32ValueFromDouble(pJson, "vote_for_vgid", pStore->voteFor.vgId, code); + if (code < 0) return -1; - SRaftStore *pRaftStore = taosMemoryCalloc(1, sizeof(SRaftStore)); - if (pRaftStore == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - - snprintf(pRaftStore->path, sizeof(pRaftStore->path), "%s", path); - if (!raftStoreFileExist(pRaftStore->path)) { - ret = raftStoreInit(pRaftStore); - ASSERT(ret == 0); - } - - char 
storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; - pRaftStore->pFile = taosOpenFile(path, TD_FILE_READ | TD_FILE_WRITE); - ASSERT(pRaftStore->pFile != NULL); - - int len = taosReadFile(pRaftStore->pFile, storeBuf, RAFT_STORE_BLOCK_SIZE); - ASSERT(len > 0); - - ret = raftStoreDeserialize(pRaftStore, storeBuf, len); - ASSERT(ret == 0); - - return pRaftStore; -} - -static int32_t raftStoreInit(SRaftStore *pRaftStore) { - ASSERT(pRaftStore != NULL); - - pRaftStore->pFile = taosOpenFile(pRaftStore->path, TD_FILE_CREATE | TD_FILE_WRITE); - ASSERT(pRaftStore->pFile != NULL); - - pRaftStore->currentTerm = 0; - pRaftStore->voteFor.addr = 0; - pRaftStore->voteFor.vgId = 0; - - int32_t ret = raftStorePersist(pRaftStore); - ASSERT(ret == 0); - - taosCloseFile(&pRaftStore->pFile); - return 0; -} - -int32_t raftStoreClose(SRaftStore *pRaftStore) { - if (pRaftStore == NULL) return 0; - - taosCloseFile(&pRaftStore->pFile); - taosMemoryFree(pRaftStore); - pRaftStore = NULL; return 0; } -int32_t raftStorePersist(SRaftStore *pRaftStore) { - ASSERT(pRaftStore != NULL); - - int32_t ret; - char storeBuf[RAFT_STORE_BLOCK_SIZE] = {0}; - ret = raftStoreSerialize(pRaftStore, storeBuf, sizeof(storeBuf)); - ASSERT(ret == 0); +int32_t raftStoreReadFile(SSyncNode *pNode) { + int32_t code = -1; + TdFilePtr pFile = NULL; + char *pData = NULL; + SJson *pJson = NULL; + const char *file = pNode->raftStorePath; + SRaftStore *pStore = &pNode->raftStore; + + if (taosStatFile(file, NULL, NULL) < 0) { + sInfo("vgId:%d, raft store file:%s not exist, use default value", pNode->vgId, file); + pStore->currentTerm = 0; + pStore->voteFor.addr = 0; + pStore->voteFor.vgId = 0; + return raftStoreWriteFile(pNode); + } - taosLSeekFile(pRaftStore->pFile, 0, SEEK_SET); + pFile = taosOpenFile(file, TD_FILE_READ); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + sError("vgId:%d, failed to open raft store file:%s since %s", pNode->vgId, file, terrstr()); + goto _OVER; + } - ret = taosWriteFile(pRaftStore->pFile, storeBuf, sizeof(storeBuf)); - ASSERT(ret == RAFT_STORE_BLOCK_SIZE); + int64_t size = 0; + if (taosFStatFile(pFile, &size, NULL) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + sError("vgId:%d, failed to fstat raft store file:%s since %s", pNode->vgId, file, terrstr()); + goto _OVER; + } - taosFsyncFile(pRaftStore->pFile); - return 0; -} + pData = taosMemoryMalloc(size + 1); + if (pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } -static bool raftStoreFileExist(char *path) { - bool b = taosStatFile(path, NULL, NULL) >= 0; - return b; -} + if (taosReadFile(pFile, pData, size) != size) { + terrno = TAOS_SYSTEM_ERROR(errno); + sError("vgId:%d, failed to read raft store file:%s since %s", pNode->vgId, file, terrstr()); + goto _OVER; + } -int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) { - ASSERT(pRaftStore != NULL); + pData[size] = '\0'; - cJSON *pRoot = cJSON_CreateObject(); + pJson = tjsonParse(pData); + if (pJson == NULL) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } - char u64Buf[128] = {0}; - snprintf(u64Buf, sizeof(u64Buf), "%" PRIu64 "", pRaftStore->currentTerm); - cJSON_AddStringToObject(pRoot, "current_term", u64Buf); + if (raftStoreDecode(pJson, pStore) < 0) { + terrno = TSDB_CODE_INVALID_JSON_FORMAT; + goto _OVER; + } - snprintf(u64Buf, sizeof(u64Buf), "%" PRIu64 "", pRaftStore->voteFor.addr); - cJSON_AddStringToObject(pRoot, "vote_for_addr", u64Buf); + code = 0; + sInfo("vgId:%d, succceed to read raft store file %s", pNode->vgId, file); - 
cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId); +_OVER: + if (pData != NULL) taosMemoryFree(pData); + if (pJson != NULL) cJSON_Delete(pJson); + if (pFile != NULL) taosCloseFile(&pFile); - char *serialized = cJSON_Print(pRoot); - int len2 = strlen(serialized); - ASSERT(len2 < len); - memset(buf, 0, len); - snprintf(buf, len, "%s", serialized); - taosMemoryFree(serialized); + if (code != 0) { + sError("vgId:%d, failed to read raft store file:%s since %s", pNode->vgId, file, terrstr()); + } + return code; +} - cJSON_Delete(pRoot); +static int32_t raftStoreEncode(SJson *pJson, SRaftStore *pStore) { + if (tjsonAddIntegerToObject(pJson, "current_term", pStore->currentTerm) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "vote_for_addr", pStore->voteFor.addr) < 0) return -1; + if (tjsonAddDoubleToObject(pJson, "vote_for_vgid", pStore->voteFor.vgId) < 0) return -1; return 0; } -int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len) { - ASSERT(pRaftStore != NULL); - - ASSERT(len > 0 && len <= RAFT_STORE_BLOCK_SIZE); - cJSON *pRoot = cJSON_Parse(buf); - - cJSON *pCurrentTerm = cJSON_GetObjectItem(pRoot, "current_term"); - ASSERT(cJSON_IsString(pCurrentTerm)); - sscanf(pCurrentTerm->valuestring, "%" PRIu64 "", &(pRaftStore->currentTerm)); - - cJSON *pVoteForAddr = cJSON_GetObjectItem(pRoot, "vote_for_addr"); - ASSERT(cJSON_IsString(pVoteForAddr)); - sscanf(pVoteForAddr->valuestring, "%" PRIu64 "", &(pRaftStore->voteFor.addr)); - - cJSON *pVoteForVgid = cJSON_GetObjectItem(pRoot, "vote_for_vgid"); - pRaftStore->voteFor.vgId = pVoteForVgid->valueint; - - cJSON_Delete(pRoot); - return 0; +int32_t raftStoreWriteFile(SSyncNode *pNode) { + int32_t code = -1; + char *buffer = NULL; + SJson *pJson = NULL; + TdFilePtr pFile = NULL; + const char *realfile = pNode->raftStorePath; + SRaftStore *pStore = &pNode->raftStore; + char file[PATH_MAX] = {0}; + snprintf(file, sizeof(file), "%s.bak", realfile); + + terrno = TSDB_CODE_OUT_OF_MEMORY; + pJson = tjsonCreateObject(); + if (pJson == NULL) goto _OVER; + if (raftStoreEncode(pJson, pStore) != 0) goto _OVER; + buffer = tjsonToString(pJson); + if (buffer == NULL) goto _OVER; + terrno = 0; + + pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pFile == NULL) goto _OVER; + + int32_t len = strlen(buffer); + if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER; + if (taosFsyncFile(pFile) < 0) goto _OVER; + + taosCloseFile(&pFile); + if (taosRenameFile(file, realfile) != 0) goto _OVER; + + code = 0; + sInfo("vgId:%d, succeed to write raft store file:%s, len:%d", pNode->vgId, realfile, len); + +_OVER: + if (pJson != NULL) tjsonDelete(pJson); + if (buffer != NULL) taosMemoryFree(buffer); + if (pFile != NULL) taosCloseFile(&pFile); + + if (code != 0) { + if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno); + sError("vgId:%d, failed to write raft store file:%s since %s", pNode->vgId, realfile, terrstr()); + } + return code; } -bool raftStoreHasVoted(SRaftStore *pRaftStore) { - bool b = syncUtilEmptyId(&(pRaftStore->voteFor)); +bool raftStoreHasVoted(SSyncNode *pNode) { + bool b = syncUtilEmptyId(&pNode->raftStore.voteFor); return (!b); } -void raftStoreVote(SRaftStore *pRaftStore, SRaftId *pRaftId) { - ASSERT(!syncUtilEmptyId(pRaftId)); - pRaftStore->voteFor = *pRaftId; - raftStorePersist(pRaftStore); +void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId) { + pNode->raftStore.voteFor = *pRaftId; + (void)raftStoreWriteFile(pNode); } -void raftStoreClearVote(SRaftStore *pRaftStore) { - 
pRaftStore->voteFor = EMPTY_RAFT_ID; - raftStorePersist(pRaftStore); +void raftStoreClearVote(SSyncNode *pNode) { + pNode->raftStore.voteFor = EMPTY_RAFT_ID; + (void)raftStoreWriteFile(pNode); } -void raftStoreNextTerm(SRaftStore *pRaftStore) { - ++(pRaftStore->currentTerm); - raftStorePersist(pRaftStore); +void raftStoreNextTerm(SSyncNode *pNode) { + pNode->raftStore.currentTerm++; + (void)raftStoreWriteFile(pNode); } -void raftStoreSetTerm(SRaftStore *pRaftStore, SyncTerm term) { - pRaftStore->currentTerm = term; - raftStorePersist(pRaftStore); +void raftStoreSetTerm(SSyncNode *pNode, SyncTerm term) { + pNode->raftStore.currentTerm = term; + (void)raftStoreWriteFile(pNode); } diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index e3058768f8b1579b1d7b1074603ce2ade08a5b89..3df203221b88bbd0b9d804613e2ea50e881be149 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -48,92 +48,6 @@ int32_t syncNodeMaybeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, SRpcMsg* pRpcMsg); -int32_t syncNodeReplicateOne(SSyncNode* pSyncNode, SRaftId* pDestId, bool snapshot) { - ASSERT(false && "deprecated"); - // next index - SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId); - - if (snapshot) { - // maybe start snapshot - SyncIndex logStartIndex = pSyncNode->pLogStore->syncLogBeginIndex(pSyncNode->pLogStore); - SyncIndex logEndIndex = pSyncNode->pLogStore->syncLogEndIndex(pSyncNode->pLogStore); - if (nextIndex < logStartIndex || nextIndex - 1 > logEndIndex) { - sNTrace(pSyncNode, "maybe start snapshot for next-index:%" PRId64 ", start:%" PRId64 ", end:%" PRId64, nextIndex, - logStartIndex, logEndIndex); - // start snapshot - int32_t code = syncNodeStartSnapshot(pSyncNode, pDestId); - } - } - - // pre index, pre term - SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex); - SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex); - - // prepare entry - SRpcMsg rpcMsg = {0}; - SyncAppendEntries* pMsg = NULL; - - SSyncRaftEntry* pEntry = NULL; - SLRUCache* pCache = pSyncNode->pLogStore->pCache; - LRUHandle* h = taosLRUCacheLookup(pCache, &nextIndex, sizeof(nextIndex)); - int32_t code = 0; - if (h) { - pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h); - code = 0; - - pSyncNode->pLogStore->cacheHit++; - sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", nextIndex, pEntry->bytes, pEntry); - - } else { - pSyncNode->pLogStore->cacheMiss++; - sNTrace(pSyncNode, "miss cache index:%" PRId64, nextIndex); - - code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, nextIndex, &pEntry); - } - - if (code == 0) { - ASSERT(pEntry != NULL); - - code = syncBuildAppendEntries(&rpcMsg, (int32_t)(pEntry->bytes), pSyncNode->vgId); - ASSERT(code == 0); - - pMsg = rpcMsg.pCont; - memcpy(pMsg->data, pEntry, pEntry->bytes); - } else { - if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) { - // no entry in log - code = syncBuildAppendEntries(&rpcMsg, 0, pSyncNode->vgId); - ASSERT(code == 0); - - pMsg = rpcMsg.pCont; - } else { - sNError(pSyncNode, "replicate to dnode:%d error, next-index:%" PRId64, DID(pDestId), nextIndex); - return -1; - } - } - - if (h) { - taosLRUCacheRelease(pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - - // prepare msg - ASSERT(pMsg != NULL); - pMsg->srcId = pSyncNode->myRaftId; - pMsg->destId = *pDestId; - pMsg->term = pSyncNode->pRaftStore->currentTerm; - pMsg->prevLogIndex = preLogIndex; - pMsg->prevLogTerm = preLogTerm; - 
pMsg->commitIndex = pSyncNode->commitIndex; - pMsg->privateTerm = 0; - // pMsg->privateTerm = syncIndexMgrGetTerm(pSyncNode->pNextIndex, pDestId); - - // send msg - syncNodeMaybeSendAppendEntries(pSyncNode, pDestId, &rpcMsg); - return 0; -} - int32_t syncNodeReplicate(SSyncNode* pNode) { SSyncLogBuffer* pBuf = pNode->pLogBuf; taosThreadMutexLock(&pBuf->mutex); @@ -156,25 +70,6 @@ int32_t syncNodeReplicateWithoutLock(SSyncNode* pNode) { return 0; } -int32_t syncNodeReplicateOld(SSyncNode* pSyncNode) { - if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) { - return -1; - } - - sNTrace(pSyncNode, "do replicate"); - - int32_t ret = 0; - for (int i = 0; i < pSyncNode->peersNum; ++i) { - SRaftId* pDestId = &(pSyncNode->peersId[i]); - ret = syncNodeReplicateOne(pSyncNode, pDestId, true); - if (ret != 0) { - sError("vgId:%d, do append entries error for dnode:%d", pSyncNode->vgId, DID(pDestId)); - } - } - - return 0; -} - int32_t syncNodeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, SRpcMsg* pRpcMsg) { SyncAppendEntries* pMsg = pRpcMsg->pCont; pMsg->destId = *destRaftId; @@ -182,39 +77,6 @@ int32_t syncNodeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftI return 0; } -int32_t syncNodeSendAppendEntriesOld(SSyncNode* pSyncNode, const SRaftId* destRaftId, SRpcMsg* pRpcMsg) { - int32_t ret = 0; - SyncAppendEntries* pMsg = pRpcMsg->pCont; - if (pMsg == NULL) { - sError("vgId:%d, sync-append-entries msg is NULL", pSyncNode->vgId); - return 0; - } - - SPeerState* pState = syncNodeGetPeerState(pSyncNode, destRaftId); - if (pState == NULL) { - sError("vgId:%d, replica maybe dropped", pSyncNode->vgId); - return 0; - } - - // save index, otherwise pMsg will be free by rpc - SyncIndex saveLastSendIndex = pState->lastSendIndex; - bool update = false; - if (pMsg->dataLen > 0) { - saveLastSendIndex = pMsg->prevLogIndex + 1; - update = true; - } - - syncLogSendAppendEntries(pSyncNode, pMsg, ""); - syncNodeSendMsgById(destRaftId, pSyncNode, pRpcMsg); - - if (update) { - pState->lastSendIndex = saveLastSendIndex; - pState->lastSendTime = taosGetTimestampMs(); - } - - return ret; -} - int32_t syncNodeMaybeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, SRpcMsg* pRpcMsg) { int32_t ret = 0; SyncAppendEntries* pMsg = pRpcMsg->pCont; @@ -245,7 +107,7 @@ int32_t syncNodeHeartbeatPeers(SSyncNode* pSyncNode) { SyncHeartbeat* pSyncMsg = rpcMsg.pCont; pSyncMsg->srcId = pSyncNode->myRaftId; pSyncMsg->destId = pSyncNode->peersId[i]; - pSyncMsg->term = pSyncNode->pRaftStore->currentTerm; + pSyncMsg->term = pSyncNode->raftStore.currentTerm; pSyncMsg->commitIndex = pSyncNode->commitIndex; pSyncMsg->minMatchIndex = syncMinMatchIndex(pSyncNode); pSyncMsg->privateTerm = 0; diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index 773befe1e4fdec36c5bc21ac1a6d955dd6de3322..069ea2ea88b85c91846b975513dcd681944c7ace 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -44,21 +44,12 @@ // /\ UNCHANGED <> // -static bool syncNodeOnRequestVoteLogOK(SSyncNode* pSyncNode, SyncRequestVote* pMsg) { - SyncTerm myLastTerm = syncNodeGetLastTerm(pSyncNode); - SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode); - - if (pMsg->lastLogIndex < pSyncNode->commitIndex) { - sNTrace(pSyncNode, - "logok:0, {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 ", recv-lindex:%" PRId64 - ", recv-term:%" PRIu64 "}", - myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term); - - return 
false; - } +static bool syncNodeOnRequestVoteLogOK(SSyncNode* ths, SyncRequestVote* pMsg) { + SyncTerm myLastTerm = syncNodeGetLastTerm(ths); + SyncIndex myLastIndex = syncNodeGetLastIndex(ths); if (myLastTerm == SYNC_TERM_INVALID) { - sNTrace(pSyncNode, + sNTrace(ths, "logok:0, {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 ", recv-lindex:%" PRId64 ", recv-term:%" PRIu64 "}", myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term); @@ -66,22 +57,29 @@ static bool syncNodeOnRequestVoteLogOK(SSyncNode* pSyncNode, SyncRequestVote* pM } if (pMsg->lastLogTerm > myLastTerm) { - sNTrace(pSyncNode, + sNTrace(ths, "logok:1, {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 ", recv-lindex:%" PRId64 ", recv-term:%" PRIu64 "}", myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term); + + if (pMsg->lastLogIndex < ths->commitIndex) { + sNWarn(ths, + "logok:1, commit rollback required. {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 + ", recv-lindex:%" PRId64 ", recv-term:%" PRIu64 "}", + myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term); + } return true; } if (pMsg->lastLogTerm == myLastTerm && pMsg->lastLogIndex >= myLastIndex) { - sNTrace(pSyncNode, + sNTrace(ths, "logok:1, {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 ", recv-lindex:%" PRId64 ", recv-term:%" PRIu64 "}", myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term); return true; } - sNTrace(pSyncNode, + sNTrace(ths, "logok:0, {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 ", recv-lindex:%" PRId64 ", recv-term:%" PRIu64 "}", myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term); @@ -93,7 +91,7 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SyncRequestVote* pMsg = pRpcMsg->pCont; // if already drop replica, do not process - if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { + if (!syncNodeInRaftGroup(ths, &pMsg->srcId)) { syncLogRecvRequestVote(ths, pMsg, -1, "not in my config"); return -1; } @@ -101,21 +99,21 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { bool logOK = syncNodeOnRequestVoteLogOK(ths, pMsg); // maybe update term - if (pMsg->term > ths->pRaftStore->currentTerm) { + if (pMsg->term > ths->raftStore.currentTerm) { syncNodeStepDown(ths, pMsg->term); // syncNodeUpdateTerm(ths, pMsg->term); } - ASSERT(pMsg->term <= ths->pRaftStore->currentTerm); + ASSERT(pMsg->term <= ths->raftStore.currentTerm); - bool grant = (pMsg->term == ths->pRaftStore->currentTerm) && logOK && - ((!raftStoreHasVoted(ths->pRaftStore)) || (syncUtilSameId(&(ths->pRaftStore->voteFor), &(pMsg->srcId)))); + bool grant = (pMsg->term == ths->raftStore.currentTerm) && logOK && + ((!raftStoreHasVoted(ths)) || (syncUtilSameId(&ths->raftStore.voteFor, &pMsg->srcId))); if (grant) { // maybe has already voted for pMsg->srcId // vote again, no harm - raftStoreVote(ths->pRaftStore, &(pMsg->srcId)); + raftStoreVote(ths, &(pMsg->srcId)); // candidate ? 
- syncNodeStepDown(ths, ths->pRaftStore->currentTerm); + syncNodeStepDown(ths, ths->raftStore.currentTerm); // forbid elect for this round syncNodeResetElectTimer(ths); @@ -129,7 +127,7 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SyncRequestVoteReply* pReply = rpcMsg.pCont; pReply->srcId = ths->myRaftId; pReply->destId = pMsg->srcId; - pReply->term = ths->pRaftStore->currentTerm; + pReply->term = ths->raftStore.currentTerm; pReply->voteGranted = grant; // trace log @@ -137,4 +135,4 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { syncLogSendRequestVoteReply(ths, pReply, ""); syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); return 0; -} \ No newline at end of file +} diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index 563f475070a1ec63a3af73dcf9ed6205754b04b4..a0d6cbf597bb95c9d0e73b9cb9cde9f7c94d4a98 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -49,25 +49,25 @@ int32_t syncNodeOnRequestVoteReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } // drop stale response - if (pMsg->term < ths->pRaftStore->currentTerm) { + if (pMsg->term < ths->raftStore.currentTerm) { syncLogRecvRequestVoteReply(ths, pMsg, "drop stale response"); return -1; } - // ASSERT(!(pMsg->term > ths->pRaftStore->currentTerm)); + // ASSERT(!(pMsg->term > ths->raftStore.currentTerm)); // no need this code, because if I receive reply.term, then I must have sent for that term. - // if (pMsg->term > ths->pRaftStore->currentTerm) { + // if (pMsg->term > ths->raftStore.currentTerm) { // syncNodeUpdateTerm(ths, pMsg->term); // } - if (pMsg->term > ths->pRaftStore->currentTerm) { + if (pMsg->term > ths->raftStore.currentTerm) { syncLogRecvRequestVoteReply(ths, pMsg, "error term"); syncNodeStepDown(ths, pMsg->term); return -1; } syncLogRecvRequestVoteReply(ths, pMsg, ""); - ASSERT(pMsg->term == ths->pRaftStore->currentTerm); + ASSERT(pMsg->term == ths->raftStore.currentTerm); // This tallies votes even when the current state is not Candidate, // but they won't be looked at, so it doesn't matter. 
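
A minimal sketch (not part of the patch) of the persistence pattern the hunks above converge on: raftStoreWriteFile and syncWriteCfgFile both serialize state to a `.bak` file, fsync it, and only then rename it over the real file, so every vote/term mutation is committed atomically. The `taos*` helpers and `TD_FILE_*` flags are used here exactly as they appear in the diff; the wrapper function itself and its reduced error handling are illustrative assumptions, not code from the repository.

```c
// Illustrative only: mirrors the write-temp / fsync / rename commit step used
// by raftStoreWriteFile and syncWriteCfgFile in the hunks above. Types and
// helpers (TdFilePtr, taosOpenFile, taosWriteFile, taosFsyncFile,
// taosRenameFile, PATH_MAX) come from the project's own headers.
static int32_t writeFileAtomic(const char *realfile, const char *buf, int32_t len) {
  char tmp[PATH_MAX] = {0};
  snprintf(tmp, sizeof(tmp), "%s.bak", realfile);

  TdFilePtr pFile = taosOpenFile(tmp, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
  if (pFile == NULL) return -1;

  // write the full payload and force it to disk before publishing it
  if (taosWriteFile(pFile, buf, len) <= 0) goto _err;
  if (taosFsyncFile(pFile) < 0) goto _err;
  taosCloseFile(&pFile);

  // the rename is the atomic "commit": readers see either the old or the new
  // file, never a partially written one
  return taosRenameFile(tmp, realfile) == 0 ? 0 : -1;

_err:
  taosCloseFile(&pFile);
  return -1;
}
```

In the patch itself this sequence is inlined in each writer rather than factored into a helper; the sketch only makes the ordering (open temp, write, fsync, close, rename) explicit.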
diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index b55aae4c768415d5bcf810fed9bfbc1e78ac0b1b..9373eccaef6b27fd451fa77b360972e47cbea3d7 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -143,7 +143,7 @@ static void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl, bool rsp) { .state = pNode->state, .seqNum = *pSeqNum, .term = SYNC_TERM_INVALID, - .currentTerm = pNode->pRaftStore->currentTerm, + .currentTerm = pNode->raftStore.currentTerm, .flag = 0, }; diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index b68b735f4688dc7ecf99fdfdfcbcd1beeb9ce7f4..880c76e4ddebe6ff443b59769fdcdb28a477a65f 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -43,7 +43,7 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI pSender->sendingMS = SYNC_SNAPSHOT_RETRY_MS; pSender->pSyncNode = pSyncNode; pSender->replicaIndex = replicaIndex; - pSender->term = pSyncNode->pRaftStore->currentTerm; + pSender->term = pSyncNode->raftStore.currentTerm; pSender->startTime = 0; pSender->endTime = 0; pSender->pSyncNode->pFsm->FpGetSnapshotInfo(pSender->pSyncNode->pFsm, &pSender->snapshot); @@ -90,7 +90,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { memset(&pSender->lastConfig, 0, sizeof(pSender->lastConfig)); pSender->sendingMS = 0; - pSender->term = pSender->pSyncNode->pRaftStore->currentTerm; + pSender->term = pSender->pSyncNode->raftStore.currentTerm; pSender->startTime = taosGetTimestampMs(); pSender->lastSendTime = pSender->startTime; pSender->finish = false; @@ -105,7 +105,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->term = pSender->pSyncNode->raftStore.currentTerm; pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -150,7 +150,7 @@ void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) { // when sender receive ack, call this function to send msg from seq // seq = ack + 1, already updated -int32_t snapshotSend(SSyncSnapshotSender *pSender) { +static int32_t snapshotSend(SSyncSnapshotSender *pSender) { // free memory last time (current seq - 1) if (pSender->pCurrentBlock != NULL) { taosMemoryFree(pSender->pCurrentBlock); @@ -185,7 +185,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->term = pSender->pSyncNode->raftStore.currentTerm; pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -226,7 +226,7 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pMsg->term = pSender->pSyncNode->raftStore.currentTerm; pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = 
pSender->snapshot.lastApplyTerm; @@ -314,7 +314,7 @@ SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId from pReceiver->pWriter = NULL; pReceiver->pSyncNode = pSyncNode; pReceiver->fromId = fromId; - pReceiver->term = pSyncNode->pRaftStore->currentTerm; + pReceiver->term = pSyncNode->raftStore.currentTerm; pReceiver->snapshot.data = NULL; pReceiver->snapshot.lastApplyIndex = SYNC_INDEX_INVALID; pReceiver->snapshot.lastApplyTerm = 0; @@ -342,23 +342,6 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) { bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver) { return pReceiver->start; } -// force stop -void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) { - sRInfo(pReceiver, "snapshot receiver force stop, writer:%p"); - - // force close, abandon incomplete data - if (pReceiver->pWriter != NULL) { - int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false, - &pReceiver->snapshot); - if (ret != 0) { - sRInfo(pReceiver, "snapshot receiver force stop failed since %s", terrstr()); - } - pReceiver->pWriter = NULL; - } - - pReceiver->start = false; -} - static int32_t snapshotReceiverStartWriter(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) { if (pReceiver->pWriter != NULL) { sRError(pReceiver, "vgId:%d, snapshot receiver writer is not null"); @@ -397,7 +380,7 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p pReceiver->start = true; pReceiver->ack = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT; - pReceiver->term = pReceiver->pSyncNode->pRaftStore->currentTerm; + pReceiver->term = pReceiver->pSyncNode->raftStore.currentTerm; pReceiver->fromId = pPreMsg->srcId; pReceiver->startTime = pPreMsg->startTime; @@ -454,9 +437,9 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap } // maybe update term - if (pReceiver->snapshot.lastApplyTerm > pReceiver->pSyncNode->pRaftStore->currentTerm) { - pReceiver->pSyncNode->pRaftStore->currentTerm = pReceiver->snapshot.lastApplyTerm; - raftStorePersist(pReceiver->pSyncNode->pRaftStore); + if (pReceiver->snapshot.lastApplyTerm > pReceiver->pSyncNode->raftStore.currentTerm) { + pReceiver->pSyncNode->raftStore.currentTerm = pReceiver->snapshot.lastApplyTerm; + (void)raftStoreWriteFile(pReceiver->pSyncNode); } // stop writer, apply data @@ -590,7 +573,7 @@ _START_RECEIVER: if (snapshotReceiverIsStart(pReceiver)) { sRInfo(pReceiver, "snapshot receiver already start and force stop pre one"); - snapshotReceiverForceStop(pReceiver); + snapshotReceiverStop(pReceiver); } snapshotReceiverStart(pReceiver, pMsg); // set start-time same with sender @@ -609,7 +592,7 @@ _SEND_REPLY: SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->pRaftStore->currentTerm; + pRspMsg->term = pSyncNode->raftStore.currentTerm; pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -665,7 +648,7 @@ _SEND_REPLY: SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->pRaftStore->currentTerm; + pRspMsg->term = pSyncNode->raftStore.currentTerm; pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -715,7 +698,7 @@ static int32_t syncNodeOnSnapshotReceive(SSyncNode *pSyncNode, SyncSnapshotSend SyncSnapshotRsp *pRspMsg = 
rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->pRaftStore->currentTerm; + pRspMsg->term = pSyncNode->raftStore.currentTerm; pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -762,7 +745,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->pRaftStore->currentTerm; + pRspMsg->term = pSyncNode->raftStore.currentTerm; pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -811,13 +794,13 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { return -1; } - if (pMsg->term < pSyncNode->pRaftStore->currentTerm) { + if (pMsg->term < pSyncNode->raftStore.currentTerm) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "reject since small term"); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; return -1; } - if (pMsg->term > pSyncNode->pRaftStore->currentTerm) { + if (pMsg->term > pSyncNode->raftStore.currentTerm) { syncNodeStepDown(pSyncNode, pMsg->term); } syncNodeResetElectTimer(pSyncNode); @@ -825,7 +808,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { // state, term, seq/ack int32_t code = 0; if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { - if (pMsg->term == pSyncNode->pRaftStore->currentTerm) { + if (pMsg->term == pSyncNode->raftStore.currentTerm) { if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq pre-snapshot"); code = syncNodeOnSnapshotPre(pSyncNode, pMsg); @@ -842,7 +825,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { } else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_FORCE_CLOSE) { // force close, no response syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process force stop"); - snapshotReceiverForceStop(pReceiver); + snapshotReceiverStop(pReceiver); } else if (pMsg->seq > SYNC_SNAPSHOT_SEQ_BEGIN && pMsg->seq < SYNC_SNAPSHOT_SEQ_END) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq data"); code = syncNodeOnSnapshotReceive(pSyncNode, pMsg); @@ -909,7 +892,7 @@ static int32_t syncNodeOnSnapshotPreRsp(SSyncNode *pSyncNode, SSyncSnapshotSende SyncSnapshotSend *pSendMsg = rpcMsg.pCont; pSendMsg->srcId = pSender->pSyncNode->myRaftId; pSendMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pSendMsg->term = pSender->pSyncNode->pRaftStore->currentTerm; + pSendMsg->term = pSender->pSyncNode->raftStore.currentTerm; pSendMsg->beginIndex = pSender->snapshotParam.start; pSendMsg->lastIndex = pSender->snapshot.lastApplyIndex; pSendMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -968,10 +951,10 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { goto _ERROR; } - if (pMsg->term != pSyncNode->pRaftStore->currentTerm) { + if (pMsg->term != pSyncNode->raftStore.currentTerm) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender and receiver term not match"); sSError(pSender, "snapshot sender term not equal, msg term:%" PRId64 " currentTerm:%" PRId64, pMsg->term, - pSyncNode->pRaftStore->currentTerm); + pSyncNode->raftStore.currentTerm); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; goto _ERROR; } @@ -989,6 +972,13 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { return syncNodeOnSnapshotPreRsp(pSyncNode, pSender, pMsg); } + 
if (pSender->pReader == NULL || pSender->finish) { + syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender invalid"); + sSError(pSender, "snapshot sender invalid, pReader:%p finish:%d", pMsg->code, pSender->pReader, pSender->finish); + terrno = pMsg->code; + goto _ERROR; + } + if (pMsg->ack == SYNC_SNAPSHOT_SEQ_BEGIN) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "process seq begin"); if (snapshotSenderUpdateProgress(pSender, pMsg) != 0) { diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 0ec24d5326c83fdf70111cc6ab16ee7a49533cdf..b246d9a79d58c153d9e5b12775965929f9ec7adf 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -17,20 +17,20 @@ #include "syncUtil.h" #include "syncIndexMgr.h" #include "syncMessage.h" +#include "syncPipeline.h" #include "syncRaftCfg.h" #include "syncRaftStore.h" #include "syncSnapshot.h" void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) { - int32_t len = snprintf(buf, bufLen, "{r-num:%d, my:%d, ", pCfg->replicaNum, pCfg->myIndex); - + int32_t len = snprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex); for (int32_t i = 0; i < pCfg->replicaNum; ++i) { + len += snprintf(buf + len, bufLen - len, "%s:%d", pCfg->nodeInfo[i].nodeFqdn, pCfg->nodeInfo[i].nodePort); if (i < pCfg->replicaNum - 1) { - len += snprintf(buf + len, bufLen - len, "%s:%d, ", pCfg->nodeInfo[i].nodeFqdn, pCfg->nodeInfo[i].nodePort); - } else { - len += snprintf(buf + len, bufLen - len, "%s:%d}", pCfg->nodeInfo[i].nodeFqdn, pCfg->nodeInfo[i].nodePort); + len += snprintf(buf + len, bufLen - len, "%s", ", "); } } + len += snprintf(buf + len, bufLen - len, "%s", "]}"); } void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) { @@ -89,32 +89,55 @@ bool syncUtilUserRollback(tmsg_t msgType) { return msgType != TDMT_SYNC_NOOP && // for leader static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { - int32_t len = 5; - + int32_t len = 0; + len += snprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i])); - + len += snprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs); if (i < pSyncNode->replicaNum - 1) { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 ",", i, tsMs); - } else { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 "}", i, tsMs); + len += snprintf(buf + len, bufLen - len, "%s", ","); } } + len += snprintf(buf + len, bufLen - len, "%s", "}"); } // for follower static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { - int32_t len = 4; - + int32_t len = 0; + len += snprintf(buf + len, bufLen - len, "%s", "{"); for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) { int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->replicasId[i])); - + len += snprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs); if (i < pSyncNode->replicaNum - 1) { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 ",", i, tsMs); - } else { - len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 "}", i, tsMs); + len += snprintf(buf + len, bufLen - len, "%s", ","); } } + len += snprintf(buf + len, bufLen - len, "%s", "}"); +} + +static void syncLogBufferStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { + SSyncLogBuffer* pBuf = pSyncNode->pLogBuf; + if (pBuf == NULL) { + return; + } + int len = 0; + len += snprintf(buf + len, bufLen - len, "[%" 
PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pBuf->startIndex, + pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex); +} + +static void syncLogReplMgrStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { + int len = 0; + len += snprintf(buf + len, bufLen - len, "%s", "{"); + for (int32_t i = 0; i < pSyncNode->replicaNum; i++) { + SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i]; + if (pMgr == NULL) break; + len += snprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 ")", i, pMgr->restored, + pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); + if (i + 1 < pSyncNode->replicaNum) { + len += snprintf(buf + len, bufLen - len, "%s", ", "); + } + } + len += snprintf(buf + len, bufLen - len, "%s", "}"); } static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { @@ -135,8 +158,8 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { } void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) { - if (pNode == NULL || pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return; - int64_t currentTerm = pNode->pRaftStore->currentTerm; + if (pNode == NULL || pNode->pLogStore == NULL) return; + int64_t currentTerm = pNode->raftStore.currentTerm; // save error code, otherwise it will be overwritten int32_t errCode = terrno; @@ -156,16 +179,19 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo int32_t cacheHit = pNode->pLogStore->cacheHit; int32_t cacheMiss = pNode->pLogStore->cacheMiss; - char cfgStr[1024]; + char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); - char peerStr[1024] = "{"; - syncPeerState2Str(pNode, peerStr, sizeof(peerStr)); + char replMgrStatesStr[1024] = ""; + syncLogReplMgrStates2Str(pNode, replMgrStatesStr, sizeof(replMgrStatesStr)); - char hbrTimeStr[256] = "hbr:{"; + char bufferStatesStr[256] = ""; + syncLogBufferStates2Str(pNode, bufferStatesStr, sizeof(bufferStatesStr)); + + char hbrTimeStr[256] = ""; syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr)); - char hbTimeStr[256] = "hb:{"; + char hbTimeStr[256] = ""; syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr)); char eventLog[512]; // {0}; @@ -181,28 +207,28 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo // restore error code terrno = errCode; - + if (pNode != NULL) { taosPrintLog(flags, level, dflag, "vgId:%d, %s, sync:%s, term:%" PRIu64 ", commit-index:%" PRId64 ", first-ver:%" PRId64 ", last-ver:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64 ", snap-term:%" PRIu64 - ", elect-times:%d, as-leader-times:%d, cfg-ch-times:%d, hit:%d, mis:%d, hb-slow:%d, hbr-slow:%d, " + ", elect-times:%d, as-leader-times:%d, cfg-ch-times:%d, hb-slow:%d, hbr-slow:%d, " "aq-items:%d, snaping:%" PRId64 ", replicas:%d, last-cfg:%" PRId64 - ", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64 ", %s, %s, %s, %s", + ", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64 + ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s", pNode->vgId, eventLog, syncStr(pNode->state), currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->electNum, - pNode->becomeLeaderNum, pNode->configChangeNum, cacheHit, cacheMiss, pNode->hbSlowNum, - pNode->hbrSlowNum, aqItems, pNode->snapshottingIndex, pNode->replicaNum, - pNode->raftCfg.lastConfigIndex, pNode->changing, 
pNode->restoreFinish, syncNodeDynamicQuorum(pNode), - pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr, hbTimeStr, - hbrTimeStr); + pNode->becomeLeaderNum, pNode->configChangeNum, pNode->hbSlowNum, pNode->hbrSlowNum, aqItems, + pNode->snapshottingIndex, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, + pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, + bufferStatesStr, replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr); } } void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotSender* pSender, const char* format, ...) { SSyncNode* pNode = pSender->pSyncNode; - if (pNode == NULL || pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return; + if (pNode == NULL || pNode->pLogStore == NULL) return; SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0}; if (pNode->pFsm != NULL && pNode->pFsm->FpGetSnapshotInfo != NULL) { @@ -216,7 +242,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla logBeginIndex = pNode->pLogStore->syncLogBeginIndex(pNode->pLogStore); } - char cfgStr[1024]; + char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); char peerStr[1024] = "{"; @@ -238,7 +264,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla pNode->vgId, eventLog, syncStr(pNode->state), pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->replicaIndex, - DID(&pNode->replicasId[pSender->replicaIndex]), pNode->pRaftStore->currentTerm, pNode->commitIndex, + DID(&pNode->replicasId[pSender->replicaIndex]), pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), @@ -248,7 +274,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver, const char* format, ...) 
{ SSyncNode* pNode = pReceiver->pSyncNode; - if (pNode == NULL || pNode->pRaftStore == NULL || pNode->pLogStore == NULL) return; + if (pNode == NULL || pNode->pLogStore == NULL) return; SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0}; if (pNode->pFsm != NULL && pNode->pFsm->FpGetSnapshotInfo != NULL) { @@ -262,7 +288,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df logBeginIndex = pNode->pLogStore->syncLogBeginIndex(pNode->pLogStore); } - char cfgStr[1024]; + char cfgStr[1024] = ""; syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr)); char peerStr[1024] = "{"; @@ -285,7 +311,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, - pReceiver->snapshot.lastConfigIndex, pNode->pRaftStore->currentTerm, pNode->commitIndex, logBeginIndex, + pReceiver->snapshot.lastConfigIndex, pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), @@ -304,7 +330,7 @@ void syncLogRecvTimer(SSyncNode* pSyncNode, const SyncTimeout* pMsg, const char* void syncLogRecvLocalCmd(SSyncNode* pSyncNode, const SyncLocalCmd* pMsg, const char* s) { sNTrace(pSyncNode, "recv sync-local-cmd {cmd:%d-%s, sd-new-term:%" PRId64 ", fc-index:%" PRId64 "}, %s", pMsg->cmd, - syncLocalCmdGetStr(pMsg->cmd), pMsg->sdNewTerm, pMsg->fcIndex, s); + syncLocalCmdGetStr(pMsg->cmd), pMsg->currentTerm, pMsg->commitIndex, s); } void syncLogSendAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s) { diff --git a/source/libs/sync/test/syncLocalCmdTest.cpp b/source/libs/sync/test/syncLocalCmdTest.cpp index 8003cce7cc7e386223d5fc9116acc1f9cdec229c..2c839d0acb7ab6301fca49222ef37456f2b11099 100644 --- a/source/libs/sync/test/syncLocalCmdTest.cpp +++ b/source/libs/sync/test/syncLocalCmdTest.cpp @@ -16,8 +16,8 @@ SyncLocalCmd *createMsg() { pMsg->srcId.vgId = 100; pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678); pMsg->destId.vgId = 100; - pMsg->sdNewTerm = 123; - pMsg->fcIndex = 456; + // pMsg->sdNewTerm = 123; + // pMsg->fcIndex = 456; pMsg->cmd = SYNC_LOCAL_CMD_STEP_DOWN; return pMsg; diff --git a/source/libs/sync/test/syncRaftStoreTest.cpp b/source/libs/sync/test/syncRaftStoreTest.cpp index 87798a7d808db32f176901468cf8c7dc3a6dd044..a8022184ef6885e9fb3c8e17e032bce94587b46f 100644 --- a/source/libs/sync/test/syncRaftStoreTest.cpp +++ b/source/libs/sync/test/syncRaftStoreTest.cpp @@ -33,35 +33,35 @@ int main() { initRaftId(); - SRaftStore* pRaftStore = raftStoreOpen("./test_raft_store.json"); - assert(pRaftStore != NULL); - raftStoreLog2((char*)"==raftStoreOpen==", pRaftStore); + // SRaftStore* pRaftStore = raftStoreOpen("./test_raft_store.json"); + // assert(pRaftStore != NULL); + // raftStoreLog2((char*)"==raftStoreOpen==", pRaftStore); - raftStoreSetTerm(pRaftStore, 100); - raftStoreLog2((char*)"==raftStoreSetTerm==", pRaftStore); + // raftStoreSetTerm(pRaftStore, 100); + // raftStoreLog2((char*)"==raftStoreSetTerm==", 
pRaftStore); - raftStoreVote(pRaftStore, &ids[0]); - raftStoreLog2((char*)"==raftStoreVote==", pRaftStore); + // raftStoreVote(pRaftStore, &ids[0]); + // raftStoreLog2((char*)"==raftStoreVote==", pRaftStore); - raftStoreClearVote(pRaftStore); - raftStoreLog2((char*)"==raftStoreClearVote==", pRaftStore); + // raftStoreClearVote(pRaftStore); + // raftStoreLog2((char*)"==raftStoreClearVote==", pRaftStore); - raftStoreVote(pRaftStore, &ids[1]); - raftStoreLog2((char*)"==raftStoreVote==", pRaftStore); + // raftStoreVote(pRaftStore, &ids[1]); + // raftStoreLog2((char*)"==raftStoreVote==", pRaftStore); - raftStoreNextTerm(pRaftStore); - raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); + // raftStoreNextTerm(pRaftStore); + // raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); - raftStoreNextTerm(pRaftStore); - raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); + // raftStoreNextTerm(pRaftStore); + // raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); - raftStoreNextTerm(pRaftStore); - raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); + // raftStoreNextTerm(pRaftStore); + // raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); - raftStoreNextTerm(pRaftStore); - raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); + // raftStoreNextTerm(pRaftStore); + // raftStoreLog2((char*)"==raftStoreNextTerm==", pRaftStore); - raftStoreClose(pRaftStore); + // raftStoreClose(pRaftStore); return 0; } diff --git a/source/libs/sync/test/syncSnapshotReceiverTest.cpp b/source/libs/sync/test/syncSnapshotReceiverTest.cpp index 49b06a7d1bfca855d661e1c332f951b8c0b7f657..1fca04a1adf866460c1c089d2b6d554c7bc46ecc 100644 --- a/source/libs/sync/test/syncSnapshotReceiverTest.cpp +++ b/source/libs/sync/test/syncSnapshotReceiverTest.cpp @@ -29,7 +29,7 @@ int32_t SnapshotDoWrite(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_ SSyncSnapshotReceiver* createReceiver() { SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(*pSyncNode)); - pSyncNode->pRaftStore = (SRaftStore*)taosMemoryMalloc(sizeof(*(pSyncNode->pRaftStore))); + // pSyncNode->pRaftStore = (SRaftStore*)taosMemoryMalloc(sizeof(*(pSyncNode->pRaftStore))); pSyncNode->pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(*(pSyncNode->pFsm))); #if 0 diff --git a/source/libs/sync/test/syncSnapshotSenderTest.cpp b/source/libs/sync/test/syncSnapshotSenderTest.cpp index bb697d541acbbec35289e723bb351c7117a3f3e6..a1768c2ce50302af7a592f5f8fee5174b550f8fc 100644 --- a/source/libs/sync/test/syncSnapshotSenderTest.cpp +++ b/source/libs/sync/test/syncSnapshotSenderTest.cpp @@ -29,7 +29,7 @@ int32_t SnapshotDoWrite(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_ SSyncSnapshotSender* createSender() { SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(*pSyncNode)); - pSyncNode->pRaftStore = (SRaftStore*)taosMemoryMalloc(sizeof(*(pSyncNode->pRaftStore))); + // pSyncNode->pRaftStore = (SRaftStore*)taosMemoryMalloc(sizeof(*(pSyncNode->pRaftStore))); pSyncNode->pFsm = (SSyncFSM*)taosMemoryMalloc(sizeof(*(pSyncNode->pFsm))); #if 0 diff --git a/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c b/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c index f1db2f0204faedcac7ad5599a9c376142a2dc2c5..1dbf4fb4fb70333d6a0cf0cb0a61138e58877504 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c @@ -80,7 +80,7 @@ cJSON* syncNode2Json(const SSyncNode* pSyncNode) { // tla+ server vars cJSON_AddNumberToObject(pRoot, "state", pSyncNode->state); 
cJSON_AddStringToObject(pRoot, "state_str", syncStr(pSyncNode->state)); - cJSON_AddItemToObject(pRoot, "pRaftStore", raftStore2Json(pSyncNode->pRaftStore)); + // cJSON_AddItemToObject(pRoot, "pRaftStore", raftStore2Json(&pSyncNode.raftStore)); // tla+ candidate vars cJSON_AddItemToObject(pRoot, "pVotesGranted", voteGranted2Json(pSyncNode->pVotesGranted)); @@ -199,7 +199,7 @@ inline char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { ", sby:%d, " "r-num:%d, " "lcfg:%" PRId64 ", chging:%d, rsto:%d", - pSyncNode->vgId, syncStr(pSyncNode->state), pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, + pSyncNode->vgId, syncStr(pSyncNode->state), pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, pSyncNode->raftCfg.isStandBy, pSyncNode->replicaNum, pSyncNode->raftCfg.lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish); diff --git a/source/libs/sync/test/sync_test_lib/src/syncMessageDebug.c b/source/libs/sync/test/sync_test_lib/src/syncMessageDebug.c index ae83bf9ead0d6436bcb6375bd5ab4724799e1efa..5f011ffe6902be629d82023d28fdfb7508fc68d7 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncMessageDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncMessageDebug.c @@ -2858,11 +2858,11 @@ cJSON* syncLocalCmd2Json(const SyncLocalCmd* pMsg) { cJSON_AddNumberToObject(pRoot, "cmd", pMsg->cmd); - snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->sdNewTerm); - cJSON_AddStringToObject(pRoot, "sd-new-term", u64buf); + // snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->sdNewTerm); + // cJSON_AddStringToObject(pRoot, "sd-new-term", u64buf); - snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->fcIndex); - cJSON_AddStringToObject(pRoot, "fc-index", u64buf); + // snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->fcIndex); + // cJSON_AddStringToObject(pRoot, "fc-index", u64buf); } cJSON* pJson = cJSON_CreateObject(); diff --git a/source/libs/sync/test/sync_test_lib/src/syncRaftStoreDebug.c b/source/libs/sync/test/sync_test_lib/src/syncRaftStoreDebug.c index c462b3275dfaa9298aa312a5564be3148a5d3483..f6cd381e547b6c0d38782b29bafb0665d0c05379 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncRaftStoreDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncRaftStoreDebug.c @@ -41,8 +41,8 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) { cJSON_AddNumberToObject(pVoteFor, "vgId", pRaftStore->voteFor.vgId); cJSON_AddItemToObject(pRoot, "voteFor", pVoteFor); - int hasVoted = raftStoreHasVoted(pRaftStore); - cJSON_AddNumberToObject(pRoot, "hasVoted", hasVoted); + // int hasVoted = raftStoreHasVoted(pRaftStore); + // cJSON_AddNumberToObject(pRoot, "hasVoted", hasVoted); } cJSON *pJson = cJSON_CreateObject(); diff --git a/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c b/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c index f1237e528251d19501d09326782e5659abd34895..d8740de16ae46214c8c5ed2ab2e1d622c003c300 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c @@ -137,7 +137,7 @@ int32_t syncNodeOnPreSnapshot(SSyncNode *ths, SyncPreSnapshot *pMsg) { SyncPreSnapshotReply *pMsgReply = syncPreSnapshotReplyBuild(ths->vgId); pMsgReply->srcId = ths->myRaftId; pMsgReply->destId = pMsg->srcId; - pMsgReply->term = ths->pRaftStore->currentTerm; + pMsgReply->term = ths->raftStore.currentTerm; SSyncLogStoreData *pData = ths->pLogStore->data; SWal *pWal = pData->pWal; diff --git a/source/libs/tdb/inc/tdb.h 
b/source/libs/tdb/inc/tdb.h index 10a99bb1fab525a9ff4569d55c3d688bb099ee46..0e20941b3ae472986d5116616eda05af93a83dc1 100644 --- a/source/libs/tdb/inc/tdb.h +++ b/source/libs/tdb/inc/tdb.h @@ -74,7 +74,12 @@ int32_t tdbTbcUpsert(TBC *pTbc, const void *pKey, int nKey, const void *pData, i int32_t tdbTxnOpen(TXN *pTxn, int64_t txnid, void *(*xMalloc)(void *, size_t), void (*xFree)(void *, void *), void *xArg, int flags); -int32_t tdbTxnClose(TXN *pTxn); +int32_t tdbTxnCloseImpl(TXN *pTxn); +#define tdbTxnClose(pTxn) \ + do { \ + tdbTxnCloseImpl(pTxn); \ + (pTxn) = NULL; \ + } while (0) // other void tdbFree(void *); diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c index 734bac5c872c457f1bf57bcf32d002220f4194d1..4cd6a39bfe00f56631abea8dfd3c935dff1e580b 100644 --- a/source/libs/tdb/src/db/tdbBtree.c +++ b/source/libs/tdb/src/db/tdbBtree.c @@ -1065,11 +1065,11 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const } else { int nLeftKey = kLen; // pack partial key and nextPgno - memcpy(pCell + nHeader, pKey, nLocal - 4); - nLeft -= nLocal - 4; - nLeftKey -= nLocal - 4; + memcpy(pCell + nHeader, pKey, nLocal - nHeader - sizeof(pgno)); + nLeft -= nLocal - nHeader - sizeof(pgno); + nLeftKey -= nLocal - nHeader - sizeof(pgno); - memcpy(pCell + nHeader + nLocal - 4, &pgno, sizeof(pgno)); + memcpy(pCell + nLocal - sizeof(pgno), &pgno, sizeof(pgno)); int lastKeyPageSpace = 0; // pack left key & val to ovpages @@ -1089,9 +1089,12 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const if (lastKeyPage) { if (lastKeyPageSpace >= vLen) { - memcpy(pBuf + kLen - nLeftKey, pVal, vLen); + if (vLen > 0) { + memcpy(pBuf + kLen - nLeftKey, pVal, vLen); + + nLeft -= vLen; + } - nLeft -= vLen; pgno = 0; } else { memcpy(pBuf + kLen - nLeftKey, pVal, lastKeyPageSpace); @@ -1113,7 +1116,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const } } - memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno)); + memcpy(pBuf + bytes, &pgno, sizeof(pgno)); ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0); if (ret < 0) { @@ -1327,11 +1330,11 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, } TDB_CELLDECODER_SET_FREE_KEY(pDecoder); - memcpy(pDecoder->pKey, pCell + nHeader, nLocal - 4); - nLeft -= nLocal - 4; - nLeftKey -= nLocal - 4; + memcpy(pDecoder->pKey, pCell + nHeader, nLocal - nHeader - sizeof(pgno)); + nLeft -= nLocal - nHeader - sizeof(pgno); + nLeftKey -= nLocal - nHeader - sizeof(pgno); - memcpy(&pgno, pCell + nHeader + nLocal - 4, sizeof(pgno)); + memcpy(&pgno, pCell + nLocal - sizeof(pgno), sizeof(pgno)); int lastKeyPageSpace = 0; // load left key & val to ovpages @@ -1357,9 +1360,11 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, if (lastKeyPage) { if (lastKeyPageSpace >= vLen) { - pDecoder->pVal = ofpCell + kLen - nLeftKey; + if (vLen > 0) { + pDecoder->pVal = ofpCell + kLen - nLeftKey; - nLeft -= vLen; + nLeft -= vLen; + } pgno = 0; } else { // read partial val to local diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index 9131eb0909c2534a0955a1aa9c6c8e8c153271ca..ced867e938fc66574e70774e0e99000be2bf2fa5 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -731,8 +731,8 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage int ret; int lcode; int nLoops; - i64 nRead; - SPgno pgno; + i64 nRead = 0; + SPgno pgno = 0; int init 
= 0; lcode = TDB_TRY_LOCK_PAGE(pPage); diff --git a/source/libs/tdb/src/db/tdbTxn.c b/source/libs/tdb/src/db/tdbTxn.c index bc23fdb75949ef97de0711a23f187f590adf14f5..0aeed3c140f8317c6abcec16a9fd049f6be89e1a 100644 --- a/source/libs/tdb/src/db/tdbTxn.c +++ b/source/libs/tdb/src/db/tdbTxn.c @@ -31,13 +31,18 @@ int tdbTxnOpen(TXN *pTxn, int64_t txnid, void *(*xMalloc)(void *, size_t), void return 0; } -int tdbTxnClose(TXN *pTxn) { +int tdbTxnCloseImpl(TXN *pTxn) { if (pTxn) { if (pTxn->jPageSet) { hashset_destroy(pTxn->jPageSet); pTxn->jPageSet = NULL; } + if (pTxn->jfd) { + tdbOsClose(pTxn->jfd); + ASSERT(pTxn->jfd == NULL); + } + tdbOsFree(pTxn); } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index bf9a6c005103c76a6e8201d4833870527d107f36..5f964f6b1ab0e7145f61b25e2e51a1a1d1c86a9e 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -100,14 +100,7 @@ typedef void* queue[2]; #define TRANS_READ_TIMEOUT 3000 // read timeout (ms) #define TRANS_PACKET_LIMIT 1024 * 1024 * 512 -#define TRANS_MAGIC_NUM 0x5f375a86 - -#define TRANS_NOVALID_PACKET(src) ((src) != TRANS_MAGIC_NUM ? 1 : 0) - -#define TRANS_PACKET_LIMIT 1024 * 1024 * 512 - -#define TRANS_MAGIC_NUM 0x5f375a86 - +#define TRANS_MAGIC_NUM 0x5f375a86 #define TRANS_NOVALID_PACKET(src) ((src) != TRANS_MAGIC_NUM ? 1 : 0) typedef SRpcMsg STransMsg; diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index 00854b5ee596dd8af75b3bc6abe5e1f59b5aa1ed..cd508f6fe9e71ef90fc5c156685e95f5972931e3 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -35,6 +35,7 @@ typedef struct SHttpModule { typedef struct SHttpMsg { queue q; char* server; + char* uri; int32_t port; char* cont; int32_t len; @@ -63,26 +64,26 @@ static void httpHandleReq(SHttpMsg* msg); static void httpHandleQuit(SHttpMsg* msg); static int32_t httpSendQuit(); -static int32_t taosSendHttpReportImpl(const char* server, uint16_t port, char* pCont, int32_t contLen, +static int32_t taosSendHttpReportImpl(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag); -static int32_t taosBuildHttpHeader(const char* server, int32_t contLen, char* pHead, int32_t headLen, +static int32_t taosBuildHttpHeader(const char* server, const char* uri, int32_t contLen, char* pHead, int32_t headLen, EHttpCompFlag flag) { if (flag == HTTP_FLAT) { return snprintf(pHead, headLen, - "POST /report HTTP/1.1\n" + "POST %s HTTP/1.1\n" "Host: %s\n" "Content-Type: application/json\n" "Content-Length: %d\n\n", - server, contLen); + uri, server, contLen); } else if (flag == HTTP_GZIP) { return snprintf(pHead, headLen, - "POST /report HTTP/1.1\n" + "POST %s HTTP/1.1\n" "Host: %s\n" "Content-Type: application/json\n" "Content-Encoding: gzip\n" "Content-Length: %d\n\n", - server, contLen); + uri, server, contLen); } else { terrno = TSDB_CODE_INVALID_CFG; return -1; @@ -181,6 +182,7 @@ static void httpDestroyMsg(SHttpMsg* msg) { if (msg == NULL) return; taosMemoryFree(msg->server); + taosMemoryFree(msg->uri); taosMemoryFree(msg->cont); taosMemoryFree(msg); } @@ -293,10 +295,11 @@ int32_t httpSendQuit() { return 0; } -static int32_t taosSendHttpReportImpl(const char* server, uint16_t port, char* pCont, int32_t contLen, +static int32_t taosSendHttpReportImpl(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) { SHttpMsg* msg = taosMemoryMalloc(sizeof(SHttpMsg)); msg->server = 
strdup(server); + msg->uri = strdup(uri); msg->port = port; msg->cont = taosMemoryMalloc(contLen); memcpy(msg->cont, pCont, contLen); @@ -309,12 +312,10 @@ static int32_t taosSendHttpReportImpl(const char* server, uint16_t port, char* p httpDestroyMsg(msg); tError("http-report already released"); return -1; - } else { - msg->http = load; - transAsyncSend(load->asyncPool, &(msg->q)); } - - return 0; + + msg->http = load; + return transAsyncSend(load->asyncPool, &(msg->q)); } static void httpDestroyClientCb(uv_handle_t* handle) { @@ -360,7 +361,7 @@ static void httpHandleReq(SHttpMsg* msg) { int32_t len = 2048; char* header = taosMemoryCalloc(1, len); - int32_t headLen = taosBuildHttpHeader(msg->server, msg->len, header, len, msg->flag); + int32_t headLen = taosBuildHttpHeader(msg->server, msg->uri, msg->len, header, len, msg->flag); if (headLen < 0) { taosMemoryFree(header); goto END; @@ -380,6 +381,7 @@ static void httpHandleReq(SHttpMsg* msg) { cli->port = msg->port; cli->dest = dest; + taosMemoryFree(msg->uri); taosMemoryFree(msg); uv_tcp_init(http->loop, &cli->tcp); @@ -406,9 +408,9 @@ END: httpDestroyMsg(msg); } -int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) { +int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) { taosThreadOnce(&transHttpInit, transHttpEnvInit); - return taosSendHttpReportImpl(server, port, pCont, contLen, flag); + return taosSendHttpReportImpl(server, uri, port, pCont, contLen, flag); } static void transHttpEnvInit() { diff --git a/source/libs/transport/src/tmsgcb.c b/source/libs/transport/src/tmsgcb.c index 4131619ed9b89f8c4aa7cc74c2608327f6353a0c..9b8f1dfd07eff893225d2611f158f822fb67e735 100644 --- a/source/libs/transport/src/tmsgcb.c +++ b/source/libs/transport/src/tmsgcb.c @@ -59,6 +59,12 @@ void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type) { (*defaultMsgCb.re void tmsgReportStartup(const char* name, const char* desc) { (*defaultMsgCb.reportStartupFp)(name, desc); } -int32_t tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port) { +bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port) { return (*defaultMsgCb.updateDnodeInfoFp)(defaultMsgCb.data, dnodeId, clusterId, fqdn, port); } + +void tmsgUpdateDnodeEpSet(SEpSet* epset) { + for (int32_t i = 0; i < epset->numOfEps; ++i) { + tmsgUpdateDnodeInfo(NULL, NULL, epset->eps[i].fqdn, &epset->eps[i].port); + } +} \ No newline at end of file diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 2632d290e956c536bdcfb3ac70d20aa3607e7382..1a99db5f992d51f5e01ba5120e2099f1d53a307f 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1126,7 +1126,7 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { int ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); if (ret != 0) { - tGTrace("%s conn %p failed to connect to %s:%d, reason:%s", pTransInst->label, conn, conn->ip, conn->port, + tGError("%s conn %p failed to connect to %s:%d, reason:%s", pTransInst->label, conn, conn->ip, conn->port, uv_err_name(ret)); uv_timer_stop(conn->timer); diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index da8cf24627a8f672e101856451210ed5faf975b9..07109883dbc580b29ad84923b2e861ac8724ff0a 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c 
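The walMeta.c hunk that follows adds walTrimIdxFile, which truncates a .idx file down to the size implied by the covered version range so that a partially written tail cannot poison later repair passes. A minimal sketch of just that size arithmetic, using a stand-in entry layout (the real SWalIdxEntry and taosFtruncateFile come from the wal and os headers):

```c
#include <stdint.h>

typedef struct { int64_t ver; int64_t offset; } IdxEntrySketch;  // stand-in for SWalIdxEntry

// Expected .idx size for a log file covering [firstVer, lastVer]; any bytes past
// this offset are a stale or partially written tail and get truncated away.
static int64_t expectedIdxSize(int64_t firstVer, int64_t lastVer) {
  int64_t records = lastVer - firstVer + 1;
  if (records < 0) records = 0;                       // TMAX(0, ...) in the hunk below
  return records * (int64_t)sizeof(IdxEntrySketch);
}
```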
@@ -289,19 +289,10 @@ void walAlignVersions(SWal* pWal) { } pWal->vers.lastVer = pWal->vers.snapshotVer; } - if (pWal->vers.commitVer < pWal->vers.snapshotVer) { - wWarn("vgId:%d, commitVer:%" PRId64 " is less than snapshotVer:%" PRId64 ". align with it.", pWal->cfg.vgId, - pWal->vers.commitVer, pWal->vers.snapshotVer); - pWal->vers.commitVer = pWal->vers.snapshotVer; - } - if (pWal->vers.appliedVer < pWal->vers.snapshotVer) { - wWarn("vgId:%d, appliedVer:%" PRId64 " is less than snapshotVer:%" PRId64 ". align with it.", pWal->cfg.vgId, - pWal->vers.appliedVer, pWal->vers.snapshotVer); - pWal->vers.appliedVer = pWal->vers.snapshotVer; - } - - pWal->vers.commitVer = TMIN(pWal->vers.lastVer, pWal->vers.commitVer); - pWal->vers.appliedVer = TMIN(pWal->vers.commitVer, pWal->vers.appliedVer); + // reset commitVer and appliedVer + pWal->vers.commitVer = pWal->vers.snapshotVer; + pWal->vers.appliedVer = pWal->vers.snapshotVer; + wInfo("vgId:%d, reset commitVer to %" PRId64, pWal->cfg.vgId, pWal->vers.commitVer); } bool walLogEntriesComplete(const SWal* pWal) { @@ -331,6 +322,35 @@ bool walLogEntriesComplete(const SWal* pWal) { return complete; } +int walTrimIdxFile(SWal* pWal, int32_t fileIdx) { + SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); + ASSERT(pFileInfo != NULL); + char fnameStr[WAL_FILE_LEN]; + walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); + + int64_t fileSize = 0; + taosStatFile(fnameStr, &fileSize, NULL); + int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); + int64_t lastEndOffset = records * sizeof(SWalIdxEntry); + + if (fileSize <= lastEndOffset) { + return 0; + } + + TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } + + wInfo("vgId:%d, trim idx file. file: %s, size: %" PRId64 ", offset: %" PRId64, pWal->cfg.vgId, fnameStr, fileSize, + lastEndOffset); + + taosFtruncateFile(pFile, lastEndOffset); + taosCloseFile(&pFile); + return 0; +} + int walCheckAndRepairMeta(SWal* pWal) { // load log files, get first/snapshot/last version info const char* logPattern = "^[0-9]+.log$"; @@ -405,6 +425,8 @@ int walCheckAndRepairMeta(SWal* pWal) { } updateMeta = true; + (void)walTrimIdxFile(pWal, fileIdx); + int64_t lastVer = walScanLogGetLastVer(pWal, fileIdx); if (lastVer < 0) { if (terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) { @@ -567,6 +589,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { goto _err; } + int64_t count = 0; while (idxEntry.ver < pFileInfo->lastVer) { /*A(idxEntry.ver == ckHead.head.version);*/ @@ -578,11 +601,11 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { idxEntry.offset, fLogNameStr); goto _err; } - wWarn("vgId:%d, wal idx append new entry %" PRId64 " %" PRId64, pWal->cfg.vgId, idxEntry.ver, idxEntry.offset); if (taosWriteFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry)) < 0) { wError("vgId:%d, failed to append file since %s. 
file:%s", pWal->cfg.vgId, terrstr(), fnameStr); goto _err; } + count++; } if (taosFsyncFile(pIdxFile) < 0) { @@ -590,6 +613,11 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { goto _err; } + if (count > 0) { + wInfo("vgId:%d, rebuilt %" PRId64 " wal idx entries until lastVer: %" PRId64, pWal->cfg.vgId, count, + pFileInfo->lastVer); + } + (void)taosCloseFile(&pLogFile); (void)taosCloseFile(&pIdxFile); return 0; diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c index fa04ba3e58a05c3ee902bde8ace004e821683f37..d3c03c335b2e570b18d78321bd3c2690e1756251 100644 --- a/source/libs/wal/src/walRef.c +++ b/source/libs/wal/src/walRef.c @@ -77,6 +77,31 @@ void walUnrefVer(SWalRef *pRef) { } #endif +SWalRef *walRefFirstVer(SWal *pWal, SWalRef *pRef) { + if (pRef == NULL) { + pRef = walOpenRef(pWal); + if (pRef == NULL) { + return NULL; + } + } + taosThreadMutexLock(&pWal->mutex); + + int64_t ver = walGetFirstVer(pWal); + + wDebug("vgId:%d, wal ref version %" PRId64 " for first", pWal->cfg.vgId, ver); + + pRef->refVer = ver; + // bsearch in fileSet + SWalFileInfo tmpInfo; + tmpInfo.firstVer = ver; + SWalFileInfo *pRet = taosArraySearch(pWal->fileInfoSet, &tmpInfo, compareWalFileInfo, TD_LE); + ASSERT(pRet != NULL); + pRef->refFile = pRet->firstVer; + + taosThreadMutexUnlock(&pWal->mutex); + return pRef; +} + SWalRef *walRefCommittedVer(SWal *pWal) { SWalRef *pRef = walOpenRef(pWal); if (pRef == NULL) { @@ -87,6 +112,8 @@ SWalRef *walRefCommittedVer(SWal *pWal) { int64_t ver = walGetCommittedVer(pWal); + wDebug("vgId:%d, wal ref version %" PRId64 " for committed", pWal->cfg.vgId, ver); + pRef->refVer = ver; // bsearch in fileSet SWalFileInfo tmpInfo; diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 4233c089a4753f2adf3c1ec0418e3262914610df..0562dc10cee9b4aef88c76a2257cb728207b6d14 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -103,7 +103,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) { wInfo("vgId:%d, wal rollback for version %" PRId64, pWal->cfg.vgId, ver); int64_t code; char fnameStr[WAL_FILE_LEN]; - if (ver > pWal->vers.lastVer || ver < pWal->vers.commitVer || ver <= pWal->vers.snapshotVer) { + if (ver > pWal->vers.lastVer || ver <= pWal->vers.commitVer || ver <= pWal->vers.snapshotVer) { terrno = TSDB_CODE_WAL_INVALID_VER; taosThreadMutexUnlock(&pWal->mutex); return -1; @@ -613,16 +613,13 @@ int32_t walWrite(SWal *pWal, int64_t index, tmsg_t msgType, const void *body, in } void walFsync(SWal *pWal, bool forceFsync) { + taosThreadMutexLock(&pWal->mutex); if (forceFsync || (pWal->cfg.level == TAOS_WAL_FSYNC && pWal->cfg.fsyncPeriod == 0)) { - wTrace("vgId:%d, fileId:%" PRId64 ".idx, do fsync", pWal->cfg.vgId, walGetCurFileFirstVer(pWal)); - if (taosFsyncFile(pWal->pIdxFile) < 0) { - wError("vgId:%d, file:%" PRId64 ".idx, fsync failed since %s", pWal->cfg.vgId, walGetCurFileFirstVer(pWal), - strerror(errno)); - } wTrace("vgId:%d, fileId:%" PRId64 ".log, do fsync", pWal->cfg.vgId, walGetCurFileFirstVer(pWal)); if (taosFsyncFile(pWal->pLogFile) < 0) { wError("vgId:%d, file:%" PRId64 ".log, fsync failed since %s", pWal->cfg.vgId, walGetCurFileFirstVer(pWal), strerror(errno)); } } + taosThreadMutexUnlock(&pWal->mutex); } diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt index 3aac5e97751295be13956283c2e8dae46f4c453e..2a4fcbcf7690f1b141062f6566e45199f23715b9 100644 --- a/source/os/CMakeLists.txt +++ b/source/os/CMakeLists.txt @@ -41,7 +41,7 @@ target_link_libraries( ) if(TD_WINDOWS) 
target_link_libraries( - os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump dbghelp + os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump dbghelp version ) elseif(TD_DARWIN_64) find_library(CORE_FOUNDATION_FRAMEWORK CoreFoundation) diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index ad005eea741caa58dd173e3a82f01b797ba705f2..71b3125de8ff42f4a3ea1ef19690c6053a00e707 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -191,7 +191,7 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { int32_t code = _stati64(path, &fileStat); #else struct stat fileStat; - int32_t code = stat(path, &fileStat); + int32_t code = stat(path, &fileStat); #endif if (code < 0) { return code; @@ -232,7 +232,7 @@ int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { #else struct stat fileStat; - int32_t code = fstat(pFile->fd, &fileStat); + int32_t code = fstat(pFile->fd, &fileStat); if (code < 0) { printf("taosFStatFile run fstat fail."); return code; @@ -368,6 +368,9 @@ int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) { #endif ASSERT(pFile->fd >= 0); // Please check if you have closed the file. if (pFile->fd < 0) { +#if FILE_WITH_LOCK + taosThreadRwlockUnlock(&(pFile->rwlock)); +#endif return -1; } int64_t leftbytes = count; @@ -415,6 +418,9 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset) #endif ASSERT(pFile->fd >= 0); // Please check if you have closed the file. if (pFile->fd < 0) { +#if FILE_WITH_LOCK + taosThreadRwlockUnlock(&(pFile->rwlock)); +#endif return -1; } #ifdef WINDOWS @@ -439,6 +445,9 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) { taosThreadRwlockWrlock(&(pFile->rwlock)); #endif if (pFile->fd < 0) { +#if FILE_WITH_LOCK + taosThreadRwlockUnlock(&(pFile->rwlock)); +#endif return 0; } @@ -476,6 +485,9 @@ int64_t taosPWriteFile(TdFilePtr pFile, const void *buf, int64_t count, int64_t #endif ASSERT(pFile->fd >= 0); // Please check if you have closed the file. if (pFile->fd < 0) { +#if FILE_WITH_LOCK + taosThreadRwlockUnlock(&(pFile->rwlock)); +#endif return 0; } #ifdef WINDOWS @@ -543,7 +555,7 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { int32_t taosLockFile(TdFilePtr pFile) { ASSERT(pFile->fd >= 0); // Please check if you have closed the file. 
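The osFile.c edits above all close the same gap: taosReadFile, taosPReadFile, taosWriteFile and taosPWriteFile take pFile->rwlock on entry and used to return early on a closed fd without releasing it, leaving the lock held for good. A minimal sketch of the corrected shape, assuming the usual os.h lock and file helpers and with the real read loop elided:

```c
// Every exit path, including the bad-fd bailout, must drop the lock taken on entry.
int64_t readFileSketch(TdFilePtr pFile, void *buf, int64_t count) {
  (void)buf;
#if FILE_WITH_LOCK
  taosThreadRwlockRdlock(&(pFile->rwlock));
#endif
  if (pFile->fd < 0) {
#if FILE_WITH_LOCK
    taosThreadRwlockUnlock(&(pFile->rwlock));   // the unlock this patch adds before returning
#endif
    return -1;
  }
  int64_t nread = count;                        // placeholder for the real read loop
#if FILE_WITH_LOCK
  taosThreadRwlockUnlock(&(pFile->rwlock));
#endif
  return nread;
}
```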
- if(pFile->fd < 0) { + if (pFile->fd < 0) { return -1; } #ifdef WINDOWS @@ -570,13 +582,13 @@ int32_t taosLockFile(TdFilePtr pFile) { int32_t taosUnLockFile(TdFilePtr pFile) { ASSERT(pFile->fd >= 0); - if(pFile->fd < 0) { + if (pFile->fd < 0) { return 0; } #ifdef WINDOWS - BOOL fSuccess = FALSE; - OVERLAPPED overlapped = {0}; - HANDLE hFile = (HANDLE)_get_osfhandle(pFile->fd); + BOOL fSuccess = FALSE; + OVERLAPPED overlapped = {0}; + HANDLE hFile = (HANDLE)_get_osfhandle(pFile->fd); fSuccess = UnlockFileEx(hFile, 0, ~0, ~0, &overlapped); if (!fSuccess) { @@ -592,7 +604,7 @@ int32_t taosFtruncateFile(TdFilePtr pFile, int64_t l_size) { if (pFile == NULL) { return 0; } - if(pFile->fd < 0) { + if (pFile->fd < 0) { printf("Ftruncate file error, fd arg was negative\n"); return -1; } @@ -669,7 +681,7 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in return 0; } ASSERT(pFileIn->fd >= 0 && pFileOut->fd >= 0); - if(pFileIn->fd < 0 || pFileOut->fd < 0) { + if (pFileIn->fd < 0 || pFileOut->fd < 0) { return 0; } @@ -829,7 +841,7 @@ int32_t taosEOFFile(TdFilePtr pFile) { return -1; } ASSERT(pFile->fp != NULL); - if(pFile->fp == NULL) { + if (pFile->fp == NULL) { return -1; } diff --git a/source/os/src/osLocale.c b/source/os/src/osLocale.c index 7319181a777cb8140396f592009507a151d2620b..b4a2845e961aa8a4c8d993304f869f8c66f3e9fb 100644 --- a/source/os/src/osLocale.c +++ b/source/os/src/osLocale.c @@ -71,7 +71,7 @@ char *taosCharsetReplace(char *charsetstr) { * seems does not response as expected. * * In some Linux systems, setLocale(LC_CTYPE, "") may return NULL, in which case the launch of - * both the TDengine Server and the Client may be interrupted. + * both the Server and the Client may be interrupted. * * In case that the setLocale failed to be executed, the right charset needs to be set. */ diff --git a/source/os/src/osMath.c b/source/os/src/osMath.c index dddadd5ff6c187621a1b37efb24882ef84448cb9..3b42c141dfe661ebc121eaa25083e3095fdae1ed 100644 --- a/source/os/src/osMath.c +++ b/source/os/src/osMath.c @@ -15,8 +15,8 @@ #define ALLOW_FORBID_FUNC #define _DEFAULT_SOURCE -#include "os.h" #include +#include "os.h" #ifdef WINDOWS void swapStr(char* j, char* J, int width) { @@ -32,7 +32,17 @@ void swapStr(char* j, char* J, int width) { } #endif +int32_t qsortHelper(const void* p1, const void* p2, const void* param) { + __compar_fn_t comparFn = param; + return comparFn(p1, p2); +} + // todo refactor: 1) move away; 2) use merge sort instead; 3) qsort is not a stable sort actually. 
-void taosSort(void* arr, int64_t sz, int64_t width, __compar_fn_t compar) { - qsort(arr, sz, width, compar); +void taosSort(void* base, int64_t sz, int64_t width, __compar_fn_t compar) { +#ifdef _ALPINE + void* param = compar; + taosqsort(base, width, sz, param, qsortHelper); +#else + qsort(base, sz, width, compar); +#endif } diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 25af8f041ed24bf3b870ce7ab30d0d518f751764..b9f537ad34a15c89a003390c2173f70b227beb2b 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -217,7 +217,7 @@ void taosReleaseConv(int32_t idx, iconv_t conv, ConvType type) { bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len) { #ifdef DISALLOW_NCHAR_WITHOUT_ICONV - printf("Nchar cannot be read and written without iconv, please install iconv library and recompile TDengine.\n"); + printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n"); return -1; #else memset(ucs4, 0, ucs4_max_len); @@ -245,7 +245,7 @@ bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4 int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) { #ifdef DISALLOW_NCHAR_WITHOUT_ICONV - printf("Nchar cannot be read and written without iconv, please install iconv library and recompile TDengine.\n"); + printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n"); return -1; #else @@ -263,7 +263,7 @@ int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) { } bool taosValidateEncodec(const char *encodec) { #ifdef DISALLOW_NCHAR_WITHOUT_ICONV - printf("Nchar cannot be read and written without iconv, please install iconv library and recompile TDengine.\n"); + printf("Nchar cannot be read and written without iconv, please install iconv library and recompile.\n"); return true; #else iconv_t cd = iconv_open(encodec, DEFAULT_UNICODE_ENCODEC); diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 35e76f0e67779189ecec915dee0231b5afe63bf4..b5c6edc829568b4fcc6bed96415ba7d630fbb084 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -18,6 +18,7 @@ #include "taoserror.h" #define PROCESS_ITEM 12 +#define UUIDLEN37 37 typedef struct { uint64_t user; @@ -279,11 +280,46 @@ int32_t taosGetEmail(char *email, int32_t maxLen) { #endif } +#ifdef WINDOWS +bool getWinVersionReleaseName(char *releaseName, int32_t maxLen) { + TCHAR szFileName[MAX_PATH]; + DWORD dwHandle; + DWORD dwLen; + LPVOID lpData; + UINT uLen; + VS_FIXEDFILEINFO *pFileInfo; + + GetWindowsDirectory(szFileName, MAX_PATH); + wsprintf(szFileName, L"%s%s", szFileName, L"\\explorer.exe"); + dwLen = GetFileVersionInfoSize(szFileName, &dwHandle); + if (dwLen == 0) { + return false; + } + + lpData = malloc(dwLen); + if (lpData == NULL) return false; + if (!GetFileVersionInfo(szFileName, dwHandle, dwLen, lpData)) { + free(lpData); + return false; + } + if (!VerQueryValue(lpData, L"\\", (LPVOID *)&pFileInfo, &uLen)) { + free(lpData); + return false; + } + + snprintf(releaseName, maxLen, "Windows %d.%d", HIWORD(pFileInfo->dwProductVersionMS), + LOWORD(pFileInfo->dwProductVersionMS)); + free(lpData); + return true; +} +#endif int32_t taosGetOsReleaseName(char *releaseName, int32_t maxLen) { #ifdef WINDOWS - snprintf(releaseName, maxLen, "Windows"); + if (!getWinVersionReleaseName(releaseName, maxLen)) { + snprintf(releaseName, maxLen, "Windows"); + } return 0; #elif defined(_TD_DARWIN_64) char osversion[32]; 
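The osSysinfo.c hunk that follows swaps a raw memcpy of the generated UUID for a bounds-checked snprintf, so a too-small caller buffer now yields an error instead of a truncated, possibly unterminated id. A minimal sketch of that copy pattern (the 37-byte source mirrors the UUIDLEN37 constant introduced earlier in this diff):

```c
#include <stdio.h>

// Bounded, always NUL-terminated copy of a 36-char UUID string; the snprintf
// return value tells the caller whether its buffer was large enough.
static int copyUuidSketch(char *uid, int uidlen, const char buf[37]) {
  int n = snprintf(uid, (size_t)uidlen, "%s", buf);
  return (n >= uidlen) ? -1 : 0;   // -1 mirrors the new "target buffer is too small" path
}
```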
@@ -834,11 +870,16 @@ int32_t taosGetSystemUUID(char *uid, int32_t uidlen) { return 0; #elif defined(_TD_DARWIN_64) uuid_t uuid = {0}; - char buf[37] = {0}; + char buf[UUIDLEN37]; + memset(buf, 0, UUIDLEN37); uuid_generate(uuid); // it's caller's responsibility to make enough space for `uid`, that's 36-char + 1-null uuid_unparse_lower(uuid, buf); - memcpy(uid, buf, uidlen); + int n = snprintf(uid, uidlen, "%.*s", (int)sizeof(buf), buf); // though less performance, much safer + if (n >= uidlen) { + // target buffer is too small + return -1; + } return 0; #else int len = 0; diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index cd4324a5928e11497d3f98276a8cff800e369395..685693a709f4dd14efc3ffd5f277946a3293b38a 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -33,6 +33,11 @@ #include //#define TM_YEAR_BASE 1970 //origin #define TM_YEAR_BASE 1900 // slguan + +// This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC) +// until 00:00:00 January 1, 1970 +static const uint64_t TIMEEPOCH = ((uint64_t)116444736000000000ULL); + /* * We do not implement alternate representations. However, we always * check whether a given modifier is allowed for a certain conversion. @@ -341,15 +346,17 @@ char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm) { int32_t taosGetTimeOfDay(struct timeval *tv) { #ifdef WINDOWS - time_t t; - t = taosGetTimestampSec(); - SYSTEMTIME st; - GetLocalTime(&st); + LARGE_INTEGER t; + FILETIME f; - tv->tv_sec = (long)t; - tv->tv_usec = st.wMilliseconds * 1000; + GetSystemTimeAsFileTime(&f); + t.QuadPart = f.dwHighDateTime; + t.QuadPart <<= 32; + t.QuadPart |= f.dwLowDateTime; - return 0; + t.QuadPart -= TIMEEPOCH; + tv->tv_sec = t.QuadPart / 10000000; + tv->tv_usec = (t.QuadPart % 10000000) / 10; #else return gettimeofday(tv, NULL); #endif @@ -550,37 +557,13 @@ int32_t taosClockGetTime(int clock_id, struct timespec *pTS) { #ifdef WINDOWS LARGE_INTEGER t; FILETIME f; - static FILETIME ff; - static SYSTEMTIME ss; - static LARGE_INTEGER offset; - - static int8_t offsetInit = 0; - static volatile bool offsetInitFinished = false; - int8_t old = atomic_val_compare_exchange_8(&offsetInit, 0, 1); - if (0 == old) { - ss.wYear = 1970; - ss.wMonth = 1; - ss.wDay = 1; - ss.wHour = 0; - ss.wMinute = 0; - ss.wSecond = 0; - ss.wMilliseconds = 0; - SystemTimeToFileTime(&ss, &ff); - offset.QuadPart = ff.dwHighDateTime; - offset.QuadPart <<= 32; - offset.QuadPart |= ff.dwLowDateTime; - offsetInitFinished = true; - } else { - while (!offsetInitFinished) - ; // Ensure initialization is completed. - } GetSystemTimeAsFileTime(&f); t.QuadPart = f.dwHighDateTime; t.QuadPart <<= 32; t.QuadPart |= f.dwLowDateTime; - t.QuadPart -= offset.QuadPart; + t.QuadPart -= TIMEEPOCH; pTS->tv_sec = t.QuadPart / 10000000; pTS->tv_nsec = (t.QuadPart % 10000000) * 100; return (0); diff --git a/source/os/test/osTests.cpp b/source/os/test/osTests.cpp index f831f457f9e7215d6d80eed3e33cf2ebc9346f00..1d6542e78cd987e5228c94609cf0dd865a6d6396 100644 --- a/source/os/test/osTests.cpp +++ b/source/os/test/osTests.cpp @@ -33,7 +33,13 @@ TEST(osTest, osSystem) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; // tsLogEmbedded ? 
255 : uDebugFlag - taosPrintTrace(flags, level, dflag); + taosPrintTrace(flags, level, dflag, 0); + + const int sysLen = 64; + char osSysName[sysLen]; + int ret = taosGetOsReleaseName(osSysName, sysLen); + printf("os systeme name:%s\n", osSysName); + ASSERT_EQ(ret, 0); } void fileOperateOnFree(void *param) { diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c index d9319485b7c3bbed717c054f6d63f91ca2220063..a06aac6afe3a64dcb9e53a7580c797f4d90a06a9 100644 --- a/source/util/src/talgo.c +++ b/source/util/src/talgo.c @@ -41,12 +41,6 @@ static void median(void *src, int64_t size, int64_t s, int64_t e, const void *pa ASSERT(comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) <= 0 && comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) <= 0); - -#ifdef _DEBUG_VIEW -// tTagsPrints(src[s], pOrderDesc->pColumnModel, &pOrderDesc->orderIdx); -// tTagsPrints(src[mid], pOrderDesc->pColumnModel, &pOrderDesc->orderIdx); -// tTagsPrints(src[e], pOrderDesc->pColumnModel, &pOrderDesc->orderIdx); -#endif } static void tInsertSort(void *src, int64_t size, int32_t s, int32_t e, const void *param, __ext_compar_fn_t comparFn, @@ -278,14 +272,4 @@ void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, } taosMemoryFree(buf); - /* - char *buf = taosMemoryCalloc(1, size); - - for (i = len - 1; i > 0; i--) { - doswap(elePtrAt(base, size, 0), elePtrAt(base, size, i)); - taosheapadjust(base, size, 0, i - 1, parcompar, compar, parswap, swap, maxroot); - } - - taosMemoryFreeClear(buf); - */ } diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index 6920925e5f82b16ba6d213ffbe03d6fa40d21d06..c083ce2f7f577349eea7ced1bcb325f8ecacdf99 100644 --- a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -20,7 +20,10 @@ // todo refactor API SArray* taosArrayInit(size_t size, size_t elemSize) { - assert(elemSize > 0); + if (elemSize == 0) { + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } if (size < TARRAY_MIN_SIZE) { size = TARRAY_MIN_SIZE; @@ -96,8 +99,6 @@ void* taosArrayAddBatch(SArray* pArray, const void* pData, int32_t nEles) { } void taosArrayRemoveDuplicate(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*)) { - assert(pArray); - size_t size = pArray->size; if (size <= 1) { return; @@ -136,8 +137,6 @@ void taosArrayRemoveDuplicate(SArray* pArray, __compar_fn_t comparFn, void (*fp) } void taosArrayRemoveDuplicateP(SArray* pArray, __compar_fn_t comparFn, void (*fp)(void*)) { - assert(pArray); - size_t size = pArray->size; if (size <= 1) { return; @@ -197,11 +196,10 @@ void* taosArrayReserve(SArray* pArray, int32_t num) { } void* taosArrayPop(SArray* pArray) { - assert(pArray != NULL); - if (pArray->size == 0) { return NULL; } + pArray->size -= 1; return TARRAY_GET_ELEM(pArray, pArray->size); } @@ -210,16 +208,21 @@ void* taosArrayGet(const SArray* pArray, size_t index) { if (NULL == pArray) { return NULL; } - assert(index < pArray->size); + + if (index >= pArray->size) { + uError("index is out of range, current:%"PRIzu" max:%d", index, pArray->capacity); + return NULL; + } + return TARRAY_GET_ELEM(pArray, index); } void* taosArrayGetP(const SArray* pArray, size_t index) { - assert(index < pArray->size); - - void* d = TARRAY_GET_ELEM(pArray, index); - - return *(void**)d; + void** p = taosArrayGet(pArray, index); + if (p == NULL) { + return NULL; + } + return *p; } void* taosArrayGetLast(const SArray* pArray) { return TARRAY_GET_ELEM(pArray, pArray->size - 1); } @@ -312,9 +315,12 @@ void taosArrayRemoveBatch(SArray* pArray, size_t index, 
size_t num, FDelete fp) } SArray* taosArrayFromList(const void* src, size_t size, size_t elemSize) { - assert(src != NULL && elemSize > 0); - SArray* pDst = taosArrayInit(size, elemSize); + if (elemSize <= 0) { + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } + SArray* pDst = taosArrayInit(size, elemSize); memcpy(pDst->pData, src, elemSize * size); pDst->size = size; @@ -322,8 +328,6 @@ SArray* taosArrayFromList(const void* src, size_t size, size_t elemSize) { } SArray* taosArrayDup(const SArray* pSrc, __array_item_dup_fn_t fn) { - assert(pSrc != NULL); - if (pSrc->size == 0) { // empty array list return taosArrayInit(8, pSrc->elemSize); } @@ -415,14 +419,10 @@ void taosArrayDestroyEx(SArray* pArray, FDelete fp) { } void taosArraySort(SArray* pArray, __compar_fn_t compar) { - ASSERT(pArray != NULL && compar != NULL); taosSort(pArray->pData, pArray->size, pArray->elemSize, compar); } void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t comparFn, int32_t flags) { - assert(pArray != NULL && comparFn != NULL); - assert(key != NULL); - return taosbsearch(key, pArray->pData, pArray->size, pArray->elemSize, comparFn, flags); } diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c index 7d1686ef80c0e157bd897e1a8f3bdab1411cf97f..761da6986ba635cd1bafa2b49d8719fb930b1992 100644 --- a/source/util/src/tcache.c +++ b/source/util/src/tcache.c @@ -921,7 +921,7 @@ void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void *param1) void taosStopCacheRefreshWorker(void) { stopRefreshWorker = true; TdThreadOnce tmp = PTHREAD_ONCE_INIT; - if (memcmp(&cacheRefreshWorker, &tmp, sizeof(TdThreadOnce)) != 0) taosThreadJoin(cacheRefreshWorker, NULL); + if (memcmp(&cacheThreadInit, &tmp, sizeof(TdThreadOnce)) != 0) taosThreadJoin(cacheRefreshWorker, NULL); taosArrayDestroy(pCacheArrayList); } diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index ed254b651f421885ca159b3da2a373034e4d4d1a..75d6a43b249fd56f8b6274b0f68db27627182d08 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -23,6 +23,7 @@ #include "tlog.h" #include "tutil.h" #include "types.h" +#include "osString.h" int32_t setChkInBytes1(const void *pLeft, const void *pRight) { return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; @@ -208,16 +209,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - if (len1 != len2) { - return len1 > len2 ? 1 : -1; - } else { - int32_t ret = memcmp((TdUcs4 *)pLeft, (TdUcs4 *)pRight, len1); - if (ret == 0) { + int32_t ret = tasoUcs4Compare((TdUcs4 *)varDataVal(pLeft), (TdUcs4 *)varDataVal(pRight), len1>len2 ? len2:len1); + if (ret == 0) { + if (len1 > len2) + return 1; + else if(len1 < len2) + return -1; + else return 0; - } else { - return ret > 0 ? 1 : -1; - } } + return (ret < 0) ? 
-1 : 1; } int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 08ab43494df2934a22a775c1fadbf85934d5e898..bab3edc870bb888bf2ad6030ff102c88daa05be6 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -138,6 +138,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_KILLED, "Query killed") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_EXEC_NODE, "No available execution node in current query policy configuration") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NOT_STABLE_ERROR, "Table is not a super table") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_CACHE_ERROR, "Stmt cache error") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INTERNAL_ERROR, "Internal error") // mnode-common TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_RIGHTS, "Insufficient privilege for operation") @@ -319,6 +320,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_SUBSCRIBED, "Table column is subsc TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_AVAIL_BUFPOOL, "No availabe buffer pool") TAOS_DEFINE_ERROR(TSDB_CODE_VND_STOPPED, "Vnode stopped") TAOS_DEFINE_ERROR(TSDB_CODE_VND_DUP_REQUEST, "Duplicate write request") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_QUERY_BUSY, "Query busy") // tsdb TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID") @@ -578,6 +580,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PRECISION_TYPE, "Invalid timestamp p TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_DATA, "Invalid data format") TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_DB_CONF, "Invalid schemaless db config") TAOS_DEFINE_ERROR(TSDB_CODE_SML_NOT_SAME_TYPE, "Not the same type like before") +TAOS_DEFINE_ERROR(TSDB_CODE_SML_INTERNAL_ERROR, "Internal error") //tsma TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_INIT_FAILED, "Tsma init failed") diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 53d0cad5eabbbcf5f6c6b38833ebf1c9fd172cce..62f074db5b9e28ffe71ca7ec3f4506b619ad20b1 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -18,6 +18,8 @@ #include "os.h" #include "tconfig.h" #include "tutil.h" +#include "tjson.h" +#include "tglobal.h" #define LOG_MAX_LINE_SIZE (1024) #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) @@ -808,7 +810,7 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char taosPrintLogImp(1, 255, buffer, len); taosPrintLog(flags, level, dflag, "tAssert at file %s:%d exit:%d", file, line, tsAssert); - taosPrintTrace(flags, level, dflag); + taosPrintTrace(flags, level, dflag, -1); if (tsAssert) { // taosCloseLog(); @@ -824,6 +826,217 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char return true; } +int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime) { + SJson* pJson = tjsonCreateObject(); + if (pJson == NULL) return -1; + char tmp[4096] = {0}; + + tjsonAddDoubleToObject(pJson, "reportVersion", 1); + + tjsonAddIntegerToObject(pJson, "clusterId", clusterId); + tjsonAddIntegerToObject(pJson, "startTime", startTime); + + taosGetFqdn(tmp); + tjsonAddStringToObject(pJson, "fqdn", tmp); + + tjsonAddIntegerToObject(pJson, "pid", taosGetPId()); + + taosGetAppName(tmp, NULL); + tjsonAddStringToObject(pJson, "appName", tmp); + + if (taosGetOsReleaseName(tmp, sizeof(tmp)) == 0) { + tjsonAddStringToObject(pJson, "os", tmp); + } + + float numOfCores = 0; + if (taosGetCpuInfo(tmp, sizeof(tmp), &numOfCores) == 0) { + tjsonAddStringToObject(pJson, "cpuModel", tmp); + tjsonAddDoubleToObject(pJson, "numOfCpu", numOfCores); + } else { + tjsonAddDoubleToObject(pJson, "numOfCpu", tsNumOfCores); + } + + 
snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB); + tjsonAddStringToObject(pJson, "memory", tmp); + + tjsonAddStringToObject(pJson, "version", version); + tjsonAddStringToObject(pJson, "buildInfo", buildinfo); + tjsonAddStringToObject(pJson, "gitInfo", gitinfo); + + tjsonAddIntegerToObject(pJson, "crashSig", signum); + tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()); + +#ifdef _TD_DARWIN_64 + taosLogTraceToBuf(tmp, sizeof(tmp), 4); +#elif !defined(WINDOWS) + taosLogTraceToBuf(tmp, sizeof(tmp), 3); +#else + taosLogTraceToBuf(tmp, sizeof(tmp), 8); +#endif + + tjsonAddStringToObject(pJson, "stackInfo", tmp); + + char* pCont = tjsonToString(pJson); + tjsonDelete(pJson); + + *pMsg = pCont; + + return TSDB_CODE_SUCCESS; +} + + +void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo) { + const char *flags = "UTL FATAL "; + ELogLevel level = DEBUG_FATAL; + int32_t dflag = 255; + char filepath[PATH_MAX] = {0}; + TdFilePtr pFile = NULL; + + if (pMsg && msgLen > 0) { + snprintf(filepath, sizeof(filepath), "%s%s.%sCrashLog", tsLogDir, TD_DIRSEP, nodeType); + + pFile = taosOpenFile(filepath, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); + if (pFile == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosPrintLog(flags, level, dflag, "failed to open file:%s since %s", filepath, terrstr()); + goto _return; + } + + taosLockFile(pFile); + + int64_t writeSize = taosWriteFile(pFile, &msgLen, sizeof(msgLen)); + if (sizeof(msgLen) != writeSize) { + taosUnLockFile(pFile); + taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, writeSize, sizeof(msgLen), terrstr()); + goto _return; + } + + writeSize = taosWriteFile(pFile, pMsg, msgLen); + if (msgLen != writeSize) { + taosUnLockFile(pFile); + taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, writeSize, msgLen, terrstr()); + goto _return; + } + + taosUnLockFile(pFile); + } + +_return: + + if (pFile) taosCloseFile(&pFile); + + terrno = TAOS_SYSTEM_ERROR(errno); + taosPrintLog(flags, level, dflag, "crash signal is %d", signum); + +#ifdef _TD_DARWIN_64 + taosPrintTrace(flags, level, dflag, 4); +#elif !defined(WINDOWS) + taosPrintLog(flags, level, dflag, "sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid, + taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); + taosPrintTrace(flags, level, dflag, 3); +#else + taosPrintTrace(flags, level, dflag, 8); +#endif + + taosMemoryFree(pMsg); +} + +void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd) { + const char *flags = "UTL FATAL "; + ELogLevel level = DEBUG_FATAL; + int32_t dflag = 255; + TdFilePtr pFile = NULL; + bool truncateFile = false; + char* buf = NULL; + + if (NULL == *pFd) { + int64_t filesize = 0; + if (taosStatFile(filepath, &filesize, NULL) < 0) { + if (ENOENT == errno) { + return; + } + + terrno = TAOS_SYSTEM_ERROR(errno); + taosPrintLog(flags, level, dflag, "failed to stat file:%s since %s", filepath, terrstr()); + return; + } + + if (filesize <= 0) { + return; + } + + pFile = taosOpenFile(filepath, TD_FILE_READ|TD_FILE_WRITE); + if (pFile == NULL) { + if (ENOENT == errno) { + return; + } + + terrno = TAOS_SYSTEM_ERROR(errno); + taosPrintLog(flags, level, dflag, "failed to open file:%s since %s", filepath, terrstr()); + return; + } + + taosLockFile(pFile); + } else { + pFile = *pFd; + } + + int64_t msgLen = 0; + int64_t readSize = taosReadFile(pFile, 
&msgLen, sizeof(msgLen)); + if (sizeof(msgLen) != readSize) { + truncateFile = true; + if (readSize < 0) { + taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, readSize, sizeof(msgLen), terrstr()); + } + goto _return; + } + + buf = taosMemoryMalloc(msgLen); + if (NULL == buf) { + taosPrintLog(flags, level, dflag, "failed to malloc buf, size:%" PRId64, msgLen); + goto _return; + } + + readSize = taosReadFile(pFile, buf, msgLen); + if (msgLen != readSize) { + truncateFile = true; + taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, readSize, msgLen, terrstr()); + goto _return; + } + + *pMsg = buf; + *pMsgLen = msgLen; + *pFd = pFile; + + return; + +_return: + + if (truncateFile) { + taosFtruncateFile(pFile, 0); + } + taosUnLockFile(pFile); + taosCloseFile(&pFile); + taosMemoryFree(buf); + + *pMsg = NULL; + *pMsgLen = 0; + *pFd = NULL; +} + +void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile) { + if (truncateFile) { + taosFtruncateFile(pFile, 0); + } + + taosUnLockFile(pFile); + taosCloseFile(&pFile); +} + #ifdef NDEBUG bool taosAssertRelease(bool condition) { if (condition) return false; @@ -833,7 +1046,7 @@ bool taosAssertRelease(bool condition) { int32_t dflag = 255; // tsLogEmbedded ? 255 : uDebugFlag taosPrintLog(flags, level, dflag, "tAssert called in release mode, exit:%d", tsAssert); - taosPrintTrace(flags, level, dflag); + taosPrintTrace(flags, level, dflag, 0); if (tsAssert) { taosMsleep(300); @@ -842,4 +1055,4 @@ bool taosAssertRelease(bool condition) { return true; } -#endif \ No newline at end of file +#endif diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 87b44b2d1337ab157e4499f5165abf0868069bc5..7c60862c56b8544dc815723aa5427dc604a0fff5 100644 --- a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -5,7 +5,10 @@ #include "thash.h" #include "tlog.h" -#define GET_DATA_PAYLOAD(_p) ((char*)(_p)->pData + POINTER_BYTES) +#define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES) +#define BUF_PAGE_IN_MEM(_p) ((_p)->pData != NULL) +#define CLEAR_BUF_PAGE_IN_MEM_FLAG(_p) ((_p)->pData = NULL) +#define HAS_DATA_IN_DISK(_p) ((_p)->offset >= 0) #define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages) typedef struct SPageDiskInfo { @@ -14,7 +17,7 @@ typedef struct SPageDiskInfo { } SPageDiskInfo, SFreeListItem; struct SPageInfo { - SListNode* pn; // point to list node struct + SListNode* pn; // point to list node struct. 
it is NULL when the page is evicted from the in-memory buffer void* pData; int64_t offset; int32_t pageId; @@ -89,7 +92,7 @@ static char* doDecompressData(void* data, int32_t srcSize, int32_t* dst, SDiskba return data; } -static uint64_t allocatePositionInFile(SDiskbasedBuf* pBuf, size_t size) { +static uint64_t allocateNewPositionInFile(SDiskbasedBuf* pBuf, size_t size) { if (pBuf->pFree == NULL) { return pBuf->nextPos; } else { @@ -112,10 +115,6 @@ static uint64_t allocatePositionInFile(SDiskbasedBuf* pBuf, size_t size) { } } -static void setPageNotInBuf(SPageInfo* pPageInfo) { pPageInfo->pData = NULL; } - -static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + sizeof(SFilePage); } - /** * +--------------------------+-------------------+--------------+ * | PTR to SPageInfo (8bytes)| Payload (PageSize)| 2 Extra Bytes| @@ -124,23 +123,31 @@ static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize * @param pg * @return */ -static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { - ASSERT(!pg->used && pg->pData != NULL); + +static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + sizeof(SFilePage); } + +static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) { + if (pg->pData == NULL || pg->used) { + uError("invalid params in paged buffer process when flushing buf to disk, %s", pBuf->id); + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } int32_t size = pBuf->pageSize; char* t = NULL; - if (pg->offset == -1 || pg->dirty) { - void* payload = GET_DATA_PAYLOAD(pg); + if ((!HAS_DATA_IN_DISK(pg)) || pg->dirty) { + void* payload = GET_PAYLOAD_DATA(pg); t = doCompressData(payload, pBuf->pageSize, &size, pBuf); - ASSERTS(size >= 0, "size is negative"); + if (size < 0) { + uError("failed to compress data when flushing data to disk, %s", pBuf->id); + return NULL; + } } // this page is flushed to disk for the first time if (pg->dirty) { - if (pg->offset == -1) { - ASSERTS(pg->dirty == true, "pg->dirty is false"); - - pg->offset = allocatePositionInFile(pBuf, size); + if (!HAS_DATA_IN_DISK(pg)) { + pg->offset = allocateNewPositionInFile(pBuf, size); pBuf->nextPos += size; int32_t ret = taosLSeekFile(pBuf->pFile, pg->offset, SEEK_SET); @@ -155,6 +162,7 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { return NULL; } + // extend the file size if (pBuf->fileSize < pg->offset + size) { pBuf->fileSize = pg->offset + size; } @@ -169,7 +177,7 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { taosArrayPush(pBuf->pFree, &dinfo); // 2. 
allocate new position, and update the info - pg->offset = allocatePositionInFile(pBuf, size); + pg->offset = allocateNewPositionInFile(pBuf, size); pBuf->nextPos += size; } @@ -197,20 +205,19 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { size = pg->length; } - ASSERT(size > 0 || (pg->offset == -1 && pg->length == -1)); - char* pDataBuf = pg->pData; memset(pDataBuf, 0, getAllocPageSize(pBuf->pageSize)); + #ifdef BUF_PAGE_DEBUG uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, pg->offset); #endif + pg->length = size; // on disk size return pDataBuf; } -static char* flushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { +static char* flushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) { int32_t ret = TSDB_CODE_SUCCESS; - ASSERT(((int64_t)pBuf->numOfPages * pBuf->pageSize) == pBuf->totalBufSize && pBuf->numOfPages >= pBuf->inMemPages); if (pBuf->pFile == NULL) { if ((ret = createDiskFile(pBuf)) != TSDB_CODE_SUCCESS) { @@ -219,22 +226,27 @@ static char* flushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { } } - char* p = doFlushPageToDisk(pBuf, pg); - setPageNotInBuf(pg); - pg->dirty = false; + char* p = doFlushBufPage(pBuf, pg); + CLEAR_BUF_PAGE_IN_MEM_FLAG(pg); + pg->dirty = false; return p; } // load file block data in disk static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { + if (pg->offset < 0 || pg->length <= 0) { + uError("failed to load buf page from disk, offset:%"PRId64", length:%d, %s", pg->offset, pg->length, pBuf->id); + return TSDB_CODE_INVALID_PARA; + } + int32_t ret = taosLSeekFile(pBuf->pFile, pg->offset, SEEK_SET); if (ret == -1) { ret = TAOS_SYSTEM_ERROR(errno); return ret; } - void* pPage = (void*)GET_DATA_PAYLOAD(pg); + void* pPage = (void*)GET_PAYLOAD_DATA(pg); ret = (int32_t)taosReadFile(pBuf->pFile, pPage, pg->length); if (ret != pg->length) { ret = TAOS_SYSTEM_ERROR(errno); @@ -249,10 +261,14 @@ static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) { return 0; } -static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t pageId) { +static SPageInfo* registerNewPageInfo(SDiskbasedBuf* pBuf, int32_t pageId) { pBuf->numOfPages += 1; SPageInfo* ppi = taosMemoryMalloc(sizeof(SPageInfo)); + if (ppi == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } ppi->pageId = pageId; ppi->pData = NULL; @@ -272,46 +288,33 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) { SListNode* pn = NULL; while ((pn = tdListNext(&iter)) != NULL) { SPageInfo* pageInfo = *(SPageInfo**)pn->data; - ASSERT(pageInfo->pageId >= 0 && pageInfo->pn == pn); + + SPageInfo* p = *(SPageInfo**)(pageInfo->pData); + ASSERT(pageInfo->pageId >= 0 && pageInfo->pn == pn && p == pageInfo); if (!pageInfo->used) { - // printf("%d is chosen\n", pageInfo->pageId); break; - } else { - // printf("page %d is used, dirty:%d\n", pageInfo->pageId, pageInfo->dirty); } } return pn; } -static char* evacOneDataPage(SDiskbasedBuf* pBuf) { - char* bufPage = NULL; +static char* evictBufPage(SDiskbasedBuf* pBuf) { SListNode* pn = getEldestUnrefedPage(pBuf); - terrno = 0; - - // all pages are referenced by user, try to allocate new space - if (pn == NULL) { - int32_t prev = pBuf->inMemPages; - - // increase by 50% of previous mem pages - pBuf->inMemPages = (int32_t)(pBuf->inMemPages * 1.5f); - - // qWarn("%p in memory buf page not sufficient, expand from %d to %d, page size:%d", pBuf, prev, - // pBuf->inMemPages, pBuf->pageSize); - } else { - tdListPopNode(pBuf->lruList, pn); + if (pn == NULL) { // no available buffer pages now, return. 
+ return NULL; + } - SPageInfo* d = *(SPageInfo**)pn->data; - ASSERTS(d->pn == pn, "d->pn not equal pn"); + terrno = 0; + tdListPopNode(pBuf->lruList, pn); - d->pn = NULL; - taosMemoryFreeClear(pn); + SPageInfo* d = *(SPageInfo**)pn->data; - bufPage = flushPageToDisk(pBuf, d); - } + d->pn = NULL; + taosMemoryFreeClear(pn); - return bufPage; + return flushBufPage(pBuf, d); } static void lruListPushFront(SList* pList, SPageInfo* pi) { @@ -338,13 +341,12 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem SDiskbasedBuf* pPBuf = *pBuf; if (pPBuf == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; + goto _error; } pPBuf->pageSize = pagesize; pPBuf->numOfPages = 0; // all pages are in buffer in the first place pPBuf->totalBufSize = 0; - pPBuf->inMemPages = inMemBufSize / pagesize; // maximum allowed pages, it is a soft limit. pPBuf->allocateId = -1; pPBuf->pFile = NULL; pPBuf->id = strdup(id); @@ -353,33 +355,69 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem pPBuf->freePgList = tdListNew(POINTER_BYTES); // at least more than 2 pages must be in memory - ASSERT(inMemBufSize >= pagesize * 2); + if (inMemBufSize < pagesize * 2) { + inMemBufSize = pagesize * 2; + } + pPBuf->inMemPages = inMemBufSize / pagesize; // maximum allowed pages, it is a soft limit. pPBuf->lruList = tdListNew(POINTER_BYTES); + if (pPBuf->lruList == NULL) { + goto _error; + } // init id hash table _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); pPBuf->pIdList = taosArrayInit(4, POINTER_BYTES); + if (pPBuf->pIdList == NULL) { + goto _error; + } pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES + if (pPBuf->assistBuf == NULL) { + goto _error; + } + pPBuf->all = taosHashInit(10, fn, true, false); - pPBuf->prefix = (char*) dir; + if (pPBuf->all == NULL) { + goto _error; + } + pPBuf->prefix = (char*) dir; pPBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t)); // qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, - // pPBuf->pageSize, - // pPBuf->inMemPages, pPBuf->path); + // pPBuf->pageSize, pPBuf->inMemPages, pPBuf->path); return TSDB_CODE_SUCCESS; + _error: + destroyDiskbasedBuf(pPBuf); + return TSDB_CODE_OUT_OF_MEMORY; +} + +static char* doExtractPage(SDiskbasedBuf* pBuf) { + char* availablePage = NULL; + if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) { + availablePage = evictBufPage(pBuf); + if (availablePage == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + uWarn("no available buf pages, current:%d, max:%d", listNEles(pBuf->lruList), pBuf->inMemPages) + } + } else { + availablePage = taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extract bytes in case of zipped buffer increased. 
+ if (availablePage == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + } + } + + return availablePage; } void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { pBuf->statis.getPages += 1; - char* availablePage = NULL; - if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) { - availablePage = evacOneDataPage(pBuf); + char* availablePage = doExtractPage(pBuf); + if (availablePage == NULL) { + return NULL; } SPageInfo* pi = NULL; @@ -394,7 +432,10 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { *pageId = (++pBuf->allocateId); // register page id info - pi = registerPage(pBuf, *pageId); + pi = registerNewPageInfo(pBuf, *pageId); + if (pi == NULL) { + return NULL; + } // add to hash map taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES); @@ -402,63 +443,62 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { } // add to LRU list - ASSERT(listNEles(pBuf->lruList) < pBuf->inMemPages && pBuf->inMemPages > 0); lruListPushFront(pBuf->lruList, pi); - - // allocate buf - if (availablePage == NULL) { - pi->pData = - taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extract bytes in case of zipped buffer increased. - } else { - pi->pData = availablePage; - } + pi->pData = availablePage; ((void**)pi->pData)[0] = pi; #ifdef BUF_PAGE_DEBUG uDebug("page_getNewBufPage , pi->pData:%p, pageId:%d, offset:%" PRId64, pi->pData, pi->pageId, pi->offset); #endif - return (void*)(GET_DATA_PAYLOAD(pi)); + + return (void*)(GET_PAYLOAD_DATA(pi)); } void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { - ASSERT(pBuf != NULL && id >= 0); + if (id < 0) { + terrno = TSDB_CODE_INVALID_PARA; + uError("invalid page id:%d, %s", id, pBuf->id); + return NULL; + } + pBuf->statis.getPages += 1; SPageInfo** pi = taosHashGet(pBuf->all, &id, sizeof(int32_t)); - ASSERT(pi != NULL && *pi != NULL); + if (pi == NULL || *pi == NULL) { + uError("failed to locate the buffer page:%d, %s", id, pBuf->id); + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } - if ((*pi)->pData != NULL) { // it is in memory + if (BUF_PAGE_IN_MEM(*pi)) { // it is in memory // no need to update the LRU list if only one page exists if (pBuf->numOfPages == 1) { (*pi)->used = true; - return (void*)(GET_DATA_PAYLOAD(*pi)); + return (void*)(GET_PAYLOAD_DATA(*pi)); } SPageInfo** pInfo = (SPageInfo**)((*pi)->pn->data); - ASSERT(*pInfo == *pi); + if (*pInfo != *pi) { + uError("inconsistently data in paged buffer, pInfo:%p, pi:%p, %s", *pInfo, *pi, pBuf->id); + return NULL; + } lruListMoveToFront(pBuf->lruList, (*pi)); (*pi)->used = true; + #ifdef BUF_PAGE_DEBUG uDebug("page_getBufPage1 pageId:%d, offset:%" PRId64, (*pi)->pageId, (*pi)->offset); #endif - return (void*)(GET_DATA_PAYLOAD(*pi)); + return (void*)(GET_PAYLOAD_DATA(*pi)); } else { // not in memory - ASSERT((*pi)->pData == NULL && (*pi)->pn == NULL && + ASSERT((!BUF_PAGE_IN_MEM(*pi)) && (*pi)->pn == NULL && (((*pi)->length >= 0 && (*pi)->offset >= 0) || ((*pi)->length == -1 && (*pi)->offset == -1))); - char* availablePage = NULL; - if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) { - availablePage = evacOneDataPage(pBuf); - if (availablePage == NULL) { - return NULL; - } - } + (*pi)->pData = doExtractPage(pBuf); - if (availablePage == NULL) { - (*pi)->pData = taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); - } else { - (*pi)->pData = availablePage; + // failed to evict buffer page, return with error code. 
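The tpagedbuf.c changes in this stretch replace scattered pointer checks with explicit state macros: a page is resident exactly when pData is non-NULL (BUF_PAGE_IN_MEM) and has a disk copy exactly when offset is non-negative (HAS_DATA_IN_DISK), and getBufPage only reloads from disk when the latter holds. A compressed restatement of that state model, using a stand-in struct rather than the real SPageInfo:

```c
#include <stdint.h>
#include <stddef.h>

// Two fields encode the page lifecycle: fresh (in memory, no disk copy),
// flushed (on disk only), or reloaded (both at once).
typedef struct { void *pData; int64_t offset; } PageStateSketch;

static int pageInMem(const PageStateSketch *p)  { return p->pData != NULL; }  // BUF_PAGE_IN_MEM
static int pageOnDisk(const PageStateSketch *p) { return p->offset >= 0; }    // HAS_DATA_IN_DISK
```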
+ if ((*pi)->pData == NULL) { + return NULL; } // set the ptr to the new SPageInfo @@ -468,23 +508,25 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { (*pi)->used = true; // some data has been flushed to disk, and needs to be loaded into buffer again. - if ((*pi)->length > 0 && (*pi)->offset >= 0) { + if (HAS_DATA_IN_DISK(*pi)) { int32_t code = loadPageFromDisk(pBuf, *pi); if (code != 0) { + terrno = code; return NULL; } } #ifdef BUF_PAGE_DEBUG uDebug("page_getBufPage2 pageId:%d, offset:%" PRId64, (*pi)->pageId, (*pi)->offset); #endif - return (void*)(GET_DATA_PAYLOAD(*pi)); + return (void*)(GET_PAYLOAD_DATA(*pi)); } } void releaseBufPage(SDiskbasedBuf* pBuf, void* page) { - if (ASSERTS(pBuf != NULL && page != NULL, "pBuf or page is NULL")) { + if (page == NULL) { return; } + SPageInfo* ppi = getPageInfoFromPayload(page); releaseBufPageInfo(pBuf, ppi); } @@ -493,7 +535,13 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) { #ifdef BUF_PAGE_DEBUG uDebug("page_releaseBufPageInfo pageId:%d, used:%d, offset:%" PRId64, pi->pageId, pi->used, pi->offset); #endif - if (ASSERTS(pi->pData != NULL, "pi->pData is NULL")) { + + if (pi == NULL) { + return; + } + + if (pi->pData == NULL) { + uError("pi->pData (page data) is null"); return; } @@ -504,7 +552,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) { size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; } SArray* getDataBufPagesIdList(SDiskbasedBuf* pBuf) { - ASSERT(pBuf != NULL); return pBuf->pIdList; } @@ -582,7 +629,6 @@ SPageInfo* getLastPageInfo(SArray* pList) { } int32_t getPageId(const SPageInfo* pPgInfo) { - ASSERT(pPgInfo != NULL); return pPgInfo->pageId; } diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 42b63588930712d2e132fd7e8bf860aef3622ff9..3769da6ccd60a2fe4a6a949ae9b66e8a5a3e1486 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -339,7 +339,6 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) { STaosQueue *prev = qset->head; tqueue = qset->head->next; while (tqueue) { - assert(tqueue->qset); if (tqueue == queue) { prev->next = tqueue->next; break; diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index a9a84c186033f8b9574f3ef3f22fed10e6794207..5581931178486ba05f36640edcf7500477cbc96c 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -227,6 +227,7 @@ STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem uError("worker:%s:%d failed to create", pool->name, curWorkerNum); taosMemoryFree(worker); taosCloseQueue(queue); + taosThreadMutexUnlock(&pool->mutex); terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } diff --git a/tests/develop-test/2-query/show_create_db.py b/tests/develop-test/2-query/show_create_db.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a79074ef2a1b9881f230b3eba2011edca517af --- /dev/null +++ b/tests/develop-test/2-query/show_create_db.py @@ -0,0 +1,82 @@ +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11204]Difference improvement that can ignore negative + ''' + return + + def init(self, conn, logSql, replicaVer=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), False) + self._conn = conn + + def restartTaosd(self, index=1, dbname="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use 
scd") + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists scd") + tdSql.execute("create database if not exists scd") + tdSql.execute('use scd') + tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);") + + tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);") + + tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);") + + tdSql.execute('create database scd2 stt_trigger 3;') + + tdSql.execute('create database scd4 stt_trigger 13;') + + tdSql.query('show create database scd;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'scd') + tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0") + + tdSql.query('show create database scd2;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'scd2') + tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0") + + tdSql.query('show create database scd4') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'scd4') + tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0") + + + self.restartTaosd(1, dbname='scd') + + tdSql.query('show create database scd;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'scd') + tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0") + + tdSql.query('show create database scd2;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'scd2') + tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0") + + tdSql.query('show create database scd4') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'scd4') + tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0") + + + tdSql.execute('drop database scd') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py new file mode 100644 index 
0000000000000000000000000000000000000000..3ca7e08cd09be0ef12c423c949e598644f610a1d --- /dev/null +++ b/tests/develop-test/2-query/table_count_scan.py @@ -0,0 +1,238 @@ +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-21890] table count scan test case + ''' + return + + def init(self, conn, logSql, replicaVer=1): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), False) + self._conn = conn + + def restartTaosd(self, index=1, dbname="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use tbl_count") + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists tbl_count") + tdSql.execute("create database if not exists tbl_count") + tdSql.execute('use tbl_count') + tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);") + + tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);") + + tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1);') + + tdSql.execute("insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 1, 'information_schema') + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 2, 'stb1') + tdSql.checkData(2, 0, 5) + tdSql.checkData(2, 1, 'performance_schema') + 
tdSql.checkData(2, 2, None) + + tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 1, 'information_schema') + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 0, 5) + tdSql.checkData(1, 1, 'performance_schema') + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 0, 3) + tdSql.checkData(2, 1, 'tbl_count') + tdSql.checkData(2, 2, 'stb1') + + tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 5) + tdSql.checkData(0, 1, 'performance_schema') + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 1, 'information_schema') + + tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 3) + + tdSql.query('select count(*) from information_schema.ins_tables where db_name=\'tbl_count\' and stable_name="stb1";') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 3) + + tdSql.query('select count(*) from information_schema.ins_tables') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 32) + + + tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tba1 using stba tags(1,'1',1.0);") + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:00\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:01\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:02\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:04\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:05\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:06\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:07\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:08\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:09\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') + + self.restartTaosd(1, dbname='tbl_count') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:10\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:11\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:12\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:13\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:14\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:15\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:16\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('insert into tba1 
values (\'2021-11-11 09:00:17\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:18\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:19\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') + + self.restartTaosd(1, dbname='tbl_count') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:20\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:21\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:22\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:23\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:24\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:25\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:26\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:27\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:28\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:29\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') + + tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tbl_count') + tdSql.checkData(0, 2, 'stba') + tdSql.checkData(1, 0, 24) + tdSql.checkData(1, 1, 'information_schema') + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 0, 3) + tdSql.checkData(2, 1, 'tbl_count') + tdSql.checkData(2, 2, 'stb1') + tdSql.checkData(3, 0, 5) + tdSql.checkData(3, 1, 'performance_schema') + tdSql.checkData(3, 2, None) + + tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 1, 'information_schema') + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 0, 5) + tdSql.checkData(1, 1, 'performance_schema') + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 0, 1) + tdSql.checkData(2, 1, 'tbl_count') + tdSql.checkData(2, 2, 'stba') + tdSql.checkData(3, 0, 3) + tdSql.checkData(3, 1, 'tbl_count') + tdSql.checkData(3, 2, 'stb1') + + tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 5) + tdSql.checkData(0, 1, 'performance_schema') + tdSql.checkData(1, 0, 4) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 1, 'information_schema') + + tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4) + + tdSql.query('select count(*) from information_schema.ins_tables where db_name=\'tbl_count\' and stable_name="stb1";') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 3) + + tdSql.query('select count(*) from information_schema.ins_tables') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 33) + + + tdSql.execute('drop database tbl_count') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json index 5a373888a6cdfe2ee8a4880863459e91c87a8fa2..62ec0ff9bfff3d847e785b285b102ae5fecf8c2f 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json @@ -37,7 +37,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -62,7 +62,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -87,7 +87,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -112,7 +112,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -137,7 +137,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -162,7 +162,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -187,7 +187,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -212,7 +212,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, @@ -237,7 +237,7 @@ "line_protocol": "json", "childtable_limit": 0, "childtable_offset": 0, - "insert_rows": 20, + "insert_rows": 10, "insert_interval": 0, "interlace_rows": 0, "disorder_ratio": 0, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py index 2c6d09b0f5b4782c692dab2af7e790a864a405bf..789a0ce0a2aaf195665f3ab483428cbca346b1b0 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -78,23 +78,23 @@ class TDTestCase: tdSql.checkData(1, 1, "NCHAR") tdSql.checkData(1, 2, 16) tdSql.query("select count(*) from db.stb1") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb2") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb3") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb4") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb5") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb6") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb7") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb8") - tdSql.checkData(0, 0, 160) + tdSql.checkData(0, 0, 80) tdSql.query("select count(*) from db.stb9") - tdSql.checkData(0, 0, 160) + 
tdSql.checkData(0, 0, 80) def stop(self): tdSql.close() diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh index 140d05395bdf6c32bbded25bb53ffaab523e3434..ccb391b7527fbf2490911d868d08c87436221162 100644 --- a/tests/docs-examples-test/python.sh +++ b/tests/docs-examples-test/python.sh @@ -23,7 +23,7 @@ python3 bind_param_example.py # 4 taos -s "drop database power" -python3 multi_bind_example.py +python3 multi_bind_example.py # 5 python3 query_example.py @@ -44,4 +44,43 @@ taos -s "drop database test" python3 json_protocol_example.py # 10 -# python3 subscribe_demo.py +pip install SQLAlchemy +pip install pandas +taosBenchmark -y -d power -t 10 -n 10 +python3 conn_native_pandas.py +python3 conn_rest_pandas.py +taos -s "drop database if exists power" + +# 11 +taos -s "create database if not exists test" +python3 connect_native_reference.py + +# 12 +python3 connect_rest_examples.py + +# 13 +python3 handle_exception.py + +# 14 +taosBenchmark -y -d power -t 2 -n 10 +python3 rest_client_example.py +taos -s "drop database if exists power" + +# 15 +python3 result_set_examples.py + +# 16 +python3 tmq_example.py + +# 17 +python3 sql_writer.py + +# 18 +python3 mockdatasource.py + +# 19 +python3 fast_write_example.py + +# 20 +pip3 install kafka-python +python3 kafka_example.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 648497603eb2b713672a30d1b5a370e923410b7d..94d87c705cfaa07028ed64caf30ae668ad245cf3 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -146,6 +146,7 @@ ,,y,script,./test.sh -f tsim/parser/precision_ns.sim ,,y,script,./test.sh -f tsim/parser/projection_limit_offset.sim ,,y,script,./test.sh -f tsim/parser/regex.sim +,,y,script,./test.sh -f tsim/parser/regressiontest.sim ,,y,script,./test.sh -f tsim/parser/select_across_vnodes.sim ,,y,script,./test.sh -f tsim/parser/select_distinct_tag.sim ,,y,script,./test.sh -f tsim/parser/select_from_cache_disk.sim @@ -247,6 +248,8 @@ ,,y,script,./test.sh -f tsim/stream/fillIntervalPartitionBy.sim ,,y,script,./test.sh -f tsim/stream/fillIntervalPrevNext.sim ,,y,script,./test.sh -f tsim/stream/fillIntervalValue.sim +,,y,script,./test.sh -f tsim/stream/udTableAndTag0.sim +,,y,script,./test.sh -f tsim/stream/udTableAndTag1.sim ,,y,script,./test.sh -f tsim/trans/lossdata1.sim ,,y,script,./test.sh -f tsim/trans/create_db.sim ,,y,script,./test.sh -f tsim/tmq/basic1.sim @@ -446,6 +449,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/database_pre_suf.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/InsertFuturets.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/and_or_for_byte.py @@ -673,7 +677,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 -,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 +#,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 
6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 @@ -1044,11 +1048,19 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/insert_null_none.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/blockSMA.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/odbc.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-20582.py #develop test +,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py +,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/custom_col_tag.py ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/default_json.py @@ -1067,4 +1079,4 @@ ,,n,docs-examples-test,bash node.sh ,,n,docs-examples-test,bash csharp.sh ,,n,docs-examples-test,bash jdbc.sh -#,,n,docs-examples-test,bash go.sh +,,n,docs-examples-test,bash go.sh diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 5059630a3f016ebdedd98b643d276d02d8ff0807..ff854449bb913bea8bf5dd1b6477d7d9c7a7b70e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -37,9 +37,9 @@ if [ -z "$WORKDIR" ]; then usage exit 1 fi -if [ -z "$THREAD_COUNT" ]; then - THREAD_COUNT=1 -fi +# if [ -z "$THREAD_COUNT" ]; then +# THREAD_COUNT=1 +# fi ulimit -c unlimited @@ -55,7 +55,7 @@ fi date docker run \ -v $REP_MOUNT_PARAM \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true;make -j $THREAD_COUNT || exit 1" + --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true;make -j || exit 1" if [[ -d ${WORKDIR}/debugNoSan ]] ;then echo "delete ${WORKDIR}/debugNoSan" @@ -70,7 +70,7 @@ mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date docker run \ -v $REP_MOUNT_PARAM \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug;make -j $THREAD_COUNT || exit 1 " + --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true;make -j || exit 1 " mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan diff --git a/tests/parallel_test/run.sh b/tests/parallel_test/run.sh index b5d57265bef34f40dbf27dd791a2b17da791d4fb..43533d4f3688922d0f7670e5a74218a606c11f12 100755 --- a/tests/parallel_test/run.sh +++ b/tests/parallel_test/run.sh @@ -184,6 +184,10 @@ function run_thread() { if [ $? -eq 0 ]; then case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'` fi + echo "$case_cmd"|grep -q "^./pytest.sh" + if [ $? -eq 0 ]; then + case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'` + fi echo "$case_cmd"|grep -q "\.sim" if [ $? -eq 0 ]; then case_file=`echo "$case_cmd"|grep -o ".*\.sim"|awk '{print $NF}'` diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 6c71c5cea7a3ea67e21a520933d66b3508607b45..a1682f47b3e229e24a9b7bdd750b8cbe4890dea5 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -29,6 +29,7 @@ class TDSimClient: self.testCluster = False self.path = path self.cfgDict = { + "fqdn": "localhost", "numOfLogLines": "100000000", "locale": "en_US.UTF-8", "charset": "UTF-8", @@ -119,6 +120,7 @@ class TDDnode: self.asan = False self.remoteIP = "" self.cfgDict = { + "fqdn": "localhost", "monitor": "0", "maxShellConns": "30000", "locale": "en_US.UTF-8", diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c index 88dada44accf588357314843d2a7d09964c896c0..0903095dc98dcf3652546859003fa65fd88e1569 100644 --- a/tests/script/api/batchprepare.c +++ b/tests/script/api/batchprepare.c @@ -2828,7 +2828,7 @@ void runAll(TAOS *taos) { printf("%s Begin\n", gCaseCtrl.caseCatalog); runCaseList(taos); -#if 0 +#if 1 strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test"); printf("%s Begin\n", gCaseCtrl.caseCatalog); gCaseCtrl.precision = TIME_PRECISION_MICRO; diff --git a/tests/script/sh/checkAsan.sh b/tests/script/sh/checkAsan.sh index 7df17b22da1c8a8c3b7cf95315a3aea417d56eae..72257227916785de3d174349d4c83d3302064295 100755 --- a/tests/script/sh/checkAsan.sh +++ b/tests/script/sh/checkAsan.sh @@ -39,7 +39,7 @@ python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l` # /root/TDengine/source/libs/scalar/src/sclvector.c:1075:66: runtime error: signed integer overflow: 9223372034707292160 + 1668838476672 cannot be represented in type 'long int' # /root/TDengine/source/common/src/tdataformat.c:1876:7: runtime error: signed integer overflow: 8252423483843671206 + 2406154664059062870 cannot be represented in type 'long int' -runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type"| grep -v "signed integer overflow" | wc -l` +runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type"| grep -v "signed integer overflow" |grep -v "strerror.c"| grep -v "asan_malloc_linux.cc" |wc -l` echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m" echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m" diff --git a/tests/script/tsim/db/alter_replica_13.sim b/tests/script/tsim/db/alter_replica_13.sim index d75acb50ad087383fd2e5aabadfb0e2c1165204e..a9dc1741a1681a25da42c842e5c82078bad40ae7 100644 --- a/tests/script/tsim/db/alter_replica_13.sim +++ b/tests/script/tsim/db/alter_replica_13.sim @@ -79,6 +79,7 @@ sql 
insert into db.ctb6 values(now, 6, "6") sql insert into db.ctb7 values(now, 7, "7") sql insert into db.ctb8 values(now, 8, "8") sql insert into db.ctb9 values(now, 9, "9") +sql flush database db; print =============== step3: create dnodes sql create dnode $hostname port 7300 diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim index 369419dcd9cd91688f39c27dbd54c33ee0699ae8..cf9da46fba276911627f6b5d025176084b473854 100644 --- a/tests/script/tsim/parser/alter1.sim +++ b/tests/script/tsim/parser/alter1.sim @@ -88,6 +88,7 @@ sql insert into car1 values (now, 1, 1,1 ) (now +1s, 2,2,2) car2 values (now, 1, sql select c1+speed from stb where c1 > 0 if $rows != 3 then + print $rows , expect 3 return -1 endi diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim new file mode 100644 index 0000000000000000000000000000000000000000..1b127155cbed8f9eaf26898c27bd87e46ed1d7e4 --- /dev/null +++ b/tests/script/tsim/parser/regressiontest.sim @@ -0,0 +1,66 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +$dbPrefix = reg_db +$tb = tb +$rowNum = 8200 + +$ts0 = 1537146000000 +$delta = 100 +print ========== reg.sim +$i = 0 +$db = $dbPrefix . $i + +sql drop database if exists $db -x step1 +step1: +sql create database $db vgroups 1; + +sql use $db +sql create table $tb (ts timestamp, c1 int) + +$i = 0 +$ts = $ts0 + +$x = 0 +while $x < $rowNum +$xs = $x * $delta +$ts = $ts0 + $xs +sql insert into $tb values ( $ts , $x ) +$x = $x + 1 +endw + +print ================== restart server to commit data into disk +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start +print ================== server restart completed +sql connect + +sql use $db +sql delete from $tb where ts=1537146000000 +sql delete from $tb where ts=1537146409500 + +print =========================> TS-2410 +sql select * from $tb limit 20 offset 4090 +print $data00 +print $data10 +print $data20 +print $data30 +print $data40 +print $data50 +print $data60 +print $data70 +print $data80 +print $data90 + +if $data40 != @18-09-17 09:06:49.600@ then + return -1 +endi + +sql select * from $tb order by ts desc; +if $rows != 8198 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/event.sim b/tests/script/tsim/query/event.sim new file mode 100644 index 0000000000000000000000000000000000000000..adc94a34decca06f43f4dbc3c2417415ea583214 --- /dev/null +++ b/tests/script/tsim/query/event.sim @@ -0,0 +1,69 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ======== prepare data + +sql drop database if exists db1; +sql create database db1 vgroups 5; +sql use db1; +sql create stable sta (ts timestamp, f1 int, f2 binary(10), f3 bool) tags(t1 int, t2 bool, t3 binary(10)); +sql create table tba1 using sta tags(0, false, '0'); +sql create table tba2 using sta tags(1, true, '1'); +sql create table tba3 using sta tags(null, null, ''); +sql create table tba4 using sta tags(1, false, null); +sql create table tba5 using sta tags(3, true, 'aa'); +sql insert into tba1 values ('2022-09-26 15:15:01', 0, "a", false); +sql insert into tba1 values ('2022-09-26 15:15:02', 1, "0", true); +sql insert into tba1 values ('2022-09-26 15:15:03', 5, "5", false); +sql insert into tba1 values ('2022-09-26 15:15:04', 3, 'b', false); +sql insert into tba1 values ('2022-09-26 15:15:05', 0, '1', 
false); +sql insert into tba1 values ('2022-09-26 15:15:06', 2, 'd', true); + +sql insert into tba2 values ('2022-09-27 15:15:01', 0, "a", false); +sql insert into tba2 values ('2022-09-27 15:15:02', 1, "0", true); +sql insert into tba2 values ('2022-09-27 15:15:03', 5, "5", false); +sql insert into tba2 values ('2022-09-27 15:15:04', null, null, null); + +# child table: no window +print ====> select count(*) from tba1 event_window start with f1 = 0 end with f2 = 'c'; +sql select count(*) from tba1 event_window start with f1 = 0 end with f2 = 'c'; +if $rows != 0 then + return -1 +endi + +# child table: single row window +print ====> select count(*) from tba1 event_window start with f1 = 0 end with f3 = false; +sql select count(*) from tba1 event_window start with f1 = 0 end with f3 = false +if $rows != 2 then + return -1 +endi +if $data00 != 1 then + return -1 +endi + +# child table: multi rows window +print ====> select count(*) from tba1 event_window start with f1 = 0 end with f2 = 'b'; +sql select count(*) from tba1 event_window start with f1 = 0 end with f2 = 'b'; +if $rows != 1 then + return -1 +endi +if $data00 != 4 then + return -1 +endi + +# child table: multi windows +print ====> select count(*) from tba1 event_window start with f1 >= 0 end with f3 = true; +sql select count(*) from tba1 event_window start with f1 >= 0 end with f3 = true; +if $rows != 2 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data10 != 4 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index 045e908a578af1a1a1248574a4dd70c2af5cf8b2..7b3953129a81d6fa56b093c8b048b2071b6fbe70 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -53,7 +53,7 @@ endi sql select tbname from information_schema.ins_tables; print $rows $data00 -if $rows != 32 then +if $rows != 33 then return -1 endi if $data00 != @ins_tables@ then @@ -86,4 +86,23 @@ if $data00 != @ins_tags@ then return -1 endi +sql create stable stb(ts timestamp, f int) tags(t1 int, t2 int, t3 int, t4 int, t5 int); + +$i = 0 +$tbNum = 1000 +$tbPrefix = stb_tb +while $i < $tbNum + $tb = $tbPrefix . 
$i + sql create table $tb using stb tags( $i , $i , $i , $i , $i ) + + $i = $i + 1 +endw + +sql select tag_value from information_schema.ins_tags where stable_name='stb'; +if $rows != 5000 then + print $rows + return -1 +endi + + #system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index 04cf09715c7def3d73800e9a5cbc6f2e0fc78ddd..b3144e4e0dd217319a0d58bf3222360fcd5fa355 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ -81,9 +81,61 @@ if $data01 != 10 then endi #=================================================================== +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start +print =============== wait maxdelay 15+2 seconds for results after reboot +sleep 17000 + +print =============== select * from retention level 2 from memory after reboot +sql select * from ct1; +print $data00 $data01 +if $rows > 2 then + print retention level 2 file rows $rows > 2 + return -1 +endi + + +if $data01 != 1 then + if $data01 != 10 then + print =============> $data01 + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi +print =============== select * from retention level 1 from memory after reboot +sql select * from ct1 where ts > now-8d; +print $data00 $data01 +if $rows > 2 then + print retention level 1 file rows $rows > 2 + return -1 +endi + +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + +print =============== select * from retention level 0 from memory after reboot +sql select * from ct1 where ts > now-3d; +print $data00 $data01 +print $data10 $data11 +print $data20 $data21 + +if $rows < 1 then + print retention level 0 file rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi -#==================== reboot to trigger commit data to file +#==================== flush database to trigger commit data to file +sql flush database d0; system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim index faff48b61c1216e744d349a301f39fabd6dc578a..0c9d23335ea3b378a2dbe849dc8f7fd773470dea 100644 --- a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim +++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim @@ -82,9 +82,63 @@ if $data01 != 10 then endi #=================================================================== +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start +print =============== wait maxdelay 5+2 seconds for results after reboot +sleep 7000 + +print =============== select * from retention level 2 from memory after reboot +sql select * from ct1; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +if $rows > 2 then + print retention level 2 file rows $rows > 2 + return -1 +endi + + +if $data01 != 100 then + if $data01 != 10 then + print retention level 2 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 1 from memory after reboot +sql select * from ct1 where ts > now-8d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +if $rows > 2 then + print retention level 1 file rows $rows > 2 + return -1 +endi + +if $data01 != 100 then + if $data01 != 10 then + print 
retention level 1 file result $data01 != 100 or 10 + return -1 + endi +endi + +print =============== select * from retention level 0 from memory after reboot +sql select * from ct1 where ts > now-3d; +print $data00 $data01 $data02 +print $data10 $data11 $data12 +print $data20 $data21 $data22 + +if $rows < 1 then + print retention level 0 file rows $rows < 1 + return -1 +endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi -#==================== reboot to trigger commit data to file +#==================== flush database to trigger commit data to file +sql flush database d0; system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim index 442b4970e4e09652666378962a8e04d9ae8616fd..9b6fc96bc00fde8418bf1d29d4c8cd8fc7f02091 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim @@ -7,9 +7,10 @@ sql connect print =============== create database sql create database d1 keep 36500d vgroups 1 sql use d1 +sql alter local 'querySmaOptimize' '1'; print =============== create super table, include column type for count/sum/min/max/first -sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) +sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double, c4 binary(10),c5 nchar(10)) tags (t1 int unsigned) sql show stables if $rows != 1 then @@ -25,16 +26,32 @@ if $rows != 1 then endi print =============== insert data, mode1: one row one table in sql -sql insert into ct1 values('2022-10-19 09:55:45.682', 10, 2.0, 3.0) -sql insert into ct1 values('2022-10-19 09:55:46.682', 11, 2.1, 3.1)('2022-10-19 09:55:47.682', -12, -2.2, -3.2)('2022-10-19 09:55:48.682', -13, -2.3, -3.3) +sql insert into ct1 values('2022-10-19 09:55:45.682', 10, 2.0, 3.0, "a", "n0") +sql insert into ct1 values('2022-10-19 09:55:46.682', 11, 2.1, 3.1,"b","n1")('2022-10-19 09:55:47.682', -12, -2.2, -3.2,"c","n2")('2022-10-19 09:55:48.682', -13, -2.3, -3.3,"d","n3") print =============== create sma index from super table sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m) watermark 1s max_delay 1s -print $data00 $data01 $data02 $data03 +sql create sma index sma_index_name2 on stb function(sum(c1),first(c1), last(c1), first(c4),last(c4),count(c4),first(c5),last(c5),count(c5),apercentile(c1,80,"t-digest"), avg(c2),count(c3), spread(c3), stddev(c2), hyperloglog(c2), hyperloglog(c4), hyperloglog(c5)) interval(5m,10s) sliding(5m); +# for varchar/binary +sql_error create sma index sma_index_name3 on stb function(sum(c4)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(min(c4)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(max(c4)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(avg(c4)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(apercentile(c4)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(spread(c4)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(stddev(c4)) interval(5m,10s) sliding(5m); +# for nchar +sql_error create sma index sma_index_name3 on stb function(sum(c5)) interval(5m,10s) sliding(5m); +sql_error create sma index 
sma_index_name3 on stb function(min(c5)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(max(c5)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(avg(c5)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(apercentile(c5)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(spread(c5)) interval(5m,10s) sliding(5m); +sql_error create sma index sma_index_name3 on stb function(stddev(c5)) interval(5m,10s) sliding(5m); print =============== trigger stream to execute sma aggr task and insert sma data into sma store -sql insert into ct1 values('2022-10-19 09:55:50.682', 20, 20.0, 30.0) +sql insert into ct1 values('2022-10-19 09:55:50.682', 20, 20.0, 30.0,"e","n5") #==================== sleep 2s to wait for tsma result sleep 2000 @@ -42,9 +59,11 @@ print =============== show streams ================================ sql show streams; print $data00 $data01 $data02 -if $data00 != sma_index_name1 then - print $data00 - return -1 +if $data00 != sma_index_name1 then + if $data00 != sma_index_name2 then + print $data00 + return -1 + endi endi print =============== select * from ct1 from memory @@ -117,7 +136,76 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s start sleep 50 +print =============== select * from ct1 from memory after reboot +sql select * from ct1; +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 +print $data20 $data21 $data22 $data23 +print $data30 $data31 $data32 $data33 +print $data40 $data41 $data42 $data43 +if $rows != 5 then + print rows $rows != 5 + return -1 +endi +print =============== select * from stb from memory in designated vgroup after reboot +sql select _wstart, _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m); +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +if $rows != 1 then + print rows $rows != 1 + return -1 +endi + +if $data02 != -13 then + print data02 $data02 != -13 + return -1 +endi + +if $data03 != 20.00000 then + print data03 $data03 != 20.00000 + return -1 +endi + +if $data04 != 20 then + print data04 $data04 != 20 + return -1 +endi + +print =============== select * from stb from memory in common vgroups after reboot +sql select _wstart, _wend, min(c1),max(c2),max(c1),max(c3) from stb interval(5m,10s) sliding(5m); +print $data00 $data01 $data02 $data03 $data04 $data05 +if $rows != 1 then + print rows $rows != 1 + return -1 +endi + +if $data02 != -13 then + print data02 $data02 != -13 + return -1 +endi + +if $data03 != 20.00000 then + print data03 $data03 != 20.00000 + return -1 +endi + +if $data04 != 20 then + print data04 $data04 != 20 + return -1 +endi + +if $data05 != 30.000000000 then + print data05 $data05 != 30.000000000 + return -1 +endi + + +#==================== flush database to trigger commit data to file +sql flush database d1; +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode1 -s start +sleep 50 print =============== select * from ct1 from file sql select * from ct1; print $data00 $data01 $data02 $data03 diff --git a/tests/script/tsim/stream/basic3.sim b/tests/script/tsim/stream/basic3.sim index 41e19b19af0e388ff8a736723d9d74df356e2a6a..e598919e34b60d22478de67cce5df9c0c976216d 100644 --- a/tests/script/tsim/stream/basic3.sim +++ b/tests/script/tsim/stream/basic3.sim @@ -1,6 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 
-system sh/cfg.sh -n dnode1 -c debugflag 131 +system sh/cfg.sh -n dnode1 -c debugflag -v 131 +system sh/cfg.sh -n dnode1 -c keepColumnName -v 1 system sh/exec.sh -n dnode1 -s start sleep 5000 @@ -9,7 +10,7 @@ sql connect print ========== interval\session\state window -sql CREATE DATABASE test1 BUFFER 96 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 STRICT 'off' WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0; +sql CREATE DATABASE test1 VGROUPS 2; sql use test1; sql CREATE STABLE st (time TIMESTAMP, ca DOUBLE, cb DOUBLE, cc int) TAGS (ta VARCHAR(10) ); @@ -29,6 +30,76 @@ sql create stream streamd4 into streamt4 as select tbname, _wstart,_wend, count( sql create stream streamd5 into streamt5 as select tbname, _wstart,_wend, count(*), max(ca), min(cb) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname state_window(cc); sql create stream streamd6 into streamt6 as select ca, _wstart,_wend, count(*), max(ca), min(cb) from t1 where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca state_window(cc); + +sql alter local 'keepColumnName' '1' + +sql CREATE STABLE `meters_test_data` (`ts` TIMESTAMP, `close` FLOAT, `parttime` TIMESTAMP, `parttime_str` VARCHAR(32)) TAGS (`id` VARCHAR(32)); + +sql_error create stream realtime_meters fill_history 1 into realtime_meters as select last(parttime),first(close),last(close) from meters_test_data partition by tbname state_window(parttime_str); +sql_error create stream streamd7 into streamt7 as select _wstart, _wend, count(*), first(ca), last(ca) from t1 interval(10s); +sql_error create stream streamd71 into streamt71 as select _wstart, _wend, count(*) as ca, first(ca), last(ca) as c2 from t1 interval(10s); + +sql create stream streamd8 into streamt8 as select _wstart, _wend, count(*), first(ca) as c1, last(ca) as c2 from t1 interval(10s); +sql desc streamt8; + +if $rows == 0 then + return -1 +endi + +sql create stream streamd9 into streamt9 as select _wstart, _wend, count(*), first(ca) as c1, last(ca) from t1 interval(10s); +sql desc streamt9; + +if $rows == 0 then + return -1 +endi + +sql_error create stream streamd11 into streamd11 as select _wstart, _wend, count(*), last(ca), last(ca) from t1 interval(10s); + +sql alter local 'keepColumnName' '0' + +sql create stream realtime_meters fill_history 1 into realtime_meters as select last(parttime),first(close),last(close) from meters_test_data partition by tbname state_window(parttime_str); + +sql desc realtime_meters; + +if $rows == 0 then + return -1 +endi + +sql create stream streamd7 into streamt7 as select _wstart, _wend, count(*), first(ca), last(ca) from t1 interval(10s); + +sql desc streamt7; + +if $rows == 0 then + return -1 +endi + +sql create stream streamd71 into streamt71 as select _wstart, _wend, count(*) as ca, first(ca), last(ca) as c2 from t1 interval(10s); + +sql desc streamt71; + +if $rows == 0 then + return -1 +endi + +sql create stream streamd10 into streamd10 as select _wstart, _wend, count(*), first(ca), last(cb) as c2 from t1 interval(10s); + +sql desc streamd10; + +if $rows == 0 then + return -1 +endi + +sql_error create stream streamd11 into streamd11 as select _wstart, _wend, count(*), last(ca), last(ca) from t1 interval(10s); + + +sql create stream streamd12 into streamd12 as select _wstart, _wend, count(*), last(ca), last(cb) as c2 from t1 interval(10s); + +sql desc streamd12; + +if $rows == 0 
then + return -1 +endi + sleep 3000 sql drop stream if exists streamd1; diff --git a/tests/script/tsim/stream/checkStreamSTable.sim b/tests/script/tsim/stream/checkStreamSTable.sim new file mode 100644 index 0000000000000000000000000000000000000000..2ed6958196e7defc114378f06f889d5b00b032a1 --- /dev/null +++ b/tests/script/tsim/stream/checkStreamSTable.sim @@ -0,0 +1,310 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +print ===== step1 + +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print ===== step2 + +sql create database result vgroups 1; + +sql create database test vgroups 4; +sql use test; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable result.streamt0(ts timestamp,a int,b int) tags(ta int,tb int,tc int); + +sql create stream streams0 trigger at_once into result.streamt0 as select _wstart, count(*) c1, max(a) c2 from st partition by tbname interval(10s); +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,2,2,3); + +$loop_count = 0 + +sql select _wstart, count(*) c1, max(a) c2 from st partition by tbname interval(10s); +print $data00, $data01, $data02 +print $data10, $data11, $data12 +print $data20, $data21, $data22 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result.streamt0 order by ta; + +if $rows != 2 then + print =====rows=$rows + print $data00, $data01, $data02 + print $data10, $data11, $data12 + print $data20, $data21, $data22 + goto loop0 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop0 +endi + +if $data02 != 1 then + print =====data02=$data02 + goto loop0 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop0 +endi + +if $data12 != 2 then + print =====data12=$data12 + goto loop0 +endi + +print ===== step3 + +sql create database result1 vgroups 1; + +sql create database test1 vgroups 4; +sql use test1; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable result1.streamt1(ts timestamp,a int,b int,c int) tags(ta bigint unsigned,tb int,tc int); + +sql create stream streams1 trigger at_once into result1.streamt1(ts,c,a,b) as select _wstart, count(*) c1, max(a),min(b) c2 from st partition by tbname interval(10s); +sql insert into t1 values(1648791213000,10,20,30); +sql insert into t2 values(1648791213000,40,50,60); + +$loop_count = 0 + +sql select _wstart, count(*) c1, max(a),min(b) c2 from st partition by tbname interval(10s); +print $data00, $data01, $data02, $data03 +print $data10, $data11, $data12, $data13 +print $data20, $data21, $data22, $data23 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result1.streamt1 order by ta; + +if $rows != 2 then + print =====rows=$rows + print $data00, $data01, $data02, $data03 + print $data10, $data11, $data12, $data13 + print $data20, $data21, $data22, $data23 + goto loop1 +endi + +if $data01 != 10 then + print =====data01=$data01 + goto loop1 +endi + +if $data02 != 20 then + print =====data02=$data02 + goto loop1 +endi + +if $data03 != 1 then + print =====data03=$data03 + goto loop1 +endi + +if $data11 != 40 then + print =====data11=$data11 + goto loop1 +endi + +if $data12 != 50 then + print 
=====data12=$data12 + goto loop1 +endi + +if $data13 != 1 then + print =====data13=$data13 + goto loop1 +endi + + +print ===== step4 + +sql create database result2 vgroups 1; + +sql create database test2 vgroups 4; +sql use test2; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable result2.streamt2(ts timestamp, a int , b int) tags(ta varchar(20)); + +# tag dest 1, source 2 +##sql_error create stream streams2 trigger at_once into result2.streamt2 TAGS(aa varchar(100), ta int) as select _wstart, count(*) c1, max(a) from st partition by tbname as aa, ta interval(10s); + +# column dest 3, source 4 +sql_error create stream streams2 trigger at_once into result2.streamt2 as select _wstart, count(*) c1, max(a), max(b) from st partition by tbname interval(10s); + +# column dest 3, source 4 +sql_error create stream streams2 trigger at_once into result2.streamt2(ts, a, b) as select _wstart, count(*) c1, max(a), max(b) from st partition by tbname interval(10s); + +# column dest 3, source 2 +sql_error create stream streams2 trigger at_once into result2.streamt2 as select _wstart, count(*) c1 from st partition by tbname interval(10s); + +# column dest 3, source 2 +sql create stream streams2 trigger at_once into result2.streamt2(ts, a) as select _wstart, count(*) c1 from st partition by tbname interval(10s); + + +print ===== step5 + +sql create database result3 vgroups 1; + +sql create database test3 vgroups 4; +sql use test3; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,2,3); +sql create table t2 using st tags(4,5,6); + +sql create stable result3.streamt3(ts timestamp,a int,b int,c int, d int) tags(ta int,tb int,tc int); + +sql create stream streams3 trigger at_once into result3.streamt3(ts,c,a,b) as select _wstart, count(*) c1, max(a),min(b) c2 from st interval(10s); + +sql insert into t1 values(1648791213000,10,20,30); +sql insert into t2 values(1648791213000,40,50,60); + +$loop_count = 0 + +sql select _wstart, count(*) c1, max(a),min(b) c2 from st interval(10s); +print $data00, $data01, $data02, $data03, $data04 +print $data10, $data11, $data12, $data13, $data14 +print $data20, $data21, $data22, $data23, $data24 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result3.streamt3; + +if $rows != 1 then + print =====rows=$rows + print $data00, $data01, $data02, $data03 + print $data10, $data11, $data12, $data13 + print $data20, $data21, $data22, $data23 + goto loop2 +endi + +if $data01 != 40 then + print =====data01=$data01 + goto loop2 +endi + +if $data02 != 20 then + print =====data02=$data02 + goto loop2 +endi + +if $data03 != 2 then + print =====data03=$data03 + goto loop2 +endi + +if $data04 != NULL then + print =====data04=$data04 + goto loop2 +endi + +print ===== step6 + +sql create database result4 vgroups 1; + +sql create database test4 vgroups 4; +sql use test4; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,2,3); +sql create table t2 using st tags(4,5,6); + +sql create stable result4.streamt4(ts timestamp,a int,b int,c int, d int) tags(ta int,tb int,tc int); + +sql create stream streams4 trigger at_once into result4.streamt4(ts,c,a,b) tags(tg2 int, tg3 varchar(100), tg1 bigint) subtable(concat("tbl-", tg1)) as select _wstart, 
count(*) c1, max(a),min(b) c2 from st partition by ta+1 as tg1, cast(tb as bigint) as tg2, tc as tg3 interval(10s); + +sql insert into t1 values(1648791213000,10,20,30); +sql insert into t2 values(1648791213000,40,50,60); + +$loop_count = 0 + +sql select _wstart, count(*) c1, max(a),min(b) c2 from st interval(10s); +print $data00, $data01, $data02, $data03 +print $data10, $data11, $data12, $data13 +print $data20, $data21, $data22, $data23 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result4.streamt4; + +if $rows != 2 then + print =====rows=$rows + print $data00, $data01, $data02, $data03 + print $data10, $data11, $data12, $data13 + print $data20, $data21, $data22, $data23 + goto loop2 +endi + +if $data01 != 40 then + print =====data01=$data01 + goto loop2 +endi + +if $data02 != 20 then + print =====data02=$data02 + goto loop2 +endi + +if $data03 != 2 then + print =====data03=$data03 + goto loop2 +endi + +if $data04 != NULL then + print =====data04=$data04 + goto loop2 +endi + +print ======over + +system sh/stop_dnodes.sh diff --git a/tests/script/tsim/stream/ignoreExpiredData.sim b/tests/script/tsim/stream/ignoreExpiredData.sim index 03f574bc529356f0d2ab60a9ffdaf89e0e8be083..b143b7977fe6e1de9e716ba32d153d076830478a 100644 --- a/tests/script/tsim/stream/ignoreExpiredData.sim +++ b/tests/script/tsim/stream/ignoreExpiredData.sim @@ -52,6 +52,7 @@ sql insert into t1 values(1648791213000,1,2,3,1.0); sql insert into t1 values(1648791223001,1,2,3,1.1); sql insert into t1 values(1648791233002,2,2,3,2.1); sql insert into t1 values(1648791243003,2,2,3,3.1); +sleep 300 sql insert into t1 values(1648791200000,4,2,3,4.1); $loop_count = 0 @@ -115,6 +116,7 @@ sql create stream stream_t1 trigger at_once IGNORE EXPIRED 1 into streamtST1 as sql create stream stream_t2 trigger at_once IGNORE EXPIRED 1 into streamtST2 as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st session(ts, 10s) ; sql insert into ts1 values(1648791211000,1,2,3); sql insert into ts1 values(1648791222001,2,2,3); +sleep 300 sql insert into ts2 values(1648791211000,1,2,3); sql insert into ts2 values(1648791222001,2,2,3); diff --git a/tests/script/tsim/stream/udTableAndTag0.sim b/tests/script/tsim/stream/udTableAndTag0.sim new file mode 100644 index 0000000000000000000000000000000000000000..5cb5c2dd8b4f14cd6910535b96c305296618b50a --- /dev/null +++ b/tests/script/tsim/stream/udTableAndTag0.sim @@ -0,0 +1,372 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +print ===== step1 + +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print ===== step2 +print ===== table name + +sql create database result vgroups 1; + +sql create database test vgroups 4; +sql use test; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +#sql_error create stream streams1 trigger at_once into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams1 trigger at_once into result.streamt SUBTABLE(concat("aaa-", tbname)) as select _wstart, count(*) c1 from st partition by tbname interval(10s); +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,1,2,3); + +$loop_count = 0 +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select table_name from 
information_schema.ins_tables where db_name="result" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop0 +endi + +if $data00 != aaa-t1 then + print =====data00=$data00 + goto loop0 +endi + +if $data10 != aaa-t2 then + print =====data10=$data10 + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result.streamt; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop1 +endi + + +print ===== step3 +print ===== tag name + +sql create database result2 vgroups 1; + +sql create database test2 vgroups 4; +sql use test2; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams2 trigger at_once into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as cc interval(10s); +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,1,2,3); + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select tag_name from information_schema.ins_tags where db_name="result2" and stable_name = "streamt2" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop2 +endi + +if $data00 != cc then + print data00 != cc + print =====data00=$data00 + goto loop2 +endi + +if $data10 != cc then + print =====data10=$data10 + goto loop2 +endi + +sql select cc from result2.streamt2 order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop2 +endi + +if $data00 != tag-t1 then + print data00 != tag-t1 + print =====data00=$data00 + goto loop2 +endi + +if $data10 != tag-t2 then + print =====data10=$data10 + goto loop2 +endi + +$loop_count = 0 +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result2.streamt2; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop3 +endi + + +print ===== step4 +print ===== tag name + table name + +sql create database result3 vgroups 1; + +sql create database test3 vgroups 4; +sql use test3; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams3 trigger at_once into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", tbname)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as dd, tbname interval(10s); +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,1,2,3); + + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select tag_name from information_schema.ins_tags where db_name="result3" and stable_name = "streamt3" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop4 +endi + +if $data00 != dd then + print =====data00=$data00 + goto loop4 +endi + +if $data10 != dd then + print =====data10=$data10 + goto loop4 +endi + +sql select dd from result3.streamt3 order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop4 +endi + +if $data00 != tag-t1 then + print =====data00=$data00 + goto loop4 +endi + 
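Every verification step in these new sim scripts follows the same poll-and-retry pattern: run the query, sleep 300 ms, and give up after 10 attempts (`$loop_count == 10` then `return -1`). For readers who want to reproduce one of these checks outside the sim framework, here is a minimal Python sketch of the same idea; it assumes the taospy connector (`taos.connect` / `query` / `fetch_all`) and a locally running dnode, and the helper name `wait_for_rows` is illustrative only, not part of this patch:

```python
import time
import taos  # TDengine Python connector (taospy); assumed to be installed


def wait_for_rows(conn, sql, expected, retries=10, delay=0.3):
    # Poll until the query returns the expected row count, mirroring the
    # "sleep 300 / $loop_count == 10" retry loops used throughout these scripts.
    for _ in range(retries):
        rows = conn.query(sql).fetch_all()
        if len(rows) == expected:
            return rows
        time.sleep(delay)
    raise AssertionError("expected %d rows from: %s" % (expected, sql))


conn = taos.connect()  # default host/user/password; adjust for your setup
# The stream's SUBTABLE(concat("tbn-", tbname)) clause should yield two child
# tables in database result3, one per source table.
tables = wait_for_rows(
    conn,
    'select table_name from information_schema.ins_tables '
    'where db_name="result3" order by 1',
    expected=2,
)
assert [row[0] for row in tables] == ["tbn-t1", "tbn-t2"]
```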
+if $data10 != tag-t2 then + print =====data10=$data10 + goto loop4 +endi + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result3.streamt3; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop5 +endi + +$loop_count = 0 +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select table_name from information_schema.ins_tables where db_name="result3" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop6 +endi + +if $data00 != tbn-t1 then + print =====data00=$data00 + goto loop6 +endi + +if $data10 != tbn-t2 then + print =====data10=$data10 + goto loop6 +endi + + +print ===== step5 +print ===== tag name + table name + +sql create database result4 vgroups 1; + +sql create database test4 vgroups 4; +sql use test4; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(3,3,3); + +sql create stream streams4 trigger at_once into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", tbname)) as select _wstart, count(*) c1 from st partition by concat("tag-", tbname) as dd, tbname interval(10s); +sql insert into t1 values(1648791213000,1,1,1) t2 values(1648791213000,2,2,2) t3 values(1648791213000,3,3,3); + + +$loop_count = 0 +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select table_name from information_schema.ins_tables where db_name="result4" order by 1; + +if $rows != 3 then + print =====rows=$rows + print $data00 $data10 + goto loop7 +endi + +if $data00 != tbn-t1 then + print =====data00=$data00 + goto loop7 +endi + +if $data10 != tbn-t2 then + print =====data10=$data10 + goto loop7 +endi + +if $data20 != tbn-t3 then + print =====data20=$data20 + goto loop7 +endi + +$loop_count = 0 +loop8: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result4.streamt4 order by 3; + +if $rows != 3 then + print =====rows=$rows + print $data00 $data10 + goto loop8 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop8 +endi + +if $data02 != tag-t1 then + print =====data02=$data02 + goto loop8 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop8 +endi + +if $data12 != tag-t2 then + print =====data12=$data12 + goto loop8 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop8 +endi + +if $data22 != tag-t3 then + print =====data22=$data22 + goto loop8 +endi + +print ======over + +system sh/stop_dnodes.sh diff --git a/tests/script/tsim/stream/udTableAndTag1.sim b/tests/script/tsim/stream/udTableAndTag1.sim new file mode 100644 index 0000000000000000000000000000000000000000..4229de2cf0701c47aa12d9a196cb41fe4c39ca31 --- /dev/null +++ b/tests/script/tsim/stream/udTableAndTag1.sim @@ -0,0 +1,373 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 + +print ===== step1 + +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print ===== step2 +print ===== table name + +sql create database result vgroups 1; + +sql create database test vgroups 4; +sql use test; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +#sql_error create stream streams1 
trigger at_once into result.streamt SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s); +sql create stream streams1 trigger at_once into result.streamt SUBTABLE( concat("aaa-", cast(a as varchar(10) ) ) ) as select _wstart, count(*) c1 from st partition by a interval(10s); +print ===== insert into 1 +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,2,2,3); + +$loop_count = 0 +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select table_name from information_schema.ins_tables where db_name="result" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + print $data20 $data30 + goto loop0 +endi + +if $data00 != aaa-1 then + print =====data00=$data00 + goto loop0 +endi + +if $data10 != aaa-2 then + print =====data10=$data10 + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result.streamt; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop1 +endi + + +print ===== step3 +print ===== column name + +sql create database result2 vgroups 1; + +sql create database test2 vgroups 4; +sql use test2; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams2 trigger at_once into result2.streamt2 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st partition by concat("col-", cast(a as varchar(10) ) ) as cc interval(10s); +print ===== insert into 2 +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,2,2,3); + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select tag_name from information_schema.ins_tags where db_name="result2" and stable_name = "streamt2" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop2 +endi + +if $data00 != cc then + print =====data00=$data00 + goto loop2 +endi + +if $data10 != cc then + print =====data10=$data10 + goto loop2 +endi + +sql select cc from result2.streamt2 order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop2 +endi + +if $data00 != col-1 then + print =====data00=$data00 + goto loop2 +endi + +if $data10 != col-2 then + print =====data10=$data10 + goto loop2 +endi + +$loop_count = 0 +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result2.streamt2; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop3 +endi + + +print ===== step4 +print ===== column name + table name + +sql create database result3 vgroups 1; + +sql create database test3 vgroups 4; +sql use test3; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams3 trigger at_once into result3.streamt3 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", cast(a as varchar(10) ) ) ) as select _wstart, count(*) c1 from st partition by concat("col-", cast(a as varchar(10) ) ) as dd, a interval(10s); +print ===== insert into 3 +sql insert into t1 values(1648791213000,1,2,3); +sql insert into t2 values(1648791213000,2,2,3); + + +$loop_count = 0 +loop4: + +sleep 300 + 
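+# Note: unlike udTableAndTag0.sim, this stream partitions by the data column a
+# rather than by tbname, so the user-defined tag dd ("col-1"/"col-2") and the
+# subtable names ("tbn-1"/"tbn-2") are derived from column values. The 300 ms
+# sleep gives the stream task time to materialize results before each retry.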
+$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select tag_name from information_schema.ins_tags where db_name="result3" and stable_name = "streamt3" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop4 +endi + +if $data00 != dd then + print =====data00=$data00 + goto loop4 +endi + +if $data10 != dd then + print =====data10=$data10 + goto loop4 +endi + +sql select dd from result3.streamt3 order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop4 +endi + +if $data00 != col-1 then + print =====data00=$data00 + goto loop4 +endi + +if $data10 != col-2 then + print =====data10=$data10 + goto loop4 +endi + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result3.streamt3; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop5 +endi + +$loop_count = 0 +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select table_name from information_schema.ins_tables where db_name="result3" order by 1; + +if $rows != 2 then + print =====rows=$rows + print $data00 $data10 + goto loop6 +endi + +if $data00 != tbn-1 then + print =====data00=$data00 + goto loop6 +endi + +if $data10 != tbn-2 then + print =====data10=$data10 + goto loop6 +endi + +print ===== step5 +print ===== tag name + table name + +sql create database result4 vgroups 1; + +sql create database test4 vgroups 4; +sql use test4; + + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(3,3,3); + +sql create stream streams4 trigger at_once into result4.streamt4 TAGS(dd varchar(100)) SUBTABLE(concat("tbn-", dd)) as select _wstart, count(*) c1 from st partition by concat("t", cast(a as varchar(10) ) ) as dd interval(10s); +sql insert into t1 values(1648791213000,1,1,1) t2 values(1648791213000,2,2,2) t3 values(1648791213000,3,3,3); + + +$loop_count = 0 +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select table_name from information_schema.ins_tables where db_name="result4" order by 1; + +if $rows != 3 then + print =====rows=$rows + print $data00 $data10 + goto loop7 +endi + +if $data00 != tbn-t1 then + print =====data00=$data00 + goto loop7 +endi + +if $data10 != tbn-t2 then + print =====data10=$data10 + goto loop7 +endi + +if $data20 != tbn-t3 then + print =====data20=$data20 + goto loop7 +endi + +$loop_count = 0 +loop8: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sql select * from result4.streamt4 order by 3; + +if $rows != 3 then + print =====rows=$rows + print $data00 $data10 + goto loop8 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop8 +endi + +if $data02 != t1 then + print =====data02=$data02 + goto loop8 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop8 +endi + +if $data12 != t2 then + print =====data12=$data12 + goto loop8 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop8 +endi + +if $data22 != t3 then + print =====data22=$data22 + goto loop8 +endi + +print ======over + +system sh/stop_dnodes.sh diff --git a/tests/script/tsim/stream/windowClose.sim b/tests/script/tsim/stream/windowClose.sim index 
9fcdcfb9599db5bbd2d4e93a069647a2518abaac..d3bc25d731f7a3800f0ae84d4ce2177cd573242a 100644 --- a/tests/script/tsim/stream/windowClose.sim +++ b/tests/script/tsim/stream/windowClose.sim @@ -68,7 +68,7 @@ sql select * from streamt2; if $rows != 1 then print ======streamt2=$rows - return -1 + goto loop1 endi sql select * from streamt3; @@ -80,7 +80,7 @@ endi sql select * from streamt4; if $rows != 1 then print ======streamt4=$rows - return -1 + goto loop1 endi sql select * from streamt5; @@ -92,7 +92,7 @@ endi sql select * from streamt6; if $rows != 1 then print ======streamt6=$rows - return -1 + goto loop1 endi sql select * from streamt7; @@ -104,7 +104,7 @@ endi sql select * from streamt8; if $rows != 1 then print ======streamt8=$rows - return -1 + goto loop1 endi sql select * from streamt9; @@ -116,7 +116,7 @@ endi sql select * from streamt10; if $rows != 1 then print ======streamt10=$rows - return -1 + goto loop1 endi sql select * from streamt11; @@ -125,4 +125,6 @@ if $rows != 2 then goto loop1 endi +print ======over + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..720eab74c42eba2dad40500c673d6a7427d3324b --- /dev/null +++ b/tests/system-test/0-others/information_schema.py @@ -0,0 +1,113 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + self.dbname = 'db' + self.stbname = 'stb' + self.binary_length = 20 # the length of binary for column_dict + self.nchar_length = 20 # the length of nchar for column_dict + self.ts = 1537146000000 + self.column_dict = { + 'ts' : 'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': f'binary({self.binary_length})', + 'col13': f'nchar({self.nchar_length})' + } + self.tbnum = 20 + self.rowNum = 10 + self.tag_dict = { + 't0':'int' + } + self.tag_values = [ + f'1' + ] + self.binary_str = 'taosdata' + self.nchar_str = '涛思数据' + self.ins_list = ['ins_dnodes','ins_mnodes','ins_modules','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ + 'ins_indexes','ins_stables','ins_tables','ins_tags','ins_columns','ins_users','ins_grants','ins_vgroups','ins_configs','ins_dnode_variables',\ + 'ins_topics','ins_subscriptions','ins_streams','ins_stream_tasks','ins_vnodes','ins_user_privileges'] + self.perf_list = ['perf_connections','perf_queries','perf_consumers','perf_trans','perf_apps'] + def insert_data(self,column_dict,tbname,row_num): + insert_sql = 
self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str) + for i in range(row_num): + insert_list = [] + self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts) + def prepare_data(self): + tdSql.execute(f"create database if not exists {self.dbname} vgroups 2") + tdSql.execute(f'use {self.dbname}') + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + for i in range(self.tbnum): + tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]})") + self.insert_data(self.column_dict,f'{self.stbname}_{i}',self.rowNum) + def count_check(self): + tdSql.query('select count(*) from information_schema.ins_tables') + tdSql.checkEqual(tdSql.queryResult[0][0],self.tbnum+len(self.ins_list)+len(self.perf_list)) + tdSql.query(f'select count(*) from information_schema.ins_tables where db_name = "{self.dbname}"') + tdSql.checkEqual(tdSql.queryResult[0][0],self.tbnum) + tdSql.query(f'select count(*) from information_schema.ins_tables where db_name = "{self.dbname}" and stable_name = "{self.stbname}"') + tdSql.checkEqual(tdSql.queryResult[0][0],self.tbnum) + tdSql.execute('create database db1') + tdSql.execute('create table stb1 (ts timestamp,c0 int) tags(t0 int)') + tdSql.execute('create table tb1 using stb1 tags(1)') + tdSql.query(f'select db_name, stable_name, count(*) from information_schema.ins_tables group by db_name, stable_name') + for i in tdSql.queryResult: + if i[0].lower() == 'information_schema': + tdSql.checkEqual(i[2],len(self.ins_list)) + elif i[0].lower() == self.dbname and i[1] == self.stbname: + tdSql.checkEqual(i[2],self.tbnum) + elif i[0].lower() == self.dbname and i[1] == 'stb1': + tdSql.checkEqual(i[2],1) + elif i[0].lower() == 'performance_schema': + tdSql.checkEqual(i[2],len(self.perf_list)) + tdSql.execute('create table db1.ntb (ts timestamp,c0 int)') + tdSql.query(f'select db_name, count(*) from information_schema.ins_tables group by db_name') + print(tdSql.queryResult) + for i in tdSql.queryResult: + if i[0].lower() == 'information_schema': + tdSql.checkEqual(i[1],len(self.ins_list)) + elif i[0].lower() == 'performance_schema': + tdSql.checkEqual(i[1],len(self.perf_list)) + elif i[0].lower() == self.dbname: + tdSql.checkEqual(i[1],self.tbnum+1) + def run(self): + self.prepare_data() + self.count_check() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py index 5e493eab0af2bd0cf5ebe463cbde889fa957041b..857a8e3a32cfab505629a3b8e41397a37b4b73bd 100644 --- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py +++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py @@ -29,7 +29,7 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self._conn = conn - self.defaultJSONStrType_value = "NCHAR" + self.defaultJSONStrType_value = "BINARY" def createDb(self, name="test", db_update_tag=0, protocol=None): if protocol == "telnet-tcp": @@ -939,7 +939,7 @@ class TDTestCase: input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0] try: self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None) - raise Exception("should not reach here") + # raise 
Exception("should not reach here") except SchemalessError as err: tdSql.checkNotEqual(err.errno, 0) diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py index 3d5c9197d1dbad6740e91dcc378dff2939e75d77..df1cc516c5b59f1431cea1f859b75bcd76988fc5 100644 --- a/tests/system-test/1-insert/time_range_wise.py +++ b/tests/system-test/1-insert/time_range_wise.py @@ -600,6 +600,11 @@ class TDTestCase: tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() + # add for TS-2440 + for i in range(self.rows): + tdSql.execute("drop database if exists db3 ") + tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m") + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/2-query/insert_null_none.py b/tests/system-test/2-query/insert_null_none.py index cf5636fb1fa5599cde9f096915667f0f4af7a3e8..4304dee89ef71b0d44c26567f393404fea8bedc2 100755 --- a/tests/system-test/2-query/insert_null_none.py +++ b/tests/system-test/2-query/insert_null_none.py @@ -24,7 +24,7 @@ from util.dnodes import tdDnodes from util.dnodes import * class TDTestCase: - updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"querySmaOptimize":1} + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 131 ,"querySmaOptimize":1} def init(self, conn, logSql, replicaVar): tdLog.debug("start to execute %s" % __file__) diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py index 1c2a6055bcf21522031f1eae519d8c082b311f78..df460df5c387acf863e1a7aab3b2de67ef040abe 100644 --- a/tests/system-test/2-query/json_tag.py +++ b/tests/system-test/2-query/json_tag.py @@ -408,8 +408,8 @@ class TDTestCase: # test group by & order by json tag tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc") tdSql.checkRows(11) - tdSql.checkData(0, 1, '"femail"') - tdSql.checkData(2, 1, '"收到货"') + tdSql.checkData(0, 1, '"收到货"') + tdSql.checkData(2, 1, '"femail"') tdSql.checkData(7, 1, "false") @@ -421,9 +421,10 @@ class TDTestCase: tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' desc") tdSql.checkRows(8) tdSql.checkData(0, 0, 2) - tdSql.checkData(0, 1, '"femail"') + tdSql.checkData(0, 1, '"收到货"') + tdSql.checkData(1, 1, '"femail"') tdSql.checkData(1, 0, 2) - tdSql.checkData(1, 1, '"收到货"') + tdSql.checkData(2, 0, 1) tdSql.checkData(2, 1, "11.000000000") tdSql.checkData(5, 0, 1) @@ -437,7 +438,7 @@ class TDTestCase: tdSql.checkData(5, 0, 1) tdSql.checkData(5, 1, "11.000000000") tdSql.checkData(7, 0, 2) - tdSql.checkData(7, 1, '"femail"') + tdSql.checkData(7, 1, '"收到货"') # test stddev with group by json tag tdSql.query(f"select stddev(dataint),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'") @@ -445,8 +446,8 @@ class TDTestCase: tdSql.checkData(0, 1, None) tdSql.checkData(4, 0, 0) tdSql.checkData(4, 1, "5.000000000") - tdSql.checkData(7, 0, 11) - tdSql.checkData(7, 1, '"femail"') + tdSql.checkData(6, 0, 11) + tdSql.checkData(7, 1, '"收到货"') res = tdSql.getColNameList(f"select stddev(dataint),jsons1.jtag->'tag1' from {dbname}.jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'") cname_list = [] diff --git a/tests/system-test/2-query/nestedQuery.py b/tests/system-test/2-query/nestedQuery.py index 034ab8dcdcc761b6a6abcf550e129a864b328fce..6557aad05f7dc0d3021f1e0bbba8dfb08964ad0f 100755 --- a/tests/system-test/2-query/nestedQuery.py +++ 
b/tests/system-test/2-query/nestedQuery.py @@ -24,9 +24,9 @@ from util.dnodes import tdDnodes from util.dnodes import * class TDTestCase: - updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 131 ,"cDebugFlag":131,"uDebugFlag":131 ,"rpcDebugFlag":131 , "tmrDebugFlag":131 , + "jniDebugFlag":131 ,"simDebugFlag":131,"dDebugFlag":131, "dDebugFlag":131,"vDebugFlag":131,"mDebugFlag":131,"qDebugFlag":131, + "wDebugFlag":131,"sDebugFlag":131,"tsdbDebugFlag":131,"tqDebugFlag":131 ,"fsDebugFlag":131 ,"fnDebugFlag":131} def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) @@ -851,6 +851,7 @@ class TDTestCase: tdLog.info("========mark==%s==="% mark); try: tdSql.query(sql,queryTimes=1) + self.explain_sql(sql) except: tdLog.info("sql is not support :=====%s; " %sql) tdSql.error(sql) @@ -4995,9 +4996,7 @@ class TDTestCase: sql += "%s ;" % random.choice(self.limit_u_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-2') tdSql.query("select 15-2.2 from stable_1;") for i in range(self.fornum): @@ -5013,9 +5012,7 @@ class TDTestCase: sql += "%s ;" % random.choice(self.limit_u_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-2.2') self.restartDnodes() tdSql.query("select 15-3 from stable_1;") @@ -5033,9 +5030,7 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-3') tdSql.query("select 15-4 from stable_1;") for i in range(self.fornum): @@ -5052,9 +5047,7 @@ class TDTestCase: sql += "%s " % random.choice(self.limit_u_where) tdLog.info(sql) tdLog.info(len(sql)) - tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-4') tdSql.query("select 15-4.2 from stable_1;") for i in range(self.fornum): @@ -5087,8 +5080,7 @@ class TDTestCase: tdLog.info(sql) tdLog.info(len(sql)) tdSql.query(sql) - self.cur1.execute(sql) - self.explain_sql(sql) + self.data_check(sql,mark='15-5') #16 select * from (select calc_aggregate_regulars as agg from regular_table where <\>\in\and\or order by limit offset ) #self.dropandcreateDB_random("%s" %db, 1) diff --git a/tests/system-test/2-query/odbc.py b/tests/system-test/2-query/odbc.py new file mode 100644 index 0000000000000000000000000000000000000000..c682d79c42e2de7f6eb33dbfc0f7e8618caa126a --- /dev/null +++ b/tests/system-test/2-query/odbc.py @@ -0,0 +1,76 @@ +import taos +import sys +import datetime +import inspect + +from util.log import * +from util.sql import * +from util.cases import * +from util.common import tdCom + +class TDTestCase: + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def check_ins_cols(self): + tdSql.execute("create database if not exists db") + tdSql.execute("create table db.ntb (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 tinyint unsigned, c7 
smallint unsigned, c8 int unsigned, c9 bigint unsigned, c10 float, c11 double, c12 varchar(100), c13 nchar(100))") + tdSql.execute("create table db.stb (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned, c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t int)") + tdSql.execute("insert into db.ctb using db.stb tags(1) (ts, c1) values (now, 1)") + + tdSql.query("select count(*) from information_schema.ins_columns") + tdSql.checkData(0, 0, 267) + + tdSql.query("select * from information_schema.ins_columns where table_name = 'ntb'") + tdSql.checkRows(14) + tdSql.checkData(0, 2, "NORMAL_TABLE") + + + tdSql.query("select * from information_schema.ins_columns where table_name = 'stb'") + tdSql.checkRows(14) + tdSql.checkData(0, 2, "SUPER_TABLE") + + + tdSql.query("select db_name,table_type,col_name,col_type,col_length from information_schema.ins_columns where table_name = 'ctb'") + tdSql.checkRows(14) + tdSql.checkData(0, 0, "db") + tdSql.checkData(1, 1, "CHILD_TABLE") + tdSql.checkData(3, 2, "c3") + tdSql.checkData(4, 3, "INT") + tdSql.checkData(5, 4, 8) + + tdSql.query("desc information_schema.ins_columns") + tdSql.checkRows(9) + tdSql.checkData(0, 0, "table_name") + tdSql.checkData(5, 0, "col_length") + tdSql.checkData(1, 2, 64) + + def check_get_db_name(self): + buildPath = tdCom.getBuildPath() + cmdStr = '%s/build/bin/get_db_name_test'%(buildPath) + tdLog.info(cmdStr) + ret = os.system(cmdStr) + if ret != 0: + tdLog.exit("sml_test get_db_name_test != 0") + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare(replica = self.replicaVar) + + tdLog.printNoPrefix("==========start check_ins_cols run ...............") + self.check_ins_cols() + tdLog.printNoPrefix("==========end check_ins_cols run ...............") + + tdLog.printNoPrefix("==========start check_get_db_name run ...............") + self.check_get_db_name() + tdLog.printNoPrefix("==========end check_get_db_name run ...............") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/out_of_order.py b/tests/system-test/2-query/out_of_order.py new file mode 100644 index 0000000000000000000000000000000000000000..5b52661bae2244d482e150f7afcfde58e8cca123 --- /dev/null +++ b/tests/system-test/2-query/out_of_order.py @@ -0,0 +1,191 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import random + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql, replicaVar): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run_benchmark(self,dbname,tables,per_table_num,order,replica): + #O :Out of order + #A :Repliaca + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + os.system("%staosBenchmark -d %s -t %d -n %d -O %d -a %d -b float,double,nchar\(200\),binary\(50\) -T 50 -y " % (binPath,dbname,tables,per_table_num,order,replica)) + + def sql_base(self,dbname): + self.check_sub(dbname) + sql1 = "select count(*) from %s.meters" %dbname + self.sql_base_check(sql1,sql1) + + self.check_sub(dbname) + sql2 = "select count(ts) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(_c0) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c0) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c1) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c2) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c3) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t0) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t1) from %s.meters" %dbname + self.sql_base_check(sql1,sql2) + + + self.check_sub(dbname) + sql2 = "select count(ts) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(_c0) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c0) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c1) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c2) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(c3) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t0) from (select * from %s.meters)" %dbname + self.sql_base_check(sql1,sql2) + + self.check_sub(dbname) + sql2 = "select count(t1) from (select * from %s.meters)" %dbname + 
self.sql_base_check(sql1,sql2) + + + def sql_base_check(self,sql1,sql2): + tdSql.query(sql1) + sql1_result = tdSql.getData(0,0) + tdLog.info("sql:%s , result: %s" %(sql1,sql1_result)) + + tdSql.query(sql2) + sql2_result = tdSql.getData(0,0) + tdLog.info("sql:%s , result: %s" %(sql2,sql2_result)) + + if sql1_result==sql2_result: + tdLog.info(f"checkEqual success, sql1_result={sql1_result},sql2_result={sql2_result}") + else : + tdLog.exit(f"checkEqual error, sql1_result=={sql1_result},sql2_result={sql2_result}") + + def run_sql(self,dbname): + self.sql_base(dbname) + + tdSql.execute(" flush database %s;" %dbname) + + self.sql_base(dbname) + + def check_sub(self,dbname): + + sql = "select count(*) from (select distinct(tbname) from %s.meters)" %dbname + tdSql.query(sql) + num = tdSql.getData(0,0) + + for i in range(0,num): + sql1 = "select count(*) from %s.d%d" %(dbname,i) + tdSql.query(sql1) + sql1_result = tdSql.getData(0,0) + tdLog.info("sql:%s , result: %s" %(sql1,sql1_result)) + + def check_out_of_order(self,dbname,tables,per_table_num,order,replica): + self.run_benchmark(dbname,tables,per_table_num,order,replica) + print("sleep 10 seconds") + #time.sleep(10) + print("sleep 10 seconds finish") + + self.run_sql(dbname) + + def run(self): + startTime = time.time() + + #self.check_out_of_order('db1',10,random.randint(10000,50000),random.randint(1,10),1) + self.check_out_of_order('db1',random.randint(50,200),random.randint(10000,20000),random.randint(1,5),1) + + # self.check_out_of_order('db2',random.randint(50,200),random.randint(10000,50000),random.randint(5,50),1) + + # self.check_out_of_order('db3',random.randint(50,200),random.randint(10000,50000),random.randint(50,100),1) + + # self.check_out_of_order('db4',random.randint(50,200),random.randint(10000,50000),100,1) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index d5439d05de2ed6471e2dae8627574296340b7f94..ec6309c71ad295eb504d8f97b493a062a044fed1 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -15,6 +15,9 @@ sys.path.append("./7-tmq") from tmqCommon import * class TDTestCase: + updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost'}, 'fqdn': 'localhost'} + print("===================: ", updatecfgDict) + def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") @@ -22,8 +25,10 @@ class TDTestCase: #tdSql.init(conn.cursor(), logSql) # output sql.txt file def checkFileContent(self, dbname="sml_db"): + simClientCfg="%s/taos.cfg"%tdDnodes.getSimCfgPath() buildPath = tdCom.getBuildPath() - cmdStr = '%s/build/bin/sml_test'%(buildPath) + cmdStr = '%s/build/bin/sml_test %s'%(buildPath, simClientCfg) + print("cmdStr:", cmdStr) tdLog.info(cmdStr) ret = os.system(cmdStr) if ret != 0: diff --git a/tests/system-test/2-query/stablity.py b/tests/system-test/2-query/stablity.py index ff026bf1202af2f3e69b2f5316f193c4cdc54296..5e4d5dcbaf0368cbc5f5955d4bd172131eddfacf 100755 --- a/tests/system-test/2-query/stablity.py +++ b/tests/system-test/2-query/stablity.py @@ -24,9 +24,9 @@ from util.dnodes import tdDnodes from util.dnodes import * class TDTestCase: - updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 
,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143} + updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 131 ,"cDebugFlag":131,"uDebugFlag":131 ,"rpcDebugFlag":131 , "tmrDebugFlag":131 , + "jniDebugFlag":131 ,"simDebugFlag":131,"dDebugFlag":131, "dDebugFlag":131,"vDebugFlag":131,"mDebugFlag":131,"qDebugFlag":131, + "wDebugFlag":131,"sDebugFlag":131,"tsdbDebugFlag":131,"tqDebugFlag":131 ,"fsDebugFlag":131 ,"fnDebugFlag":131} def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py b/tests/system-test/6-cluster/5dnode3mnodeDrop.py index de9207ddd8e15f8d8d727d0427f8531ac8aff46c..9dd3c568054948d7062c2f87f145149509b1b575 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py @@ -112,7 +112,8 @@ class TDTestCase: dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" tdLog.debug(cmd) - os.system(cmd) + if os.system(cmd) != 0: + raise Exception("failed to execute system command. cmd: %s" % cmd) time.sleep(2) tdLog.info(" create cluster with %d dnode done! " %dnodes_nums) @@ -120,7 +121,7 @@ class TDTestCase: def check3mnode(self): count=0 while count < 10: - time.sleep(1) + time.sleep(0.1) tdSql.query("select * from information_schema.ins_mnodes;") if tdSql.checkRows(3) : tdLog.debug("mnode is three nodes") @@ -157,7 +158,7 @@ class TDTestCase: def check3mnode1off(self): count=0 while count < 10: - time.sleep(1) + time.sleep(0.1) tdSql.query("select * from information_schema.ins_mnodes;") if tdSql.checkRows(3) : tdLog.debug("mnode is three nodes") @@ -189,7 +190,7 @@ class TDTestCase: def check3mnode2off(self): count=0 while count < 40: - time.sleep(1) + time.sleep(0.1) tdSql.query("select * from information_schema.ins_mnodes;") if tdSql.checkRows(3) : tdLog.debug("mnode is three nodes") @@ -219,7 +220,7 @@ class TDTestCase: def check3mnode3off(self): count=0 while count < 10: - time.sleep(1) + time.sleep(0.1) tdSql.query("select * from information_schema.ins_mnodes;") if tdSql.checkRows(3) : tdLog.debug("mnode is three nodes") @@ -279,32 +280,47 @@ class TDTestCase: # drop follower of mnode dropcount =0 - while dropcount <= 10: + while dropcount <= 5: for i in range(1,3): tdLog.debug("drop mnode on dnode %d"%(i+1)) tdSql.execute("drop mnode on dnode %d"%(i+1)) tdSql.query("select * from information_schema.ins_mnodes;") count=0 while count<10: - time.sleep(1) + time.sleep(0.1) tdSql.query("select * from information_schema.ins_mnodes;") - if tdSql.checkRows(2): + if tdSql.queryRows == 2: tdLog.debug("drop mnode %d successfully"%(i+1)) break count+=1 + self.wait_for_transactions(100) + tdLog.debug("create mnode on dnode %d"%(i+1)) tdSql.execute("create mnode on dnode %d"%(i+1)) count=0 while count<10: - time.sleep(1) + time.sleep(0.1) tdSql.query("select * from information_schema.ins_mnodes;") - if tdSql.checkRows(3): - tdLog.debug("drop mnode %d successfully"%(i+1)) + if tdSql.queryRows == 3: + tdLog.debug("create mnode %d successfully"%(i+1)) break count+=1 + self.wait_for_transactions(100) dropcount+=1 self.check3mnode() + def wait_for_transactions(self, timeout): + count=0 + while count= 
timeout: + tdLog.debug("transactions not finished before timeout (%d secs)"%timeout) def getConnection(self, dnode): host = dnode.cfgDict["fqdn"] diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py index 9bb8c4cc0d953d4c03fa772427523b477896a90d..8ed4a6df973b57f7302d5a2c193debffbf7286a1 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py @@ -130,7 +130,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - tmqCom.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) time.sleep(10) for i in range(len(topicNameList)): diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py index 009862137f8ccbb45f0f9bbfa134ae1a5b0479f1..4dcc0b963f8e883326132f5db3bc04a6edd349d1 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -116,7 +116,7 @@ class TDTestCase: topicList = topicNameList[0] ifcheckdata = 1 ifManualCommit = 1 - keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) consumerId = 4 @@ -188,7 +188,7 @@ class TDTestCase: topicList = topicNameList[0] ifcheckdata = 1 ifManualCommit = 1 - keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor 0") diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py index 528b3a80887c3cc62b1092e5183249e962f5e8db..da8ac6c57deaf99d9871134489b290381b570306 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py @@ -116,7 +116,7 @@ class TDTestCase: topicList = topicNameList[0] ifcheckdata = 1 ifManualCommit = 1 - keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) consumerId = 4 @@ -188,7 +188,7 @@ class TDTestCase: topicList = topicNameList[0] ifcheckdata = 1 ifManualCommit = 1 - keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest' + keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor 0") diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index b974e4a41a2c60d4c882b2006400f500ec799efc..db2ec3285dd87de532a29021142e4285c05718db 100644 --- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -206,7 +206,7 @@ class 
TDTestCase: paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 if self.snapshot == 0: - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1/2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1)) diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py index d5df88cf43b1e207f3856807bb9b0bcf55b4b8c6..daffff44c1cf0dda7c4ecf5ac2dfd3dadfdf5504 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py @@ -213,9 +213,9 @@ class TDTestCase: paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 if self.snapshot == 0: - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2)) + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1/2) * (1/2*3)) elif self.snapshot == 1: - expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2)) + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2)) topicList = topicFromStb1 ifcheckdata = 1 diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index 1e7d0ed1406d9fc251ffdb6319fede131febef78..342d6410d29f1b94d80b508c05507a0f5b81cdec 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -26,6 +26,10 @@ ELSE () SET(LINK_WEBSOCKET "") ENDIF () +IF (CUS_NAME OR CUS_PROMPT OR CUS_EMAIL) + ADD_DEFINITIONS(-I${CMAKE_CURRENT_SOURCE_DIR}/../../../enterprise/packaging) +ENDIF (CUS_NAME OR CUS_PROMPT OR CUS_EMAIL) + if(TD_WINDOWS) target_link_libraries(shell PUBLIC taos_static ${LINK_WEBSOCKET}) else() diff --git a/tools/shell/inc/shellAuto.h b/tools/shell/inc/shellAuto.h index b7bf5fa1019502acbeaefc8884d4553704f58702..f3ea87e4a540aa34084e0c3c5d91ad58bbae1adf 100644 --- a/tools/shell/inc/shellAuto.h +++ b/tools/shell/inc/shellAuto.h @@ -24,13 +24,13 @@ void pressTabKey(SShellCmd* cmd); // press othr key void pressOtherKey(char c); -// init shell auto funciton , shell start call once +// init shell auto function , shell start call once bool shellAutoInit(); // set conn void shellSetConn(TAOS* conn); -// exit shell auto funciton, shell exit call once +// exit shell auto function, shell exit call once void shellAutoExit(); // callback autotab module diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h index af724c1533177c2203d58b6f5cd1f02fc236f5c4..1fe09f586356554d017bfbb572dab84ab16baf52 100644 --- a/tools/shell/inc/shellInt.h +++ b/tools/shell/inc/shellInt.h @@ -80,8 +80,9 @@ typedef struct { } SShellArgs; typedef struct { - const char* clientVersion; - const char* promptHeader; + const char *clientVersion; + char cusName[32]; + char promptHeader[32]; const char* promptContinue; const char* osname; int32_t promptSize; @@ -147,5 +148,6 @@ void shellRunSingleCommandWebsocketImp(char *command); // shellMain.c extern SShellObj shell; +extern void tscWriteCrashInfo(int signum, void *sigInfo, void *context); #endif /*_TD_SHELL_INT_H_*/ diff --git a/tools/shell/inc/shellTire.h b/tools/shell/inc/shellTire.h index bdcf7bcfb3310832b60cc66af5d07c16b7eb6134..e87c3ee4f33726171d107544a137bac7b649174a 100644 --- a/tools/shell/inc/shellTire.h +++ b/tools/shell/inc/shellTire.h @@ -19,7 +19,7 @@ // // The prefix search tree is a efficient storage words and search words tree, it support 95 visible ascii code character // -#define FIRST_ASCII 40 // first visiable char is '0' 
+#define FIRST_ASCII 40 // first visible char is '0' #define LAST_ASCII 122 // last visilbe char is 'z' // capacity save char is 95 diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 4d40de66bde2c8504369110c436275b27268ac08..d899249b97f0160d366db385653c1c65a9864d9a 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -19,10 +19,25 @@ #include "shellInt.h" -#define TAOS_CONSOLE_PROMPT_HEADER "taos> " +#ifndef CUS_NAME + char cusName[] = "TDengine"; +#endif + +#ifndef CUS_PROMPT + char cusPrompt[] = "taos"; +#endif + +#ifndef CUS_EMAIL + char cusEmail[] = ""; +#endif + +#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL) +#include "cus_name.h" +#endif + #define TAOS_CONSOLE_PROMPT_CONTINUE " -> " -#define SHELL_HOST "TDengine server FQDN to connect. The default host is localhost." +#define SHELL_HOST "The server FQDN to connect. The default host is localhost." #define SHELL_PORT "The TCP/IP port number to use for the connection." #define SHELL_USER "The user name to use when connecting to the server." #define SHELL_PASSWORD "The password to use when connecting to the server." @@ -41,7 +56,6 @@ #define SHELL_PKT_LEN "Packet length used for net test, default is 1024 bytes." #define SHELL_PKT_NUM "Packet numbers used for net test, default is 100." #define SHELL_VERSION "Print program version." -#define SHELL_EMAIL "" #ifdef WEBSOCKET #define SHELL_DSN "The dsn to use when connecting to cloud server." @@ -78,7 +92,7 @@ void shellPrintHelp() { #endif printf("%s%s%s%s\r\n", indent, "-w,", indent, SHELL_WIDTH); printf("%s%s%s%s\r\n", indent, "-V,", indent, SHELL_VERSION); - printf("\r\n\r\nReport bugs to %s.\r\n", SHELL_EMAIL); + printf("\r\n\r\nReport bugs to %s.\r\n", cusEmail); } #ifdef LINUX @@ -86,7 +100,7 @@ void shellPrintHelp() { #include const char *argp_program_version = version; -const char *argp_program_bug_address = SHELL_EMAIL; +const char *argp_program_bug_address = cusEmail; static struct argp_option shellOptions[] = { {"host", 'h', "HOST", 0, SHELL_HOST}, @@ -388,12 +402,13 @@ static int32_t shellCheckArgs() { int32_t shellParseArgs(int32_t argc, char *argv[]) { shellInitArgs(argc, argv); - shell.info.clientVersion = - "Welcome to the TDengine Command Line Interface, Client Version:%s\r\n" - "Copyright (c) 2022 by TDengine, all rights reserved.\r\n\r\n"; - shell.info.promptHeader = TAOS_CONSOLE_PROMPT_HEADER; + shell.info.clientVersion = + "Welcome to the %s Command Line Interface, Client Version:%s\r\n" + "Copyright (c) 2022 by %s, all rights reserved.\r\n\r\n"; + strcpy(shell.info.cusName, cusName); + sprintf(shell.info.promptHeader, "%s> ", cusPrompt); shell.info.promptContinue = TAOS_CONSOLE_PROMPT_CONTINUE; - shell.info.promptSize = 6; + shell.info.promptSize = strlen(shell.info.promptHeader); snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), "version: %s", version); #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index d49293cc8dd47d7c0e3ac0c472b50101ef5a1594..81af5d7fe8e860a242256160aa133124d2cf2503 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -264,7 +264,7 @@ char* key_tags[] = {"tags("}; char* key_select[] = {"select "}; // -// ------- gobal variant define --------- +// ------- global variant define --------- // int32_t firstMatchIndex = -1; // first match shellCommands index int32_t lastMatchIndex = -1; // last match shellCommands index @@ -329,7 +329,15 @@ 
int cntDel = 0; // delete byte count after next press tab // show auto tab introduction void printfIntroduction() { printf(" ****************************** Tab Completion **********************************\n"); - printf(" * The TDengine CLI supports tab completion for a variety of items, *\n"); + char secondLine[160] = "\0"; + sprintf(secondLine, " * The %s CLI supports tab completion for a variety of items, ", + shell.info.cusName); + printf("%s", secondLine); + int secondLineLen = strlen(secondLine); + while (84-(secondLineLen++) > 0) { + printf(" "); + } + printf("*\n"); printf(" * including database names, table names, function names and keywords. *\n"); printf(" * The full list of shortcut keys is as follows: *\n"); printf(" * [ TAB ] ...... complete the current word *\n"); @@ -344,7 +352,7 @@ void printfIntroduction() { } void showHelp() { - printf("\nThe TDengine CLI supports the following commands:"); + printf("\nThe %s CLI supports the following commands:", shell.info.cusName); printf( "\n\ ----- A ----- \n\ @@ -595,7 +603,7 @@ void GenerateVarType(int type, char** p, int count) { // -------------------- shell auto ---------------- // -// init shell auto funciton , shell start call once +// init shell auto function , shell start call once bool shellAutoInit() { // command int32_t count = SHELL_COMMAND_COUNT(); @@ -628,7 +636,7 @@ bool shellAutoInit() { // set conn void shellSetConn(TAOS* conn) { varCon = conn; } -// exit shell auto funciton, shell exit call once +// exit shell auto function, shell exit call once void shellAutoExit() { // free command int32_t count = SHELL_COMMAND_COUNT(); @@ -645,7 +653,7 @@ void shellAutoExit() { } } taosThreadMutexUnlock(&tiresMutex); - // destory + // destroy taosThreadMutexDestroy(&tiresMutex); // free threads @@ -666,7 +674,7 @@ void shellAutoExit() { // // ------------------- auto ptr for tires -------------------------- // -bool setNewAuotPtr(int type, STire* pNew) { +bool setNewAutoPtr(int type, STire* pNew) { if (pNew == NULL) return false; taosThreadMutexLock(&tiresMutex); @@ -709,16 +717,13 @@ void putBackAutoPtr(int type, STire* tire) { if (tires[type] != tire) { // update by out, can't put back , so free if (--tire->ref == 1) { - // support multi thread getAuotPtr + // support multi thread getAutoPtr freeTire(tire); } } else { tires[type]->ref--; ASSERT(tires[type]->ref > 0); - if (tires[type]->ref <= 0) { - return; - } } taosThreadMutexUnlock(&tiresMutex); @@ -767,7 +772,7 @@ int writeVarNames(int type, TAOS_RES* tres) { } while (row != NULL); // replace old tire - setNewAuotPtr(type, tire); + setNewAutoPtr(type, tire); return numOfRows; } @@ -1035,7 +1040,7 @@ SWords* matchCommand(SWords* input, bool continueSearch) { for (int32_t i = 0; i < count; i++) { SWords* shellCommand = shellCommands + i; if (continueSearch && lastMatchIndex != -1 && i <= lastMatchIndex) { - // new match must greate than lastMatchIndex + // new match must greater than lastMatchIndex if (varMode && i == lastMatchIndex) { // do nothing, var match on lastMatchIndex } else { @@ -1164,7 +1169,7 @@ void createInputFromFirst(SWords* input, SWords* firstMatch) { for (int i = 0; i < firstMatch->matchIndex && word; i++) { // combine source from each word strncpy(input->source + input->source_len, word->word, word->len); - strcat(input->source, " "); // append blank splite + strcat(input->source, " "); // append blank space input->source_len += word->len + 1; // 1 is blank length // move next word = word->next; @@ -1393,7 +1398,7 @@ bool appendAfterSelect(TAOS* con, 
SShellCmd* cmd, char* sql, int32_t len) { return true; } - // fill funciton + // fill function if (fieldEnd) { // fields is end , need match keyword ret = fillWithType(con, cmd, last, WT_VAR_KEYWORD); @@ -1576,7 +1581,7 @@ bool matchCreateTable(TAOS* con, SShellCmd* cmd) { // tb options if (!ret) { - // find like create talbe st (...) tags(..) + // find like create table st (...) tags(..) char* p1 = strchr(ps, ')'); // first ')' end if (p1) { if (strchr(p1 + 1, ')')) { // second ')' end diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 91ff50110207cac7296cd1fdd4126315d62728f9..1f79cfcc0455d5aa911b154bce9817e264da9d03 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -1072,7 +1072,8 @@ void *shellThreadLoop(void *arg) { } int32_t shellExecute() { - printf(shell.info.clientVersion, taos_get_client_info()); + printf(shell.info.clientVersion, shell.info.cusName, + taos_get_client_info(), shell.info.cusName); fflush(stdout); SShellArgs *pArgs = &shell.args; @@ -1127,7 +1128,7 @@ int32_t shellExecute() { } if (tsem_init(&shell.cancelSem, 0, 0) != 0) { - printf("failed to create cancel semphore\r\n"); + printf("failed to create cancel semaphore\r\n"); return -1; } @@ -1136,10 +1137,8 @@ int32_t shellExecute() { taosSetSignal(SIGTERM, shellQueryInterruptHandler); taosSetSignal(SIGHUP, shellQueryInterruptHandler); - taosSetSignal(SIGABRT, shellQueryInterruptHandler); - taosSetSignal(SIGINT, shellQueryInterruptHandler); - + #ifdef WEBSOCKET if (!shell.args.restful && !shell.args.cloud) { #endif diff --git a/tools/shell/src/shellMain.c b/tools/shell/src/shellMain.c index fa3c0f2585ad6d6ef14c3dda35fb64024a521a02..22b8e89959593e9da18124908fedac31bd6b45ed 100644 --- a/tools/shell/src/shellMain.c +++ b/tools/shell/src/shellMain.c @@ -19,6 +19,29 @@ SShellObj shell = {0}; + +void shellCrashHandler(int signum, void *sigInfo, void *context) { + taosIgnSignal(SIGTERM); + taosIgnSignal(SIGHUP); + taosIgnSignal(SIGINT); + taosIgnSignal(SIGBREAK); + +#if !defined(WINDOWS) + taosIgnSignal(SIGBUS); +#endif + taosIgnSignal(SIGABRT); + taosIgnSignal(SIGFPE); + taosIgnSignal(SIGSEGV); + + tscWriteCrashInfo(signum, sigInfo, context); + +#ifdef _TD_DARWIN_64 + exit(signum); +#elif defined(WINDOWS) + exit(signum); +#endif +} + int main(int argc, char *argv[]) { shell.exit = false; #ifdef WEBSOCKET @@ -26,6 +49,13 @@ int main(int argc, char *argv[]) { shell.args.cloud = true; #endif +#if !defined(WINDOWS) + taosSetSignal(SIGBUS, shellCrashHandler); +#endif + taosSetSignal(SIGABRT, shellCrashHandler); + taosSetSignal(SIGFPE, shellCrashHandler); + taosSetSignal(SIGSEGV, shellCrashHandler); + if (shellCheckIntSize() != 0) { return -1; } diff --git a/tools/shell/src/shellUtil.c b/tools/shell/src/shellUtil.c index 8c47d165557317dbcf710dda7d72b77037340692..e15b49efcc35da2682d003243c0a19eb278acbc7 100644 --- a/tools/shell/src/shellUtil.c +++ b/tools/shell/src/shellUtil.c @@ -50,19 +50,19 @@ bool shellRegexMatch(const char *s, const char *reg, int32_t cflags) { int32_t shellCheckIntSize() { if (sizeof(int8_t) != 1) { - printf("taos int8 size is %d(!= 1)", (int)sizeof(int8_t)); + printf("int8 size is %d(!= 1)", (int)sizeof(int8_t)); return -1; } if (sizeof(int16_t) != 2) { - printf("taos int16 size is %d(!= 2)", (int)sizeof(int16_t)); + printf("int16 size is %d(!= 2)", (int)sizeof(int16_t)); return -1; } if (sizeof(int32_t) != 4) { - printf("taos int32 size is %d(!= 4)", (int)sizeof(int32_t)); + printf("int32 size is %d(!= 4)", (int)sizeof(int32_t)); return -1; } if 
(sizeof(int64_t) != 8) { - printf("taos int64 size is %d(!= 8)", (int)sizeof(int64_t)); + printf("int64 size is %d(!= 8)", (int)sizeof(int64_t)); return -1; } return 0; @@ -80,7 +80,7 @@ void shellGenerateAuth() { void shellDumpConfig() { SConfig *pCfg = taosGetCfg(); if (pCfg == NULL) { - printf("TDengine read global config failed!\r\n"); + printf("read global config failed!\r\n"); } else { cfgDumpCfg(pCfg, 1, true); } diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index bbb127b12846f113ee1583940ee8d005623bf22b..e3584b689009f9ca120af6e6a29e7ab8c643ec29 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -235,7 +235,7 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (reconnectNum == 0) { continue; } else { - fprintf(stderr, "TDengine server is disconnected, will try to reconnect\n"); + fprintf(stderr, "The server is disconnected, will try to reconnect\n"); } return; } diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index b048b79e9b71058faaccdadeb723c1e0f46dce58..6ca266c55533b644c47ad61eb3bebc273ae7be7f 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -4,6 +4,7 @@ add_executable(tmq_sim tmqSim.c) add_executable(create_table createTable.c) add_executable(tmq_taosx_ci tmq_taosx_ci.c) add_executable(sml_test sml_test.c) +add_executable(get_db_name_test get_db_name_test.c) target_link_libraries( create_table PUBLIC taos_static @@ -40,3 +41,11 @@ target_link_libraries( PUBLIC common PUBLIC os ) + +target_link_libraries( + get_db_name_test + PUBLIC taos_static + PUBLIC util + PUBLIC common + PUBLIC os +) diff --git a/utils/test/c/get_db_name_test.c b/utils/test/c/get_db_name_test.c new file mode 100644 index 0000000000000000000000000000000000000000..ebbfdc84a77a0eb3d76a5b561c9b280b3930c7a6 --- /dev/null +++ b/utils/test/c/get_db_name_test.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include +#include +#include +#include +#include +#include "taos.h" +#include "types.h" +#include "tlog.h" + +int get_db_test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db vgroups 2"); + taos_free_result(pRes); + + pRes = taos_query(taos, "use sml_db"); + int code = taos_errno(pRes); + taos_free_result(pRes); + ASSERT(code == 0); + + code = taos_get_current_db(taos, NULL, 0, NULL); + ASSERT(code != 0); + + int required = 0; + code = taos_get_current_db(taos, NULL, 0, &required); + ASSERT(code != 0); + ASSERT(required == 7); + + char database[10] = {0}; + code = taos_get_current_db(taos, database, 3, &required); + ASSERT(code != 0); + ASSERT(required == 7); + ASSERT(strcpy(database, "sm")); + + char database1[10] = {0}; + code = taos_get_current_db(taos, database1, 10, &required); + ASSERT(code == 0); + ASSERT(strcpy(database1, "sml_db")); + + taos_close(taos); + + return code; +} + +int main(int argc, char *argv[]) { + int ret = 0; + ret = get_db_test(); + ASSERT(!ret); + return ret; +} diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index dfa81ab180641c14c66116a5f9fff5541dde0e24..c36ab388778a80f4dd9349e0cb04551166007bee 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -19,8 +19,8 @@ #include #include #include "taos.h" -#include "types.h" #include "tlog.h" +#include "types.h" int smlProcess_influx_Test() { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); @@ -78,22 +78,22 @@ int smlProcess_telnet_Test() { pRes = taos_query(taos, "use sml_db"); taos_free_result(pRes); -// char *sql[4] = {0}; -// sql[0] = taosMemoryCalloc(1, 128); -// sql[1] = taosMemoryCalloc(1, 128); -// sql[2] = taosMemoryCalloc(1, 128); -// sql[3] = taosMemoryCalloc(1, 128); + // char *sql[4] = {0}; + // sql[0] = taosMemoryCalloc(1, 128); + // sql[1] = taosMemoryCalloc(1, 128); + // sql[2] = taosMemoryCalloc(1, 128); + // sql[3] = taosMemoryCalloc(1, 128); const char *sql1[] = {"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0", - "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ", - "sys.if.bytes.out 1479496102 1.3E3 network=tcp", - " sys.procs.running 1479496100 42 host=web01 "}; + "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ", + "sys.if.bytes.out 1479496102 1.3E3 network=tcp", + " sys.procs.running 1479496100 42 host=web01 "}; -// for(int i = 0; i < 4; i++){ -// strncpy(sql[i], sql1[i], 128); -// } + // for(int i = 0; i < 4; i++){ + // strncpy(sql[i], sql1[i], 128); + // } -// pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_TELNET_PROTOCOL, -// TSDB_SML_TIMESTAMP_NANO_SECONDS); + // pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_TELNET_PROTOCOL, + // TSDB_SML_TIMESTAMP_NANO_SECONDS); pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); @@ -114,11 +114,12 @@ int smlProcess_json1_Test() { taos_free_result(pRes); const char *sql[] = { - "[{\"metric\":\"sys.cpu.nice\",\"timestamp\":0,\"value\":18,\"tags\":{\"host\":\"web01\",\"id\":\"t1\",\"dc\":\"lga\"}},{\"metric\":\"sys.cpu.nice\",\"timestamp\":1662344045,\"value\":9,\"tags\":{\"host\":\"web02\",\"dc\":\"lga\"}}]" - }; + "[{\"metric\":\"sys.cpu.nice\",\"timestamp\":0,\"value\":18,\"tags\":{\"host\":\"web01\",\"id\":\"t1\",\"dc\":" + 
"\"lga\"}},{\"metric\":\"sys.cpu.nice\",\"timestamp\":1662344045,\"value\":9,\"tags\":{\"host\":\"web02\",\"dc\":" + "\"lga\"}}]"}; char *sql1[1] = {0}; - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { sql1[i] = taosMemoryCalloc(1, 1024); strncpy(sql1[i], sql[i], 1023); } @@ -126,23 +127,25 @@ int smlProcess_json1_Test() { pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); int code = taos_errno(pRes); - if(code != 0){ + if (code != 0) { printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); - }else{ + } else { printf("%s result:success\n", __FUNCTION__); } taos_free_result(pRes); - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { taosMemoryFree(sql1[i]); } const char *sql2[] = { - "[{\"metric\":\"sys.cpu.nice\",\"timestamp\":1662344041,\"value\":13,\"tags\":{\"host\":\"web01\",\"dc\":\"lga\"}},{\"metric\":\"sys.cpu.nice\",\"timestamp\":1662344042,\"value\":9,\"tags\":{\"host\":\"web02\",\"dc\":\"lga\"}}]", + "[{\"metric\":\"sys.cpu.nice\",\"timestamp\":1662344041,\"value\":13,\"tags\":{\"host\":\"web01\",\"dc\":\"lga\"}" + "},{\"metric\":\"sys.cpu.nice\",\"timestamp\":1662344042,\"value\":9,\"tags\":{\"host\":\"web02\",\"dc\":\"lga\"}" + "}]", }; char *sql3[1] = {0}; - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { sql3[i] = taosMemoryCalloc(1, 1024); strncpy(sql3[i], sql2[i], 1023); } @@ -150,14 +153,14 @@ int smlProcess_json1_Test() { pRes = taos_schemaless_insert(taos, (char **)sql3, sizeof(sql3) / sizeof(sql3[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); code = taos_errno(pRes); - if(code != 0){ + if (code != 0) { printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); - }else{ + } else { printf("%s result:success\n", __FUNCTION__); } taos_free_result(pRes); - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { taosMemoryFree(sql3[i]); } @@ -176,10 +179,11 @@ int smlProcess_json2_Test() { taos_free_result(pRes); const char *sql[] = { - "{\"metric\":\"meter_current0\",\"timestamp\":{\"value\":1662344042,\"type\":\"s\"},\"value\":{\"value\":10.3,\"type\":\"i64\"},\"tags\":{\"groupid\":{\"value\":2,\"type\":\"bigint\"},\"location\":{\"value\":\"北京\",\"type\":\"binary\"},\"id\":\"d1001\"}}" - }; + "{\"metric\":\"meter_current0\",\"timestamp\":{\"value\":1662344042,\"type\":\"s\"},\"value\":{\"value\":10.3," + "\"type\":\"i64\"},\"tags\":{\"groupid\":{\"value\":2,\"type\":\"bigint\"},\"location\":{\"value\":\"北京\"," + "\"type\":\"binary\"},\"id\":\"d1001\"}}"}; char *sql1[1] = {0}; - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { sql1[i] = taosMemoryCalloc(1, 1024); strncpy(sql1[i], sql[i], 1023); } @@ -187,15 +191,15 @@ int smlProcess_json2_Test() { pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); int code = taos_errno(pRes); - if(code != 0){ + if (code != 0) { printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); - }else{ + } else { printf("%s result:success\n", __FUNCTION__); } taos_free_result(pRes); taos_close(taos); - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { taosMemoryFree(sql1[i]); } return code; @@ -211,10 +215,10 @@ int smlProcess_json3_Test() { taos_free_result(pRes); const char *sql[] = { - "[{\"metric\":\"sys.cpu.nice3\",\"timestamp\":0,\"value\":\"18\",\"tags\":{\"host\":\"web01\",\"id\":\"t1\",\"dc\":\"lga\"}}]" - }; + 
"[{\"metric\":\"sys.cpu.nice3\",\"timestamp\":0,\"value\":\"18\",\"tags\":{\"host\":\"web01\",\"id\":\"t1\"," + "\"dc\":\"lga\"}}]"}; char *sql1[1] = {0}; - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { sql1[i] = taosMemoryCalloc(1, 1024); strncpy(sql1[i], sql[i], 1023); } @@ -222,15 +226,15 @@ int smlProcess_json3_Test() { pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); int code = taos_errno(pRes); - if(code != 0){ + if (code != 0) { printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); - }else{ + } else { printf("%s result:success\n", __FUNCTION__); } taos_free_result(pRes); taos_close(taos); - for(int i = 0; i < 1; i++){ + for (int i = 0; i < 1; i++) { taosMemoryFree(sql1[i]); } return code; @@ -298,7 +302,7 @@ int sml_16384_Test() { printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); int code = taos_errno(pRes); taos_free_result(pRes); - if(code) return code; + if (code) return code; const char *sql1[] = { "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c0=f,c1=127i8,c11=L\"ncharColValue\",c10=t 1626006833631000000", @@ -740,11 +744,26 @@ int sml_dup_time_Test() { taos_free_result(pRes); const char *sql[] = {//"test_ms,t0=t c0=f 1626006833641", - "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=1i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xcxvwjvf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", - "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=T,c1=2i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixrzcuq\",c8=L\"ncharColValue\",c9=7u64 1626006834639000000", - "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=3i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iupzdqub\",c8=L\"ncharColValue\",c9=7u64 1626006835639000000", - "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=4i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yvvtzzof\",c8=L\"ncharColValue\",c9=7u64 1626006836639000000", - "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=5i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbxpilkj\",c8=L\"ncharColValue\",c9=7u64 1626006837639000000"}; + "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11." + "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" " + "c0=f,c1=1i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22." + "123456789f64,c7=\"xcxvwjvf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", + "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11." 
+ "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" " + "c0=T,c1=2i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22." + "123456789f64,c7=\"fixrzcuq\",c8=L\"ncharColValue\",c9=7u64 1626006834639000000", + "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11." + "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" " + "c0=t,c1=3i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22." + "123456789f64,c7=\"iupzdqub\",c8=L\"ncharColValue\",c9=7u64 1626006835639000000", + "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11." + "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" " + "c0=t,c1=4i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22." + "123456789f64,c7=\"yvvtzzof\",c8=L\"ncharColValue\",c9=7u64 1626006836639000000", + "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11." + "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" " + "c0=t,c1=5i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22." + "123456789f64,c7=\"vbxpilkj\",c8=L\"ncharColValue\",c9=7u64 1626006837639000000"}; pRes = taos_query(taos, "use sml_db"); taos_free_result(pRes); @@ -764,8 +783,10 @@ int sml_add_tag_col_Test() { taos_free_result(pRes); const char *sql[] = { - "macylr,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000" - }; + "macylr,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64," + "t7=\"binaryTagValue\",t8=L\"ncharTagValue\" " + "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=" + "\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000"}; pRes = taos_query(taos, "use sml_db"); taos_free_result(pRes); @@ -776,8 +797,10 @@ int sml_add_tag_col_Test() { if (code) return code; const char *sql1[] = { - "macylr,id=macylr_17875_1804,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000" - }; + "macylr,id=macylr_17875_1804,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=" + "22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" " + "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c8=" + "L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000"}; pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, 0); printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); @@ -814,26 +837,26 @@ int smlProcess_18784_Test() { ASSERT(fieldNum == 5); printf("fieldNum:%d\n", fieldNum); TAOS_ROW row = NULL; - int32_t rowIndex = 0; - while((row = taos_fetch_row(pRes)) != NULL) { - int64_t ts = *(int64_t*)row[0]; - int64_t used = 
*(int64_t*)row[1]; - int64_t total = *(int64_t*)row[2]; - int64_t freed = *(int64_t*)row[3]; - if(rowIndex == 0){ + int32_t rowIndex = 0; + while ((row = taos_fetch_row(pRes)) != NULL) { + int64_t ts = *(int64_t *)row[0]; + int64_t used = *(int64_t *)row[1]; + int64_t total = *(int64_t *)row[2]; + int64_t freed = *(int64_t *)row[3]; + if (rowIndex == 0) { ASSERT(ts == 1661943960000); ASSERT(used == 176059); ASSERT(total == 1081101176832); ASSERT(freed == 66932805); -// ASSERT_EQ(latitude, 24.5208); -// ASSERT_EQ(longitude, 28.09377); -// ASSERT_EQ(elevation, 428); -// ASSERT_EQ(velocity, 0); -// ASSERT_EQ(heading, 304); -// ASSERT_EQ(grade, 0); -// ASSERT_EQ(fuel_consumption, 25); - }else{ -// ASSERT(0); + // ASSERT_EQ(latitude, 24.5208); + // ASSERT_EQ(longitude, 28.09377); + // ASSERT_EQ(elevation, 428); + // ASSERT_EQ(velocity, 0); + // ASSERT_EQ(heading, 304); + // ASSERT_EQ(grade, 0); + // ASSERT_EQ(fuel_consumption, 25); + } else { + // ASSERT(0); } rowIndex++; } @@ -850,17 +873,21 @@ int sml_19221_Test() { taos_free_result(pRes); const char *sql[] = { - "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c11=L\"ncharColValue\",c0=t,c1=127i8 1626006833632000000\nqelhxo,id=pnnhsa,t0=t,t1=127i8 c11=L\"ncharColValue\",c0=t,c1=127i8 1626006833633000000\n#comment\nqelhxo,id=pnqhsa,t0=t,t1=127i8 c11=L\"ncharColValue\",c0=t,c1=127i8 1626006833634000000", + "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c11=L\"ncharColValue\",c0=t,c1=127i8 " + "1626006833632000000\nqelhxo,id=pnnhsa,t0=t,t1=127i8 c11=L\"ncharColValue\",c0=t,c1=127i8 " + "1626006833633000000\n#comment\nqelhxo,id=pnqhsa,t0=t,t1=127i8 c11=L\"ncharColValue\",c0=t,c1=127i8 " + "1626006833634000000", }; pRes = taos_query(taos, "use sml_db"); taos_free_result(pRes); - char* tmp = (char*)taosMemoryCalloc(1024, 1); + char *tmp = (char *)taosMemoryCalloc(1024, 1); memcpy(tmp, sql[0], strlen(sql[0])); - *(char*)(tmp+44) = 0; + *(char *)(tmp + 44) = 0; int32_t totalRows = 0; - pRes = taos_schemaless_insert_raw(taos, tmp, strlen(sql[0]), &totalRows, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); + pRes = taos_schemaless_insert_raw(taos, tmp, strlen(sql[0]), &totalRows, TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); ASSERT(totalRows == 3); printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); @@ -875,20 +902,22 @@ int sml_19221_Test() { int sml_ts2164_Test() { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); - TAOS_RES *pRes = taos_query(taos, "CREATE DATABASE IF NOT EXISTS line_test BUFFER 384 MINROWS 1000 PAGES 256 PRECISION 'ns'"); + TAOS_RES *pRes = + taos_query(taos, "CREATE DATABASE IF NOT EXISTS line_test BUFFER 384 MINROWS 1000 PAGES 256 PRECISION 'ns'"); taos_free_result(pRes); const char *sql[] = { -// "meters,location=la,groupid=ca current=11.8,voltage=221,phase=0.27", + // "meters,location=la,groupid=ca current=11.8,voltage=221,phase=0.27", "meters,location=la,groupid=ca current=11.8,voltage=221", "meters,location=la,groupid=ca current=11.8,voltage=221,phase=0.27", -// "meters,location=la,groupid=cb current=11.8,voltage=221,phase=0.27", + // "meters,location=la,groupid=cb current=11.8,voltage=221,phase=0.27", }; pRes = taos_query(taos, "use line_test"); taos_free_result(pRes); - pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); printf("%s result:%s\n", __FUNCTION__, 
taos_errstr(pRes)); int code = taos_errno(pRes); @@ -901,34 +930,42 @@ int sml_ts2164_Test() { int sml_ttl_Test() { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); - TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1"); + TAOS_RES *pRes = taos_query(taos, "drop database if exists sml_db"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists sml_db schemaless 1"); taos_free_result(pRes); const char *sql[] = { - "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=\"2022-02-0210:22:22\" 1626006833739000000", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=\"2022-02-0210:22:22\" " + "1626006833739000000", }; const char *sql1[] = { - "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=\"2022-02-0210:22:22\" 1626006833339000000", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=\"2022-02-0210:22:22\" " + "1626006833339000000", }; pRes = taos_query(taos, "use sml_db"); taos_free_result(pRes); - pRes = taos_schemaless_insert_ttl(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, 20); + pRes = taos_schemaless_insert_ttl(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS, 20); printf("%s result1:%s\n", __FUNCTION__, taos_errstr(pRes)); taos_free_result(pRes); - pRes = taos_schemaless_insert_ttl(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, 20); + pRes = taos_schemaless_insert_ttl(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS, 20); printf("%s result1:%s\n", __FUNCTION__, taos_errstr(pRes)); taos_free_result(pRes); - pRes = taos_query(taos, "select `ttl` from information_schema.ins_tables where table_name='t_be97833a0e1f523fcdaeb6291d6fdf27'"); + pRes = taos_query( + taos, "select `ttl` from information_schema.ins_tables where table_name='t_be97833a0e1f523fcdaeb6291d6fdf27'"); printf("%s result2:%s\n", __FUNCTION__, taos_errstr(pRes)); TAOS_ROW row = taos_fetch_row(pRes); - if(row != NULL && *row != NULL){ - int32_t ttl = *(int32_t*)row[0]; + if (row != NULL && *row != NULL) { + int32_t ttl = *(int32_t *)row[0]; ASSERT(ttl == 20); } @@ -939,47 +976,132 @@ int sml_ttl_Test() { return code; } -//char *str[] ={ -// "", -// "f64", -// "F64", -// "f32", -// "F32", -// "i", -// "I", -// "i64", -// "I64", -// "u", -// "U", -// "u64", -// "U64", -// "i32", -// "I32", -// "u32", -// "U32", -// "i16", -// "I16", -// "u16", -// "U16", -// "i8", -// "I8", -// "u8", -// "U8", -//}; -//uint8_t smlCalTypeSum(char* endptr, int32_t left){ -// uint8_t sum = 0; -// for(int i = 0; i < left; i++){ -// sum += endptr[i]; -// } -// return sum; -//} +// char *str[] ={ +// "", +// "f64", +// "F64", +// "f32", +// "F32", +// "i", +// "I", +// "i64", +// "I64", +// "u", +// "U", +// "u64", +// "U64", +// "i32", +// "I32", +// "u32", +// "U32", +// "i16", +// "I16", +// "u16", +// "U16", +// "i8", +// "I8", +// "u8", +// "U8", +// }; +// uint8_t smlCalTypeSum(char* endptr, int32_t left){ +// uint8_t sum = 0; +// for(int i = 0; i < left; i++){ +// sum += endptr[i]; +// } +// return sum; +// } + +int sml_ts2385_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "CREATE DATABASE IF NOT EXISTS ts2385"); + 
taos_free_result(pRes); + + const char *sql[] = { + "DataRTU,deviceId=2211230C94K0_1,dataModelName=DataRTU_2211230C94K0_1 " + "s5=false,s18=false,k14=0,k2=0,k8=0,k10=0,s9=false,s19=false,k11=0,k13=0,s22=false,k15=0,m2=37.416671660000006," + "m8=600,m10=1532,m1=20.25,m13=0,s7=false,k7=0,m16=0,s17=false,k4=0,s11=false,s15=true,m7=600,m12=1490,s1=true," + "m14=0,s14=false,s16=true,k5=0,hex=" + "\"7b3b00000001030301030200000000323231313233304339344b30002b01012a10028003000000070d05da025802580258025802580258" + "045305fc05f505d200000000000000000afc7d\",k6=0,m3=600,s3=false,s24=false,k3=0,m6=600,m15=0,s12=false,k1=0,k16=0," + "s10=false,s21=false,k12=0,m5=600,s8=false,m4=600,m9=1107,s2=false,s13=false,s20=false,s23=false,k9=0,m11=1525," + "s4=false,s6=false 1672818929178749400", + "DataRTU,deviceId=2211230C94K0_1,dataModelName=DataRTU_2211230C94K0_1 " + "k2=0,k11=0,m3=600,m12=1506,s17=false,m5=600,s11=false,s22=false,k6=0,m13=0,s16=true,k5=0,s21=false,m4=600,m7=" + "600,s9=false,s10=false,s18=false,k7=0,m8=600,k1=0,hex=" + "\"7b3a00000001030301030200000000323231313233304339344b30002b01012a10028003000000071105e8025802580258025802580258" + "044905eb05ef05e200000000000000000afc7d\",m11=1519,m16=0,s19=false,s23=false,s24=false,s14=false,s6=false,k10=0," + "k15=0,k14=0,s2=false,s4=false,s8=false,s13=false,s15=true,s20=false,m2=38.000005040000005,s3=false,s7=false,k3=" + "0,k8=0,k13=0,m6=600,m14=0,m15=0,k4=0,m1=20.450000000000003,m9=1097,s1=true,m10=1515,s5=false,s12=false,k9=0,k12=" + "0,k16=0 1672818919126971000", + "DataRTU,deviceId=2211230C94K0_1,dataModelName=DataRTU_2211230C94K0_1 " + "k7=0,k14=0,m3=600,m7=600,s5=false,k2=0,k3=0,k8=0,s3=false,s20=false,k15=0,m10=1482,s17=false,k1=0,k16=0,m15=0," + "s12=false,k9=0,m16=0,s11=false,m4=600,s10=false,s15=true,s24=false,m8=600,m13=0,s2=false,s18=false,k12=0,s14=" + "false,s19=false,hex=" + "\"7b3900000001030301030200000000323231313233304339344b30002b01012a10028003000000071505ef025802580258025802580258" + "045005ca05b105d800000000000000000aa47d\",s1=true,s4=false,s7=false,s8=false,s13=false,m6=600,s6=false,s21=false," + "k11=0,m12=1496,m9=1104,s16=true,k5=0,s9=false,k10=0,k13=0,m2=38.291671730000004,s22=false,m5=600,m11=1457,m14=0," + "k4=0,m1=20.650000000000006,s23=false,k6=0 1672818909130866800", + "DataRTU,deviceId=2211230C94K0_1,dataModelName=DataRTU_2211230C94K0_1 " + "m7=600,k4=0,k14=0,s22=false,k13=0,s2=false,m11=1510,m14=0,s4=false,s10=false,m1=21,m16=0,m13=0,s9=false,s13=" + "false,s14=false,k10=0,m3=600,m9=1107,s18=false,s19=false,k2=0,hex=" + "\"7b3600000001030301030200000000323231313233304339344b30002b01012a10028003000000071c0619025802580258025802580258" + "045305dc05e6058d00000000000000000ad27d\",m2=40.04167187,m8=600,k7=0,k8=0,m10=1500,s23=false,k5=0,s11=false,s21=" + "false,k9=0,m15=0,m12=1421,s1=true,s5=false,s8=false,m5=600,k16=0,k15=0,m6=600,s3=false,s6=false,s7=false,s15=" + "true,s20=false,s24=false,k11=0,k1=0,k6=0,k12=0,m4=600,s16=true,s17=false,k3=0,s12=false 1672818879189483200", + "DataRTU,deviceId=2106070C11M0_2,dataModelName=DataRTU_2106070C11M0_2 " + "m1=5691,k14=0,m6=0,s14=false,k8=0,s19=false,s20=false,k12=0,s17=false,k3=0,m8=0,s8=false,m7=0,s9=false,s4=false," + "s11=false,s13=false,s16=false,k5=0,k15=0,k16=0,s10=false,s23=false,s1=false,s2=false,s3=false,s12=false,s24=" + "false,k2=0,k10=0,hex=" + "\"7b1400000001030301030200000000323130363037304331314d30002b01022a080400000000000008af0c000000000000000000000000" + 
"000000000000000000000000000000000ad47d\",m2=0,s7=false,s18=false,s21=false,m3=0,m5=0,k4=0,k11=0,m4=0,k1=0,k6=0," + "k13=0,s6=false,s15=false,s5=false,s22=false,k7=0,k9=0 1672818779549848800"}; + pRes = taos_query(taos, "use ts2385"); + taos_free_result(pRes); + + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + + printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); + int code = taos_errno(pRes); + ASSERT(!code); + taos_free_result(pRes); + + pRes = taos_query(taos, "select distinct tbname from `DataRTU` order by tbname"); + printf("%s result2:%s\n", __FUNCTION__, taos_errstr(pRes)); + int num = 0; + TAOS_ROW row = NULL; + while ((row = taos_fetch_row(pRes))) { + if (row[0] != NULL && num == 0) { + ASSERT(strncmp((char *)row[0], "DataRTU_2106070C11M0_2", sizeof("DataRTU_2106070C11M0_2") - 1) == 0); + } + + if (row[0] != NULL && num == 1) { + ASSERT(strncmp((char *)row[0], "DataRTU_2211230C94K0_1", sizeof("DataRTU_2211230C94K0_1") - 1) == 0); + } + num++; + } + ASSERT(num == 2); + + code = taos_errno(pRes); + taos_free_result(pRes); + taos_close(taos); + + return code; +} int main(int argc, char *argv[]) { + if (argc == 2) { + taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); + } -// for(int i = 0; i < sizeof(str)/sizeof(str[0]); i++){ -// printf("str:%s \t %d\n", str[i], smlCalTypeSum(str[i], strlen(str[i]))); -// } int ret = 0; + ret = sml_ts2385_Test(); // this test case need config sml table name using ./sml_test config_file + ASSERT(!ret); + // for(int i = 0; i < sizeof(str)/sizeof(str[0]); i++){ + // printf("str:%s \t %d\n", str[i], smlCalTypeSum(str[i], strlen(str[i]))); + // } + // int ret = 0; ret = sml_ttl_Test(); ASSERT(!ret); ret = sml_ts2164_Test();