diff --git a/cmake/cmake.version b/cmake/cmake.version
index eef860e26742943462ac07c810d57b22aa6ea5db..a87049fb8acf9a31ef865f5f9086243c928b34d2 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.6.1.alpha")
+ SET(TD_VER_NUMBER "3.0.7.1.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md
index 5a54c32a5137cdfdf25b6b6eca25a265c72c9242..91bf94034c75aa4b7a2b1e911483292a8272d7c3 100644
--- a/docs/en/05-get-started/03-package.md
+++ b/docs/en/05-get-started/03-package.md
@@ -201,7 +201,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad
-After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privilege to start TDengine Server.
+After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privilege to start TDengine Server. Please run `sc start taosadapter` or run `C:\TDengine\taosadapter.exe` with administrator privilege to start taosAdapter to provide http/REST service.
## Command Line Interface (CLI)
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index 22ad2055e4633fcf63baeb4470d0f2aafddac5b3..23bb8ce91705ddbb0a7e1d3580072d467efa95ad 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -36,7 +36,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- The number of replicas can only be 1 or 3.
- The maximum length of a username is 23 bytes.
-- The maximum length of a password is 128 bytes.
+- The maximum length of a password is 31 bytes.
- The maximum number of rows depends on system resources.
- The maximum number of vnodes in a database is 1024.
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index 8e3e7c869e9db38e060f8ed2e942aaf123fb3415..c214e11876c77f4c617e1103b14ee46e6d211ca9 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -16,7 +16,7 @@ This statement creates a user account.
The maximum length of user_name is 23 bytes.
-The maximum length of password is 32 bytes. The password can include leters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty.
+The maximum length of password is 31 bytes. The password can include letters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty.
`SYSINFO` indicates whether the user is allowed to view system information. `1` means allowed, `0` means not allowed. System information includes server configuration, dnode, vnode, storage. The default value is `1`.
diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md
index 7586e4af76983e785b2c6b3f03870a8bcd6df4a3..e3eb69bdb39386cb07b0992a240f318bb9ea2551 100644
--- a/docs/en/12-taos-sql/27-index.md
+++ b/docs/en/12-taos-sql/27-index.md
@@ -28,6 +28,24 @@ Performs pre-aggregation on the specified column over the time window defined by
- WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables.
- MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms. This option can be used only on supertables. Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance.
+```sql
+DROP DATABASE IF EXISTS d0;
+CREATE DATABASE d0;
+USE d0;
+CREATE TABLE IF NOT EXISTS st1 (ts timestamp, c1 int, c2 float, c3 double) TAGS (t1 int unsigned);
+CREATE TABLE ct1 USING st1 TAGS(1000);
+CREATE TABLE ct2 USING st1 TAGS(2000);
+INSERT INTO ct1 VALUES(now+0s, 10, 2.0, 3.0);
+INSERT INTO ct1 VALUES(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);
+CREATE SMA INDEX sma_index_name1 ON st1 FUNCTION(max(c1),max(c2),min(c1)) INTERVAL(5m,10s) SLIDING(5m) WATERMARK 5s MAX_DELAY 1m;
+-- query from SMA Index
+ALTER LOCAL 'querySmaOptimize' '1';
+SELECT max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m);
+SELECT _wstart,_wend,_wduration,max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m);
+-- query from raw data
+ALTER LOCAL 'querySmaOptimize' '0';
+```
+
### FULLTEXT Indexing
Creates a text index for the specified column. FULLTEXT indexing improves performance for queries with text filtering. The index_option syntax is not supported for FULLTEXT indexing. FULLTEXT indexing is supported for JSON tag columns only. Multiple columns cannot be indexed together. However, separate indices can be created for each column.
@@ -41,7 +59,6 @@ DROP INDEX index_name;
## View Indices
````sql
-```sql
SHOW INDEXES FROM tbl_name [FROM db_name];
````
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index b68aeda94cc20986abe961bd7fd33ddb329773e9..69bbd287edbbeae608cee4ec1f1070f10d96532b 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -36,8 +36,8 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
-| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | 3.0.5.0 or later |
-| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
+| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
+| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
@@ -1019,11 +1019,13 @@ while(true) {
#### Assignment subscription Offset
```java
+// get offset
long position(TopicPartition partition) throws SQLException;
Map position(String topic) throws SQLException;
Map beginningOffsets(String topic) throws SQLException;
Map endOffsets(String topic) throws SQLException;
+// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
void seek(TopicPartition partition, long offset) throws SQLException;
```
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index 2a6cd9ecf77febdcc56528f34112944dc25f0aec..f0a59842fecbe783fb2353f62e0ecb2bc59e2d6d 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -87,9 +87,9 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
|NCHAR|str|
|JSON|str|
-## Installation
+## Installation Steps
-### Preparation
+### Pre-installation preparation
1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
@@ -275,7 +275,7 @@ Transfer-Encoding: chunked
-### Using connectors to establish connections
+### Specify the Host and Properties to get the connection
The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
@@ -331,7 +331,69 @@ The parameter of `connect()` is the url of TDengine, and the protocol is `taosws
-## Example program
+### Priority of configuration parameters
+
+If the configuration parameters are duplicated in the parameters or client configuration file, the priority of the parameters, from highest to lowest, are as follows:
+
+1. Parameters in `connect` function.
+2. the configuration file taos.cfg of the TDengine client driver when using a native connection.
+
+## Usage examples
+
+### Create database and tables
+
+
+
+
+```python
+conn = taos.connect()
+# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+# change database. same as execute "USE db"
+conn.select_db("test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+```
+
+
+
+
+
+```python
+conn = taosrest.connect(url="http://localhost:6041")
+# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+conn.execute("USE test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+```
+
+
+
+
+
+```python
+conn = taosws.connect(url="ws://localhost:6041")
+# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+conn.execute("USE test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+```
+
+
+
+
+### Insert data
+
+```python
+conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
+```
+
+:::note
+now is an internal function. The default is the current time of the client's computer. now + 1s represents the current time of the client plus 1 second, followed by the number representing the unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).
+:::
+
### Basic Usage
@@ -453,7 +515,7 @@ The `query` method of the `TaosConnection` class can be used to query data and r
-### Usage with req_id
+### Execute SQL with reqId
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
@@ -553,171 +615,138 @@ As the way to connect introduced above but add `req_id` argument.
-### Subscription
+### Writing data via parameter binding
-Connector support data subscription. For more information about subscroption, please refer to [Data Subscription](../../../develop/tmq/).
+The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
-
+
-The `consumer` in the connector contains the subscription api.
-
-##### Create Consumer
-
-The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
+##### Create Stmt
-```python
-from taos.tmq import Consumer
+Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
-consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
+import taos
-##### Subscribe topics
-
-The `subscribe` function is used to subscribe to a list of topics.
-
-```python
-consumer.subscribe(['topic1', 'topic2'])
+conn = taos.connect()
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
-##### Consume
-
-The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+##### parameter binding
-```python
-while True:
- res = consumer.poll(1)
- if not res:
- continue
- err = res.error()
- if err is not None:
- raise err
- val = res.value()
+Call the `new_multi_binds` function to create the parameter list for parameter bindings.
- for block in val:
- print(block.fetchall())
```
-
-##### assignment
-
-The `assignment` function is used to get the assignment of the topic.
-
-```python
-assignments = consumer.assignment()
+params = new_multi_binds(16)
+params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+params[1].bool((True, None, False))
+params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+params[3].tinyint([0, 127, None])
+params[4].smallint([3, None, 2])
+params[5].int([3, 4, None])
+params[6].bigint([3, 4, None])
+params[7].tinyint_unsigned([3, 4, None])
+params[8].smallint_unsigned([3, 4, None])
+params[9].int_unsigned([3, 4, None])
+params[10].bigint_unsigned([3, 4, None])
+params[11].float([3, None, 1])
+params[12].double([3, None, 1.2])
+params[13].binary(["abc", "dddafadfadfadfadfa", None])
+params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+params[15].timestamp([None, None, 1626861392591])
```
-##### Seek
-
-The `seek` function is used to reset the assignment of the topic.
+Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values.
-```python
-tp = TopicPartition(topic='topic1', partition=0, offset=0)
-consumer.seek(tp)
+```
+stmt.bind_param_batch(params)
```
-##### After consuming data
+##### execute sql
-You should unsubscribe to the topics and close the consumer after consuming.
+Call `execute` method to execute sql.
-```python
-consumer.unsubscribe()
-consumer.close()
+```
+stmt.execute()
```
-##### Tmq subscription example
+##### Close Stmt
-```python
-{{#include docs/examples/python/tmq_example.py}}
+```
+stmt.close()
```
-##### assignment and seek example
+##### Example
```python
-{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+{{#include docs/examples/python/stmt_example.py}}
```
-
-In addition to native connections, the connector also supports subscriptions via websockets.
-
-##### Create Consumer
+##### Create Stmt
-The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
+Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
-```python
+```
import taosws
-consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+conn = taosws.connect('taosws://localhost:6041/test')
+stmt = conn.statement()
```
-##### subscribe topics
+##### Prepare sql
-The `subscribe` function is used to subscribe to a list of topics.
+Call `prepare` method in stmt to prepare sql.
-```python
-consumer.subscribe(['topic1', 'topic2'])
```
-
-##### Consume
-
-The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
-
-```python
-while True:
- res = consumer.poll(timeout=1.0)
- if not res:
- continue
- err = res.error()
- if err is not None:
- raise err
- for block in message:
- for row in block:
- print(row)
+stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
-##### assignment
+##### parameter binding
-The `assignment` function is used to get the assignment of the topic.
+Call the `bind_param` method to bind parameters.
-```python
-assignments = consumer.assignment()
+```
+stmt.bind_param([
+ taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
+ taosws.ints_to_column([1, 2, 3, 4]),
+ taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
+ taosws.varchar_to_column(['a', 'b', 'c', 'd']),
+])
```
-##### Seek
-
-The `seek` function is used to reset the assignment of the topic.
+Call the `add_batch` method to add parameters to the batch.
-```python
-consumer.seek(topic='topic1', partition=0, offset=0)
+```
+stmt.add_batch()
```
-##### After consuming data
+##### execute sql
-You should unsubscribe to the topics and close the consumer after consuming.
+Call `execute` method to execute sql.
-```python
-consumer.unsubscribe()
-consumer.close()
+```
+stmt.execute()
```
-##### Subscription example
+##### Close Stmt
-```python
-{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+stmt.close()
```
-##### Assignment and seek example
+##### Example
```python
-{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+{{#include docs/examples/python/stmt_websocket_example.py}}
```
-
-### Schemaless Insert
+### Schemaless Writing
Connector support schemaless insert.
@@ -767,134 +796,211 @@ Connector support schemaless insert.
-### Parameter Binding
+### Schemaless with reqId
-The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
+There is an optional parameter called `req_id` in `schemaless_insert` and `schemaless_insert_raw` method. This reqId can be used to request link tracing.
+
+```python
+{{#include docs/examples/python/schemaless_insert_req_id.py}}
+```
+
+```python
+{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
+```
+
+### Data Subscription
+
+Connector support data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
+
+#### Create a Topic
+
+To create topic, please refer to [Data Subscription](../../../develop/tmq/#create-a-topic).
+
+#### Create a Consumer
+
+
-
-##### Create Stmt
+The consumer in the connector contains the subscription api. The syntax for creating a consumer is consumer = Consumer(configs). For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
-Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
+```python
+from taos.tmq import Consumer
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
-import taos
+
-conn = taos.connect()
-stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
-```
+
-##### parameter binding
+In addition to native connections, the connector also supports subscriptions via websockets.
-Call the `new_multi_binds` function to create the parameter list for parameter bindings.
+The syntax for creating a consumer is "consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
-```
-params = new_multi_binds(16)
-params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
-params[1].bool((True, None, False))
-params[2].tinyint([-128, -128, None]) # -128 is tinyint null
-params[3].tinyint([0, 127, None])
-params[4].smallint([3, None, 2])
-params[5].int([3, 4, None])
-params[6].bigint([3, 4, None])
-params[7].tinyint_unsigned([3, 4, None])
-params[8].smallint_unsigned([3, 4, None])
-params[9].int_unsigned([3, 4, None])
-params[10].bigint_unsigned([3, 4, None])
-params[11].float([3, None, 1])
-params[12].double([3, None, 1.2])
-params[13].binary(["abc", "dddafadfadfadfadfa", None])
-params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
-params[15].timestamp([None, None, 1626861392591])
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
-Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values.
+
+
-```
-stmt.bind_param_batch(params)
-```
+#### Subscribe to a Topic
-##### execute sql
+
-Call `execute` method to execute sql.
+
-```
-stmt.execute()
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
```
-##### Close Stmt
+
+
+
+The `subscribe` function is used to subscribe to a list of topics.
+```python
+consumer.subscribe(['topic1', 'topic2'])
```
-stmt.close()
+
+
+
+
+#### Consume messages
+
+
+
+
+
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+
+```python
+while True:
+ res = consumer.poll(1)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
```
-##### Example
+
+
+
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
```python
-{{#include docs/examples/python/stmt_example.py}}
+while True:
+ res = consumer.poll(timeout=1.0)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+    for block in res:
+ for row in block:
+ print(row)
```
+
+
-
+#### Assignment subscription Offset
-##### Create Stmt
+
-Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
+
+The `assignment` function is used to get the assignment of the topic.
+
+```python
+assignments = consumer.assignment()
```
-import taosws
-conn = taosws.connect('taosws://localhost:6041/test')
-stmt = conn.statement()
+The `seek` function is used to reset the assignment of the topic.
+
+```python
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
```
-##### Prepare sql
+
+
-Call `prepare` method in stmt to prepare sql.
+The `assignment` function is used to get the assignment of the topic.
+```python
+assignments = consumer.assignment()
```
-stmt.prepare("insert into t1 values (?, ?, ?, ?)")
+
+The `seek` function is used to reset the assignment of the topic.
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
```
-##### parameter binding
+
+
-Call the `bind_param` method to bind parameters.
+#### Close subscriptions
-```
-stmt.bind_param([
- taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
- taosws.ints_to_column([1, 2, 3, 4]),
- taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
- taosws.varchar_to_column(['a', 'b', 'c', 'd']),
-])
-```
+
-Call the `add_batch` method to add parameters to the batch.
+
-```
-stmt.add_batch()
+You should unsubscribe to the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
```
-##### execute sql
+
+
-Call `execute` method to execute sql.
+You should unsubscribe to the topics and close the consumer after consuming.
-```
-stmt.execute()
+```python
+consumer.unsubscribe()
+consumer.close()
```
-##### Close Stmt
+
+
+
+#### Full Sample Code
+
+
+
+
+```python
+{{#include docs/examples/python/tmq_example.py}}
```
-stmt.close()
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
-##### Example
+
+
```python
-{{#include docs/examples/python/stmt_websocket_example.py}}
+{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
+
diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md
index 8fc20c149f5f5dbaf56ed7fc7d065d42b8af81c5..2348810d9e20c85a22d4e4f29d949c8598fe024e 100644
--- a/docs/en/14-reference/05-taosbenchmark.md
+++ b/docs/en/14-reference/05-taosbenchmark.md
@@ -470,3 +470,26 @@ The configuration parameters for subscribing to a super table are set in `super_
- **sql**: The SQL command to be executed. For the query SQL of super table, keep "xxxx" in the SQL command. The program will automatically replace it with all the sub-table names of the super table.
Replace it with all the sub-table names in the super table.
- **result**: The file to save the query result. If not specified, taosBenchmark will not save result.
+
+#### data type on taosBenchmark
+
+| # | **TDengine** | **taosBenchmark**
+| --- | :----------------: | :---------------:
+| 1 | TIMESTAMP | timestamp
+| 2 | INT | int
+| 3 | INT UNSIGNED | uint
+| 4 | BIGINT | bigint
+| 5 | BIGINT UNSIGNED | ubigint
+| 6 | FLOAT | float
+| 7 | DOUBLE | double
+| 8 | BINARY | binary
+| 9 | SMALLINT | smallint
+| 10 | SMALLINT UNSIGNED | usmallint
+| 11 | TINYINT | tinyint
+| 12 | TINYINT UNSIGNED | utinyint
+| 13 | BOOL | bool
+| 14 | NCHAR | nchar
+| 15 | VARCHAR | varchar
+| 16  | JSON               | json
+
+Note: Lowercase characters must be used for taosBenchmark data types.
diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
index d40efc702cf7bc0a2103f012118fec05bc598208..a98c3e3a6ba10747675ddcedfc5a2697aae040c4 100644
--- a/docs/en/20-third-party/11-kafka.md
+++ b/docs/en/20-third-party/11-kafka.md
@@ -363,7 +363,10 @@ The following configuration items apply to TDengine Sink Connector and TDengine
7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is json. The default is line.
8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is ``; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is ``.
9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is ``, false indicates that the rule is ``, and the default is false. Does not take effect when `topic.per.stable` is set to false.
-10. `topic.delimiter`: topic name delimiter,default is `-`。
+10. `topic.delimiter`: topic name delimiter, default is `-`.
+11. `read.method`: read method for query TDengine data, query or subscription. default is subscription.
+12. `subscription.group.id`: subscription group id for subscription data from TDengine, this field is required when `read.method` is subscription.
+13. `subscription.from`: subscription from latest or earliest. default is latest.
## Other notes
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index a5c1553402a75f902197c5e466d12aaf663eedb8..83b0fe5ac444e488d0c0d5cc211e2b4ffa2609a8 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
+## 3.0.7.0
+
+
+
## 3.0.6.0
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index f6d1c85a60ba5bbd08b122266ca42815a58d094c..621effa6fd52f17fd9ae36994dbfe2f0d4dfc52e 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -201,7 +201,7 @@ Active: inactive (dead)
-安装后,可以在拥有管理员权限的 cmd 窗口执行 `sc start taosd` 或在 `C:\TDengine` 目录下,运行 `taosd.exe` 来启动 TDengine 服务进程。
+安装后,可以在拥有管理员权限的 cmd 窗口执行 `sc start taosd` 或在 `C:\TDengine` 目录下,运行 `taosd.exe` 来启动 TDengine 服务进程。如需使用 http/REST 服务,请执行 `sc start taosadapter` 或运行 `taosadapter.exe` 来启动 taosAdapter 服务进程。
**TDengine 命令行(CLI)**
diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx
index 96f8991eea47c422e9b3212deac8085671591376..5dcdd61a5f436be9fad03ca08bc176b5ead9bfa4 100644
--- a/docs/zh/08-connector/14-java.mdx
+++ b/docs/zh/08-connector/14-java.mdx
@@ -1022,11 +1022,13 @@ while(true) {
#### 指定订阅 Offset
```java
+// 获取 offset
long position(TopicPartition partition) throws SQLException;
Map position(String topic) throws SQLException;
Map beginningOffsets(String topic) throws SQLException;
Map endOffsets(String topic) throws SQLException;
+// 指定下一次 poll 中使用的 offset
void seek(TopicPartition partition, long offset) throws SQLException;
```
diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx
index fcae6e2b6b944fba59f337c5fd357c3ec483110d..15c11d05c3009376c469441a669d8f983fafb33d 100644
--- a/docs/zh/08-connector/30-python.mdx
+++ b/docs/zh/08-connector/30-python.mdx
@@ -71,7 +71,7 @@ Python Connector 的所有数据库操作如果出现异常,都会直接抛出
{{#include docs/examples/python/handle_exception.py}}
```
-TDengine DataType 和 Python DataType
+## TDengine DataType 和 Python DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Python 对应类型转换如下:
@@ -277,7 +277,7 @@ Transfer-Encoding: chunked
-### 使用连接器建立连接
+### 指定 Host 和 Properties 获取连接
以下示例代码假设 TDengine 安装在本机, 且 FQDN 和 serverPort 都使用了默认配置。
@@ -333,8 +333,69 @@ Transfer-Encoding: chunked
+### 配置参数的优先级
+
+如果配置参数在参数和客户端配置文件中有重复,则参数的优先级由高到低分别如下:
+
+1. 连接参数
+2. 使用原生连接时,TDengine 客户端驱动的配置文件 taos.cfg
+
## 使用示例
+### 创建数据库和表
+
+
+
+
+```python
+conn = taos.connect()
+# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+# change database. same as execute "USE db"
+conn.select_db("test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+```
+
+
+
+
+
+```python
+conn = taosrest.connect(url="http://localhost:6041")
+# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+conn.execute("USE test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+```
+
+
+
+
+
+```python
+conn = taosws.connect(url="ws://localhost:6041")
+# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+conn.execute("USE test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+```
+
+
+
+
+### 插入数据
+
+```python
+conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
+```
+
+:::note
+now 为系统内部函数,默认为客户端所在计算机当前时间。 now + 1s 代表客户端当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒),s(秒),m(分),h(小时),d(天),w(周),n(月),y(年)。
+:::
+
### 基本使用
@@ -373,7 +434,6 @@ Transfer-Encoding: chunked
:::note
TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
-
:::
@@ -456,7 +516,7 @@ RestClient 类是对于 REST API 的直接封装。它只包含一个 sql() 方
-### 与 req_id 一起使用
+### 执行带有 reqId 的 SQL
使用可选的 req_id 参数,指定请求 id,可以用于 tracing
@@ -557,171 +617,138 @@ RestClient 类是对于 REST API 的直接封装。它只包含一个 sql() 方
-### 数据订阅
+### 通过参数绑定写入数据
-连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅文档](../../develop/tmq/)。
+TDengine 的 Python 连接器支持参数绑定风格的 Prepare API 方式写入数据,和大多数数据库类似,目前仅支持用 `?` 来代表待绑定的参数。
-
+
-`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API。
-
-##### 创建 Consumer
-
-创建 Consumer 语法为 `consumer = Consumer(configs)`,参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
+##### 创建 stmt
-```python
-from taos.tmq import Consumer
+Python 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt,该方法接收 sql 字符串作为参数,sql 字符串目前仅支持用 `?` 来代表绑定的参数。
-consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
+import taos
-##### 订阅 topics
-
-Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
-
-```python
-consumer.subscribe(['topic1', 'topic2'])
+conn = taos.connect()
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
-##### 消费数据
-
-Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+##### 参数绑定
-```python
-while True:
- res = consumer.poll(1)
- if not res:
- continue
- err = res.error()
- if err is not None:
- raise err
- val = res.value()
+调用 `new_multi_binds` 函数创建 params 列表,用于参数绑定。
- for block in val:
- print(block.fetchall())
+```
+params = new_multi_binds(16)
+params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+params[1].bool((True, None, False))
+params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+params[3].tinyint([0, 127, None])
+params[4].smallint([3, None, 2])
+params[5].int([3, 4, None])
+params[6].bigint([3, 4, None])
+params[7].tinyint_unsigned([3, 4, None])
+params[8].smallint_unsigned([3, 4, None])
+params[9].int_unsigned([3, 4, None])
+params[10].bigint_unsigned([3, 4, None])
+params[11].float([3, None, 1])
+params[12].double([3, None, 1.2])
+params[13].binary(["abc", "dddafadfadfadfadfa", None])
+params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+params[15].timestamp([None, None, 1626861392591])
```
-##### 获取消费进度
-
-Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+调用 stmt 的 `bind_param` 以单行的方式设置 values 或 `bind_param_batch` 以多行的方式设置 values 方法绑定参数。
-```python
-assignments = consumer.assignment()
+```
+stmt.bind_param_batch(params)
```
-##### 指定订阅 Offset
+##### 执行 sql
-Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置,方法参数类型为 TopicPartition。
+调用 stmt 的 `execute` 方法执行 sql
-```python
-tp = TopicPartition(topic='topic1', partition=0, offset=0)
-consumer.seek(tp)
+```
+stmt.execute()
```
-##### 关闭订阅
+##### 关闭 stmt
-消费结束后,应当取消订阅,并关闭 Consumer。
+最后需要关闭 stmt。
-```python
-consumer.unsubscribe()
-consumer.close()
```
-
-##### 完整示例
-
-```python
-{{#include docs/examples/python/tmq_example.py}}
+stmt.close()
```
-##### 获取和重置消费进度示例代码
+##### 示例代码
```python
-{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+{{#include docs/examples/python/stmt_example.py}}
```
-
-除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据,使用 websocket 方式订阅 TMQ 数据需要安装 `taos-ws-py`。
-
-taosws `Consumer` API 提供了基于 Websocket 订阅 TMQ 数据的 API。
-
-##### 创建 Consumer
+##### 创建 stmt
-创建 Consumer 语法为 `consumer = Consumer(conf=configs)`,使用时需要指定 `td.connect.websocket.scheme` 参数值为 "ws",参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
+Python WebSocket 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt,该方法接收 sql 字符串作为参数,sql 字符串目前仅支持用 `?` 来代表绑定的参数。
-```python
+```
import taosws
-consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+conn = taosws.connect('taosws://localhost:6041/test')
+stmt = conn.statement()
```
-##### 订阅 topics
+##### 解析 sql
-Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+调用 stmt 的 `prepare` 方法来解析 insert 语句。
-```python
-consumer.subscribe(['topic1', 'topic2'])
+```
+stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
-##### 消费数据
+##### 参数绑定
-Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+调用 stmt 的 `bind_param` 方法绑定参数。
-```python
-while True:
- res = consumer.poll(timeout=1.0)
- if not res:
- continue
- err = res.error()
- if err is not None:
- raise err
- for block in message:
- for row in block:
- print(row)
+```
+stmt.bind_param([
+ taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
+ taosws.ints_to_column([1, 2, 3, 4]),
+ taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
+ taosws.varchar_to_column(['a', 'b', 'c', 'd']),
+])
```
-##### 获取消费进度
-
-Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+调用 stmt 的 `add_batch` 方法,将参数加入批处理。
-```python
-assignments = consumer.assignment()
+```
+stmt.add_batch()
```
-##### 重置消费进度
+##### 执行 sql
-Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置。
+调用 stmt 的 `execute` 方法执行 sql
-```python
-consumer.seek(topic='topic1', partition=0, offset=0)
+```
+stmt.execute()
```
-##### 结束消费
+##### 关闭 stmt
-消费结束后,应当取消订阅,并关闭 Consumer。
+最后需要关闭 stmt。
-```python
-consumer.unsubscribe()
-consumer.close()
```
-
-##### tmq 订阅示例代码
-
-```python
-{{#include docs/examples/python/tmq_websocket_example.py}}
+stmt.close()
```
-连接器提供了 `assignment` 接口,用于获取 topic assignment 的功能,可以查询订阅的 topic 的消费进度,并提供 `seek` 接口,用于重置 topic 的消费进度。
-
-##### 获取和重置消费进度示例代码
+##### 示例代码
```python
-{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+{{#include docs/examples/python/stmt_websocket_example.py}}
```
-
@@ -775,138 +802,211 @@ consumer.close()
-### 通过参数绑定写入数据
+### 执行带有 reqId 的无模式写入
-TDengine 的 Python 连接器支持参数绑定风格的 Prepare API 方式写入数据,和大多数数据库类似,目前仅支持用 `?` 来代表待绑定的参数。
+连接器的 `schemaless_insert` 和 `schemaless_insert_raw` 方法支持 `req_id` 可选参数,此 `req_Id` 可用于请求链路追踪。
+
+```python
+{{#include docs/examples/python/schemaless_insert_req_id.py}}
+```
+
+```python
+{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
+```
+
+### 数据订阅
+
+连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅文档](../../develop/tmq/)。
+
+#### 创建 Topic
+
+创建 Topic 相关请参考 [数据订阅文档](../../develop/tmq/#创建-topic)。
+
+#### 创建 Consumer
+
+
-
-##### 创建 stmt
+`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API。创建 Consumer 语法为 `consumer = Consumer(configs)`,参数定义请参考 [数据订阅文档](../../develop/tmq/#创建消费者-consumer)。
-Python 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt,该方法接收 sql 字符串作为参数,sql 字符串目前仅支持用 `?` 来代表绑定的参数。
+```python
+from taos.tmq import Consumer
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
-import taos
+
-conn = taos.connect()
-stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
-```
+
-##### 参数绑定
+除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据,使用 websocket 方式订阅 TMQ 数据需要安装 `taos-ws-py`。
-调用 `new_multi_binds` 函数创建 params 列表,用于参数绑定。
+taosws `Consumer` API 提供了基于 Websocket 订阅 TMQ 数据的 API。创建 Consumer 语法为 `consumer = Consumer(conf=configs)`,使用时需要指定 `td.connect.websocket.scheme` 参数值为 "ws",参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
-```
-params = new_multi_binds(16)
-params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
-params[1].bool((True, None, False))
-params[2].tinyint([-128, -128, None]) # -128 is tinyint null
-params[3].tinyint([0, 127, None])
-params[4].smallint([3, None, 2])
-params[5].int([3, 4, None])
-params[6].bigint([3, 4, None])
-params[7].tinyint_unsigned([3, 4, None])
-params[8].smallint_unsigned([3, 4, None])
-params[9].int_unsigned([3, 4, None])
-params[10].bigint_unsigned([3, 4, None])
-params[11].float([3, None, 1])
-params[12].double([3, None, 1.2])
-params[13].binary(["abc", "dddafadfadfadfadfa", None])
-params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
-params[15].timestamp([None, None, 1626861392591])
+```python
+import taosws
+
+consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
-调用 stmt 的 `bind_param` 以单行的方式设置 values 或 `bind_param_batch` 以多行的方式设置 values 方法绑定参数。
+
+
-```
-stmt.bind_param_batch(params)
-```
+#### 订阅 topics
-##### 执行 sql
+
-调用 stmt 的 `execute` 方法执行 sql
+
-```
-stmt.execute()
+Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
```
-##### 关闭 stmt
+
+
-最后需要关闭 stmt。
+Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+```python
+consumer.subscribe(['topic1', 'topic2'])
```
-stmt.close()
+
+
+
+
+#### 消费数据
+
+
+
+
+
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+
+```python
+while True:
+ res = consumer.poll(1)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
```
-##### 示例代码
+
+
+
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
```python
-{{#include docs/examples/python/stmt_example.py}}
+while True:
+ res = consumer.poll(timeout=1.0)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ for block in message:
+ for row in block:
+ print(row)
```
+
+
-
+#### 获取消费进度
-##### 创建 stmt
+
-Python WebSocket 连接器的 `Connection` 提供了 `statement` 方法用于创建参数绑定对象 stmt,该方法接收 sql 字符串作为参数,sql 字符串目前仅支持用 `?` 来代表绑定的参数。
+
+
+Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+```python
+assignments = consumer.assignment()
```
-import taosws
-conn = taosws.connect('taosws://localhost:6041/test')
-stmt = conn.statement()
+Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置,方法参数类型为 TopicPartition。
+
+```python
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
```
-##### 解析 sql
+
+
-调用 stmt 的 `prepare` 方法来解析 insert 语句。
+Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+```python
+assignments = consumer.assignment()
```
-stmt.prepare("insert into t1 values (?, ?, ?, ?)")
+
+Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置。
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
```
-##### 参数绑定
+
+
-调用 stmt 的 `bind_param` 方法绑定参数。
+#### 关闭订阅
-```
-stmt.bind_param([
- taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
- taosws.ints_to_column([1, 2, 3, 4]),
- taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
- taosws.varchar_to_column(['a', 'b', 'c', 'd']),
-])
-```
+
-调用 stmt 的 `add_batch` 方法,将参数加入批处理。
+
-```
-stmt.add_batch()
+消费结束后,应当取消订阅,并关闭 Consumer。
+
+```python
+consumer.unsubscribe()
+consumer.close()
```
-##### 执行 sql
+
+
-调用 stmt 的 `execute` 方法执行 sql
+消费结束后,应当取消订阅,并关闭 Consumer。
-```
-stmt.execute()
+```python
+consumer.unsubscribe()
+consumer.close()
```
-##### 关闭 stmt
+
+
-最后需要关闭 stmt。
+#### 完整示例
+
+
+
+
+```python
+{{#include docs/examples/python/tmq_example.py}}
```
-stmt.close()
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
-##### 示例代码
+
+
```python
-{{#include docs/examples/python/stmt_websocket_example.py}}
+{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
+
diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md
index e5a492580ec8ee63d41ca0f09370b96356fd9489..6c815fc5f0fe8967fe3ae0bf350a16a2e86ded62 100644
--- a/docs/zh/12-taos-sql/19-limit.md
+++ b/docs/zh/12-taos-sql/19-limit.md
@@ -36,7 +36,7 @@ description: 合法字符集和命名中的限制规则
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
- 数据库的副本数只能设置为 1 或 3
- 用户名的最大长度是 23 字节
-- 用户密码的最大长度是 128 字节
+- 用户密码的最大长度是 31 字节
- 总数据行数取决于可用资源
- 单个数据库的虚拟结点数上限为 1024
diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md
index e4ba02cbb5768f85efc3f8b89ff82d77199e8bbb..a9c3910500d7ad3ba2435eb4bebb74085a10da78 100644
--- a/docs/zh/12-taos-sql/25-grant.md
+++ b/docs/zh/12-taos-sql/25-grant.md
@@ -16,7 +16,7 @@ CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
use_name 最长为 23 字节。
-password 最长为 32 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+password 最长为 31 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md
index aa84140296832f79a6498d0da2b5a8f500cd1e90..3f3091b19c7da17982a29ee3d17d141557942d93 100644
--- a/docs/zh/12-taos-sql/27-index.md
+++ b/docs/zh/12-taos-sql/27-index.md
@@ -28,6 +28,24 @@ functions:
- WATERMARK: 最小单位毫秒,取值范围 [0ms, 900000ms],默认值为 5 秒,只可用于超级表。
- MAX_DELAY: 最小单位毫秒,取值范围 [1ms, 900000ms],默认值为 interval 的值(但不能超过最大值),只可用于超级表。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。
+```sql
+DROP DATABASE IF EXISTS d0;
+CREATE DATABASE d0;
+USE d0;
+CREATE TABLE IF NOT EXISTS st1 (ts timestamp, c1 int, c2 float, c3 double) TAGS (t1 int unsigned);
+CREATE TABLE ct1 USING st1 TAGS(1000);
+CREATE TABLE ct2 USING st1 TAGS(2000);
+INSERT INTO ct1 VALUES(now+0s, 10, 2.0, 3.0);
+INSERT INTO ct1 VALUES(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);
+CREATE SMA INDEX sma_index_name1 ON st1 FUNCTION(max(c1),max(c2),min(c1)) INTERVAL(5m,10s) SLIDING(5m) WATERMARK 5s MAX_DELAY 1m;
+-- 从 SMA 索引查询
+ALTER LOCAL 'querySmaOptimize' '1';
+SELECT max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m);
+SELECT _wstart,_wend,_wduration,max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m);
+-- 从原始数据查询
+ALTER LOCAL 'querySmaOptimize' '0';
+```
+
### FULLTEXT 索引
对指定列建立文本索引,可以提升含有文本过滤的查询的性能。FULLTEXT 索引不支持 index_option 语法。现阶段只支持对 JSON 类型的标签列创建 FULLTEXT 索引。不支持多列联合索引,但可以为每个列分布创建 FULLTEXT 索引。
@@ -41,7 +59,6 @@ DROP INDEX index_name;
## 查看索引
````sql
-```sql
SHOW INDEXES FROM tbl_name [FROM db_name];
````
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index c5d98767f9134c8eeb9bbc1ee2fb887370d9da54..319046ba8f6981ec75feb9095ebfa72b03ed10f0 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -437,3 +437,29 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- **sqls** :
- **sql** : 执行的 SQL 命令,必填。
+
+#### 配置文件中数据类型书写对照表
+
+| # | **引擎** | **taosBenchmark**
+| --- | :----------------: | :---------------:
+| 1 | TIMESTAMP | timestamp
+| 2 | INT | int
+| 3 | INT UNSIGNED | uint
+| 4 | BIGINT | bigint
+| 5 | BIGINT UNSIGNED | ubigint
+| 6 | FLOAT | float
+| 7 | DOUBLE | double
+| 8 | BINARY | binary
+| 9 | SMALLINT | smallint
+| 10 | SMALLINT UNSIGNED | usmallint
+| 11 | TINYINT | tinyint
+| 12 | TINYINT UNSIGNED | utinyint
+| 13 | BOOL | bool
+| 14 | NCHAR | nchar
+| 15 | VARCHAR | varchar
+| 15 | JSON | json
+
+注意:taosBenchmark 配置文件中数据类型必须小写方可识别
+
+
+
diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md
index 76e546c3456d88be957a25ff98baf879662ad5b7..dc4f25cbe8422cc73db129b25bacac539cc20130 100644
--- a/docs/zh/20-third-party/11-kafka.md
+++ b/docs/zh/20-third-party/11-kafka.md
@@ -369,6 +369,9 @@ curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
8. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic的命名规则 ``;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 ``
9. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 ``,false 表示规则为 ``,默认 false。此配置项在 `topic.per.stable` 设置为 false 时不生效。
10. `topic.delimiter`: topic 名称分割符,默认为 `-`。
+11. `read.method`: 从 TDengine 读取数据方式,query 或是 subscription。默认为 subscription。
+12. `subscription.group.id`: 指定 TDengine 数据订阅的组 id,当 `read.method` 为 subscription 时,此项为必填项。
+13. `subscription.from`: 指定 TDengine 数据订阅起始位置,latest 或是 earliest。默认为 latest。
## 其他说明
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index 557552bc1c1b56688a3706fb63834a58128036f6..67718d59bf155399fff34a126d9c826a549aea77 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
+## 3.0.7.0
+
+
+
## 3.0.6.0
diff --git a/examples/C#/taosdemo/README.md b/examples/C#/taosdemo/README.md
index 3cba3529bf513e2bf3d4ab0c169e7f3d03b2e6a8..970d5332acabc5e19525fcb1a941dc8145dc8591 100644
--- a/examples/C#/taosdemo/README.md
+++ b/examples/C#/taosdemo/README.md
@@ -36,7 +36,11 @@ dotnet build -c Release
## Usage
```
-Usage: mono taosdemo.exe [OPTION...]
+Usage with mono:
+$ mono taosdemo.exe [OPTION...]
+
+Usage with dotnet:
+Usage: .\bin\Release\net5.0\taosdemo.exe [OPTION...]
--help Show usage.
diff --git a/examples/C#/taosdemo/taosdemo.cs b/examples/C#/taosdemo/taosdemo.cs
index e092c48f15314f5cad0a9509190d7b9970a7073a..a48439d192bb5dfcd0083f27abb258ece0f4f28d 100644
--- a/examples/C#/taosdemo/taosdemo.cs
+++ b/examples/C#/taosdemo/taosdemo.cs
@@ -72,7 +72,7 @@ namespace TDengineDriver
{
if ("--help" == argv[i])
{
- Console.WriteLine("Usage: mono taosdemo.exe [OPTION...]");
+ Console.WriteLine("Usage: taosdemo.exe [OPTION...]");
Console.WriteLine("");
HelpPrint("--help", "Show usage.");
Console.WriteLine("");
@@ -305,7 +305,7 @@ namespace TDengineDriver
this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port);
if (this.conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
+ Console.WriteLine("Connect to TDengine failed. Reason: {0}\n", TDengine.Error(0));
CleanAndExitProgram(1);
}
else
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index f253b47e50d9eae8017d9edd52ef76b6a4a517ad..58bdb77df3d3f6cde847fa1cd7dddfd2096d275f 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -58,6 +58,7 @@ typedef struct SParseContext {
bool isSuperUser;
bool enableSysInfo;
bool async;
+ bool hasInvisibleCol;
const char* svrVer;
bool nodeOffline;
SArray* pTableMetaPos; // sql table pos => catalog data pos
diff --git a/include/libs/scalar/filter.h b/include/libs/scalar/filter.h
index f20ba287de0ac2ec429ad44107418c8bfe58e0d7..adabe6d67c16953f2204becbf6da123587cb8058 100644
--- a/include/libs/scalar/filter.h
+++ b/include/libs/scalar/filter.h
@@ -41,7 +41,7 @@ typedef struct SFilterColumnParam {
} SFilterColumnParam;
extern int32_t filterInitFromNode(SNode *pNode, SFilterInfo **pinfo, uint32_t options);
-extern bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis,
+extern int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis,
int16_t numOfCols, int32_t *pFilterResStatus);
extern int32_t filterSetDataFromSlotId(SFilterInfo *info, void *param);
extern int32_t filterSetDataFromColId(SFilterInfo *info, void *param);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 8b10e4217cddc74868b1a726b2f971afe13ce9df..ff5d37bf006ee2e4c3f797439e274a9910e63f2f 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -764,6 +764,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
#define TSDB_CODE_INDEX_INVALID_FILE TAOS_DEF_ERROR_CODE(0, 0x3201)
+//scalar
+#define TSDB_CODE_SCALAR_CONVERT_ERROR TAOS_DEF_ERROR_CODE(0, 0x3250)
+
//tmq
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c
index 0f59505f8c02c64543ebb674c8ad86ae996a3f0f..9683d6799a0cc340ec53da56b76210fd77a2b64e 100644
--- a/source/client/src/clientSmlJson.c
+++ b/source/client/src/clientSmlJson.c
@@ -456,7 +456,7 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) {
static inline int32_t smlParseMetricFromJSON(SSmlHandle *info, cJSON *metric, SSmlLineInfo *elements) {
elements->measureLen = strlen(metric->valuestring);
if (IS_INVALID_TABLE_LEN(elements->measureLen)) {
- uError("OTD:0x%" PRIx64 " Metric lenght is 0 or large than 192", info->id);
+ uError("OTD:0x%" PRIx64 " Metric length is 0 or large than 192", info->id);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index c2024a9a779661eb1876525be29497664f9c6eaa..53692c94a4d906e94bf98515b49c53663fe5bb56 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -317,7 +317,7 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true},
- {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
+ {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true},
{TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true},
{TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema), false},
{TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false},
diff --git a/source/dnode/mgmt/node_mgmt/src/dmNodes.c b/source/dnode/mgmt/node_mgmt/src/dmNodes.c
index 19d5e06c5b6d118feaa8bf6d50fe222e7a73007f..a8bf5be3e21136ddb290adbd6215e35e5a7f2d3b 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmNodes.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmNodes.c
@@ -41,7 +41,7 @@ int32_t dmOpenNode(SMgmtWrapper *pWrapper) {
pWrapper->pMgmt = output.pMgmt;
}
- dmReportStartup(pWrapper->name, "openned");
+ dmReportStartup(pWrapper->name, "opened");
return 0;
}
@@ -159,7 +159,7 @@ int32_t dmRunDnode(SDnode *pDnode) {
} else {
count++;
}
-
+
taosMsleep(100);
}
}
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index c337d85b688da80266899376969d68dc5f2b6855..889f0d76df2029efcfe8f21acf45a38e2a242549 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -504,6 +504,11 @@ static void mndDestroySmaObj(SSmaObj *pSmaObj) {
static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCreate, SDbObj *pDb, SStbObj *pStb,
const char *streamName) {
+ if (pDb->cfg.replications > 1) {
+ terrno = TSDB_CODE_MND_INVALID_SMA_OPTION;
+ mError("sma:%s, failed to create since not support multiple replicas", pCreate->name);
+ return -1;
+ }
SSmaObj smaObj = {0};
memcpy(smaObj.name, pCreate->name, TSDB_TABLE_FNAME_LEN);
memcpy(smaObj.stb, pStb->name, TSDB_TABLE_FNAME_LEN);
diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index 2d00e383a23507f65a11ee0e31dc3d814c33f5fe..1fc2e42b8ca9be737adfb60e8312c390dbf3f5b7 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -802,7 +802,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
}
if (TSDB_ALTER_USER_PASSWD == alterReq.alterType &&
- (alterReq.pass[0] == 0 || strlen(alterReq.pass) > TSDB_PASSWORD_LEN)) {
+ (alterReq.pass[0] == 0 || strlen(alterReq.pass) >= TSDB_PASSWORD_LEN)) {
terrno = TSDB_CODE_MND_INVALID_PASS_FORMAT;
goto _OVER;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 29e59daeb9fa4ae6510f0117fa6af82cab39d152..88027e2891dd9daa1150177a6c8b53de40874fe4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -289,6 +289,10 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC
static int32_t updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInfo) {
int32_t i = 0, j = 0;
+ if (j < pSupInfo->numOfCols && PRIMARYKEY_TIMESTAMP_COL_ID == pSupInfo->colId[j]) {
+ j += 1;
+ }
+
while (i < pSchema->numOfCols && j < pSupInfo->numOfCols) {
STColumn* pTCol = &pSchema->columns[i];
if (pTCol->colId == pSupInfo->colId[j]) {
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index 5d663df50e0e532cc185a1c226a178c600414dda..51731faece7ea65ae4696433d2192cc2662d63d7 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -613,7 +613,7 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* de
extern void doDestroyExchangeOperatorInfo(void* param);
-void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
+int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);
diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c
index 42b8a9d31c4afb42b824f0a458bcd3decd2d98a8..0855203e9182d09e73dcd3fea1f96fb878943a25 100644
--- a/source/libs/executor/src/executorInt.c
+++ b/source/libs/executor/src/executorInt.c
@@ -77,8 +77,7 @@ static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock*
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
-static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, bool keep,
- int32_t status);
+static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, int32_t status);
static int32_t doSetInputDataBlock(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag,
bool createDummyCol);
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
@@ -501,20 +500,26 @@ void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
}
-void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo) {
+int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo) {
if (pFilterInfo == NULL || pBlock->info.rows == 0) {
- return;
+ return TSDB_CODE_SUCCESS;
}
SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
- int32_t code = filterSetDataFromSlotId(pFilterInfo, ¶m1);
+ SColumnInfoData* p = NULL;
+
+ int32_t code = filterSetDataFromSlotId(pFilterInfo, ¶m1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _err;
+ }
- SColumnInfoData* p = NULL;
- int32_t status = 0;
+ int32_t status = 0;
+ code = filterExecute(pFilterInfo, pBlock, &p, NULL, param1.numOfCols, &status);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _err;
+ }
- // todo the keep seems never to be True??
- bool keep = filterExecute(pFilterInfo, pBlock, &p, NULL, param1.numOfCols, &status);
- extractQualifiedTupleByFilterResult(pBlock, p, keep, status);
+ extractQualifiedTupleByFilterResult(pBlock, p, status);
if (pColMatchInfo != NULL) {
size_t size = taosArrayGetSize(pColMatchInfo->pList);
@@ -529,16 +534,15 @@ void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pCol
}
}
}
+ code = TSDB_CODE_SUCCESS;
+_err:
colDataDestroy(p);
taosMemoryFree(p);
+ return code;
}
-void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, bool keep, int32_t status) {
- if (keep) {
- return;
- }
-
+void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, int32_t status) {
int8_t* pIndicator = (int8_t*)p->pData;
int32_t totalRows = pBlock->info.rows;
@@ -546,7 +550,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
// here nothing needs to be done
} else if (status == FILTER_RESULT_NONE_QUALIFIED) {
pBlock->info.rows = 0;
- } else {
+ } else if (status == FILTER_RESULT_PARTIAL_QUALIFIED) {
int32_t bmLen = BitmapLen(totalRows);
char* pBitmap = NULL;
int32_t maxRows = 0;
@@ -674,6 +678,8 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
if (pBitmap != NULL) {
taosMemoryFree(pBitmap);
}
+ } else {
+ qError("unknown filter result type: %d", status);
}
}
@@ -715,7 +721,7 @@ void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultR
pCtx[j].resultInfo->numOfRes = pRow->numOfRows;
}
}
-
+
blockDataEnsureCapacity(pBlock, pBlock->info.rows + pCtx[j].resultInfo->numOfRes);
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index cd450c5bb7fcfb0f9cbeb4767fa584cc30ecc3aa..cd9bbbbb02fc91d79559e181351152e3adbe51b2 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -38,7 +38,7 @@ typedef struct SIndefOperatorInfo {
SSDataBlock* pNextGroupRes;
} SIndefOperatorInfo;
-static SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator);
+static int32_t doGenerateSourceData(SOperatorInfo* pOperator);
static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator);
static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator);
static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols);
@@ -200,7 +200,7 @@ static int32_t setInfoForNewGroup(SSDataBlock* pBlock, SLimitInfo* pLimitInfo, S
if (newGroup) {
resetLimitInfoForNextGroup(pLimitInfo);
}
-
+
return PROJECT_RETRIEVE_CONTINUE;
}
@@ -252,7 +252,12 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SLimitInfo* pLimitInfo = &pProjectInfo->limitInfo;
if (downstream == NULL) {
- return doGenerateSourceData(pOperator);
+ code = doGenerateSourceData(pOperator);
+ if (code != TSDB_CODE_SUCCESS) {
+ T_LONG_JMP(pTaskInfo->env, code);
+ }
+
+ return (pRes->info.rows > 0) ? pRes : NULL;
}
while (1) {
@@ -601,7 +606,7 @@ SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols) {
return pList;
}
-SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator) {
+int32_t doGenerateSourceData(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SExprSupp* pSup = &pOperator->exprSupp;
@@ -615,14 +620,45 @@ SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator) {
for (int32_t k = 0; k < pSup->numOfExprs; ++k) {
int32_t outputSlotId = pExpr[k].base.resSchema.slotId;
- ASSERT(pExpr[k].pExpr->nodeType == QUERY_NODE_VALUE);
- SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, outputSlotId);
+ if (pExpr[k].pExpr->nodeType == QUERY_NODE_VALUE) {
+ SColumnInfoData* pColInfoData = taosArrayGet(pRes->pDataBlock, outputSlotId);
+
+ int32_t type = pExpr[k].base.pParam[0].param.nType;
+ if (TSDB_DATA_TYPE_NULL == type) {
+ colDataSetNNULL(pColInfoData, 0, 1);
+ } else {
+ colDataSetVal(pColInfoData, 0, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false);
+ }
+ } else if (pExpr[k].pExpr->nodeType == QUERY_NODE_FUNCTION) {
+ SqlFunctionCtx* pfCtx = &pSup->pCtx[k];
+
+ // UDF scalar functions will be calculated here, for example, select foo(n) from (select 1 n).
+ // UDF aggregate functions will be handled in agg operator.
+ if (fmIsScalarFunc(pfCtx->functionId)) {
+ SArray* pBlockList = taosArrayInit(4, POINTER_BYTES);
+ taosArrayPush(pBlockList, &pRes);
- int32_t type = pExpr[k].base.pParam[0].param.nType;
- if (TSDB_DATA_TYPE_NULL == type) {
- colDataSetNNULL(pColInfoData, 0, 1);
+ SColumnInfoData* pResColData = taosArrayGet(pRes->pDataBlock, outputSlotId);
+ SColumnInfoData idata = {.info = pResColData->info, .hasNull = true};
+
+ SScalarParam dest = {.columnData = &idata};
+ int32_t code = scalarCalculate((SNode*)pExpr[k].pExpr->_function.pFunctNode, pBlockList, &dest);
+ if (code != TSDB_CODE_SUCCESS) {
+ taosArrayDestroy(pBlockList);
+ return code;
+ }
+
+ int32_t startOffset = pRes->info.rows;
+ ASSERT(pRes->info.capacity > 0);
+ colDataAssign(pResColData, &idata, dest.numOfRows, &pRes->info);
+ colDataDestroy(&idata);
+
+ taosArrayDestroy(pBlockList);
+ } else {
+ return TSDB_CODE_OPS_NOT_SUPPORT;
+ }
} else {
- colDataSetVal(pColInfoData, 0, taosVariantGet(&pExpr[k].base.pParam[0].param, type), false);
+ return TSDB_CODE_OPS_NOT_SUPPORT;
}
}
@@ -638,7 +674,7 @@ SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator) {
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
}
- return (pRes->info.rows > 0) ? pRes : NULL;
+ return TSDB_CODE_SUCCESS;
}
static void setPseudoOutputColInfo(SSDataBlock* pResult, SqlFunctionCtx* pCtx, SArray* pPseudoList) {
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index b3a9699718eb4e17a22d9dfe52e4746a329933a3..c1c99750ea5b44621b53b935c7d83542672d1fb4 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -400,9 +400,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
pCost->totalRows -= pBlock->info.rows;
if (pOperator->exprSupp.pFilterInfo != NULL) {
- int64_t st = taosGetTimestampUs();
- doFilter(pBlock, pOperator->exprSupp.pFilterInfo, &pTableScanInfo->matchInfo);
+ int32_t code = doFilter(pBlock, pOperator->exprSupp.pFilterInfo, &pTableScanInfo->matchInfo);
+ if (code != TSDB_CODE_SUCCESS) return code;
+ int64_t st = taosGetTimestampUs();
double el = (taosGetTimestampUs() - st) / 1000.0;
pTableScanInfo->readRecorder.filterTime += el;
@@ -2798,7 +2799,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
} else if (kWay <= 2) {
kWay = 2;
} else {
- int i = 2;
+ int i = 2;
while (i * 2 <= kWay) i = i * 2;
kWay = i;
}
diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c
index 022440b2ad238fcca62180c4b03174a4dec4eb75..db1eed851a0ed36f5b3dd3e8acbc93fe40e59d06 100644
--- a/source/libs/executor/src/timesliceoperator.c
+++ b/source/libs/executor/src/timesliceoperator.c
@@ -315,7 +315,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = 0;
if (!IS_VAR_DATA_TYPE(pVar->nType)) {
- GET_TYPED_DATA(v, float, pVar->nType, &pVar->i);
+ GET_TYPED_DATA(v, float, pVar->nType, &pVar->f);
} else {
v = taosStr2Float(varDataVal(pVar->pz), NULL);
}
@@ -323,7 +323,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
} else if (pDst->info.type == TSDB_DATA_TYPE_DOUBLE) {
double v = 0;
if (!IS_VAR_DATA_TYPE(pVar->nType)) {
- GET_TYPED_DATA(v, double, pVar->nType, &pVar->i);
+ GET_TYPED_DATA(v, double, pVar->nType, &pVar->d);
} else {
v = taosStr2Double(varDataVal(pVar->pz), NULL);
}
@@ -333,7 +333,15 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
if (!IS_VAR_DATA_TYPE(pVar->nType)) {
GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
} else {
- v = taosStr2int64(varDataVal(pVar->pz));
+ v = taosStr2Int64(varDataVal(pVar->pz), NULL, 10);
+ }
+ colDataSetVal(pDst, rows, (char*)&v, false);
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(pDst->info.type)) {
+ uint64_t v = 0;
+ if (!IS_VAR_DATA_TYPE(pVar->nType)) {
+ GET_TYPED_DATA(v, uint64_t, pVar->nType, &pVar->u);
+ } else {
+ v = taosStr2UInt64(varDataVal(pVar->pz), NULL, 10);
}
colDataSetVal(pDst, rows, (char*)&v, false);
} else if (IS_BOOLEAN_TYPE(pDst->info.type)) {
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index c318e25fd9ccd88a18a04f0f042eec169e300723..2ccbc0dfc4e2f027445e8d8f979396889bedf86a 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -878,6 +878,7 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
(igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0));
for (int32_t i = 0; i < nums; ++i) {
if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) {
+ pCxt->pParseCxt->hasInvisibleCol = true;
continue;
}
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
@@ -3203,7 +3204,11 @@ static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect
code = translateFillValues(pCxt, pSelect);
}
if (NULL == pSelect->pProjectionList || 0 >= pSelect->pProjectionList->length) {
- code = TSDB_CODE_PAR_INVALID_SELECTED_EXPR;
+ if (pCxt->pParseCxt->hasInvisibleCol) {
+ code = TSDB_CODE_PAR_PERMISSION_DENIED;
+ } else {
+ code = TSDB_CODE_PAR_INVALID_SELECTED_EXPR;
+ }
}
return code;
}
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index b3afbb53c1daa0314ab07e73a16a2bb67a5e24d3..892fd588b6f274dd4418a03063986b1463491dd1 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -1979,7 +1979,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) {
int32_t code = sclConvertValueToSclParam(var, &out, NULL);
if (code != TSDB_CODE_SUCCESS) {
qError("convert value to type[%d] failed", type);
- return TSDB_CODE_TSC_INVALID_OPERATION;
+ return code;
}
size_t bufBytes = IS_VAR_DATA_TYPE(type) ? varDataTLen(out.columnData->pData)
@@ -4644,11 +4644,11 @@ _return:
FLT_RET(code);
}
-bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis, int16_t numOfCols,
- int32_t *pResultStatus) {
+int32_t filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SColumnDataAgg *statis,
+ int16_t numOfCols, int32_t *pResultStatus) {
if (NULL == info) {
*pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
- return false;
+ return TSDB_CODE_SUCCESS;
}
SScalarParam output = {0};
@@ -4656,7 +4656,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
int32_t code = sclCreateColumnInfoData(&type, pSrc->info.rows, &output);
if (code != TSDB_CODE_SUCCESS) {
- return false;
+ return code;
}
if (info->scalarMode) {
@@ -4666,7 +4666,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
code = scalarCalculate(info->sclCtx.node, pList, &output);
taosArrayDestroy(pList);
- FLT_ERR_RET(code); // TODO: current errcode returns as true
+ FLT_ERR_RET(code);
*p = output.columnData;
@@ -4677,18 +4677,23 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
} else {
*pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
}
- return false;
- } else {
- *p = output.columnData;
- output.numOfRows = pSrc->info.rows;
+ return TSDB_CODE_SUCCESS;
+ }
- if (*p == NULL) {
- return false;
- }
+ ASSERT(false == info->scalarMode);
+ *p = output.columnData;
+ output.numOfRows = pSrc->info.rows;
- bool keep = (*info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified);
+ if (*p == NULL) {
+ return TSDB_CODE_APP_ERROR;
+ }
- // todo this should be return during filter procedure
+ bool keepAll = (*info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified);
+
+ // todo this should be return during filter procedure
+ if (keepAll) {
+ *pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
+ } else {
int32_t num = 0;
for (int32_t i = 0; i < output.numOfRows; ++i) {
if (((int8_t *)((*p)->pData))[i] == 1) {
@@ -4703,9 +4708,9 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC
} else {
*pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
}
-
- return keep;
}
+
+ return TSDB_CODE_SUCCESS;
}
typedef struct SClassifyConditionCxt {
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index d9295656e8c4b882e5af1d735d7399b7dfb5a332..4eb0f0e1bce3e9f21ce0b4891e7c7c65e3c439cd 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -1694,7 +1694,8 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
SCL_ERR_JRET(TSDB_CODE_APP_ERROR);
}
- if (1 == res->numOfRows) {
+ SSDataBlock *pb = taosArrayGetP(pBlockList, 0);
+ if (1 == res->numOfRows && pb->info.rows > 0) {
SCL_ERR_JRET(sclExtendResRows(pDst, res, pBlockList));
} else {
colInfoDataEnsureCapacity(pDst->columnData, res->numOfRows, true);
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index 35256d0c96c5e7e2a51df3f575c3c8bf57341fab..0246724c5be21ee9eda89c1da2f2605341ccb1af 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -240,15 +240,20 @@ _getValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) {
}
static FORCE_INLINE void varToTimestamp(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
int64_t value = 0;
if (taosParseTime(buf, &value, strlen(buf), pOut->columnData->info.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
value = 0;
+ terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
}
colDataSetInt64(pOut->columnData, rowIndex, &value);
}
static FORCE_INLINE void varToSigned(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
if (overflow) {
int64_t minValue = tDataTypes[pOut->columnData->info.type].minValue;
int64_t maxValue = tDataTypes[pOut->columnData->info.type].maxValue;
@@ -290,6 +295,8 @@ static FORCE_INLINE void varToSigned(char *buf, SScalarParam *pOut, int32_t rowI
}
static FORCE_INLINE void varToUnsigned(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
if (overflow) {
uint64_t minValue = (uint64_t)tDataTypes[pOut->columnData->info.type].minValue;
uint64_t maxValue = (uint64_t)tDataTypes[pOut->columnData->info.type].maxValue;
@@ -330,6 +337,8 @@ static FORCE_INLINE void varToUnsigned(char *buf, SScalarParam *pOut, int32_t ro
}
static FORCE_INLINE void varToFloat(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
if (TSDB_DATA_TYPE_FLOAT == pOut->columnData->info.type) {
float value = taosStr2Float(buf, NULL);
colDataSetFloat(pOut->columnData, rowIndex, &value);
@@ -341,6 +350,8 @@ static FORCE_INLINE void varToFloat(char *buf, SScalarParam *pOut, int32_t rowIn
}
static FORCE_INLINE void varToBool(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
int64_t value = taosStr2Int64(buf, NULL, 10);
bool v = (value != 0) ? true : false;
colDataSetInt8(pOut->columnData, rowIndex, (int8_t *)&v);
@@ -348,6 +359,8 @@ static FORCE_INLINE void varToBool(char *buf, SScalarParam *pOut, int32_t rowInd
// todo remove this malloc
static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
int32_t len = 0;
int32_t inputLen = varDataLen(buf);
int32_t outputMaxLen = (inputLen + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
@@ -357,6 +370,7 @@ static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIn
taosMbsToUcs4(varDataVal(buf), inputLen, (TdUcs4 *)varDataVal(t), outputMaxLen - VARSTR_HEADER_SIZE, &len);
if (!ret) {
sclError("failed to convert to NCHAR");
+ terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
}
varDataSetLen(t, len);
@@ -365,11 +379,14 @@ static FORCE_INLINE void varToNchar(char *buf, SScalarParam *pOut, int32_t rowIn
}
static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
int32_t inputLen = varDataLen(buf);
char *t = taosMemoryCalloc(1, inputLen + VARSTR_HEADER_SIZE);
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(buf), varDataLen(buf), varDataVal(t));
if (len < 0) {
+ terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
taosMemoryFree(t);
return;
}
@@ -379,22 +396,26 @@ static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIn
taosMemoryFree(t);
}
-// todo remove this malloc
static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) {
//[ToDo] support to parse WKB as well as WKT
- unsigned char *t = NULL;
+ terrno = TSDB_CODE_SUCCESS;
+
size_t len = 0;
+ unsigned char *t = NULL;
+ char *output = NULL;
if (initCtxGeomFromText()) {
- sclError("failed to init geometry ctx");
- return;
+ sclError("failed to init geometry ctx, %s", getThreadLocalGeosCtx()->errMsg);
+ terrno = TSDB_CODE_APP_ERROR;
+ goto _err;
}
if (doGeomFromText(buf, &t, &len)) {
- sclDebug("failed to convert text to geometry");
- return;
+ sclInfo("failed to convert text to geometry, %s", getThreadLocalGeosCtx()->errMsg);
+ terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
+ goto _err;
}
- char *output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE);
+ output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE);
memcpy(output + VARSTR_HEADER_SIZE, t, len);
varDataSetLen(output, len);
@@ -402,10 +423,19 @@ static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t ro
taosMemoryFree(output);
geosFreeBuffer(t);
+
+ return;
+
+_err:
+ ASSERT(t == NULL && len == 0);
+ VarDataLenT dummyHeader = 0;
+ colDataSetVal(pOut->columnData, rowIndex, (const char *)&dummyHeader, false);
}
// TODO opt performance, tmp is not needed.
int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
+ terrno = TSDB_CODE_SUCCESS;
+
bool vton = false;
_bufConverteFunc func = NULL;
@@ -431,7 +461,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
func = varToGeometry;
} else {
sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType);
- return TSDB_CODE_APP_ERROR;
+ terrno = TSDB_CODE_APP_ERROR;
+ return terrno;
}
pCtx->pOut->numOfRows = pCtx->pIn->numOfRows;
@@ -451,7 +482,7 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
convertType = TSDB_DATA_TYPE_NCHAR;
} else if (tTagIsJson(data) || *data == TSDB_DATA_TYPE_NULL) {
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
- return terrno;
+ goto _err;
} else {
convertNumberToNumber(data + CHAR_BYTES, colDataGetNumData(pCtx->pOut->columnData, i), *data, pCtx->outType);
continue;
@@ -463,7 +494,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
tmp = taosMemoryMalloc(bufSize);
if (tmp == NULL) {
sclError("out of memory in vectorConvertFromVarData");
- return TSDB_CODE_OUT_OF_MEMORY;
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
}
}
@@ -477,15 +509,15 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
// we need to convert it to native char string, and then perform the string to numeric data
if (varDataLen(data) > bufSize) {
sclError("castConvert convert buffer size too small");
- taosMemoryFreeClear(tmp);
- return TSDB_CODE_APP_ERROR;
+ terrno = TSDB_CODE_APP_ERROR;
+ goto _err;
}
int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(data), varDataLen(data), tmp);
if (len < 0) {
sclError("castConvert taosUcs4ToMbs error 1");
- taosMemoryFreeClear(tmp);
- return TSDB_CODE_APP_ERROR;
+ terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
+ goto _err;
}
tmp[len] = 0;
@@ -493,12 +525,16 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) {
}
(*func)(tmp, pCtx->pOut, i, overflow);
+ if (terrno != TSDB_CODE_SUCCESS) {
+ goto _err;
+ }
}
+_err:
if (tmp != NULL) {
taosMemoryFreeClear(tmp);
}
- return TSDB_CODE_SUCCESS;
+ return terrno;
}
double getVectorDoubleValue_JSON(void *src, int32_t index) {
@@ -911,25 +947,25 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut,
int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = {
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, 0,
- /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0,
- /*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0,
- /*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 5, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0, 0, 0,
- /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, 0,
- /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, 0,
- /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, 0,
+ /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, -1,
+ /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, -1,
+ /*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, -1,
+ /*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 5, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0, 0, -1,
+ /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, -1,
+ /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, -1,
+ /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, -1,
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 20,
- /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, 0,
- /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0,
- /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, 0,
- /*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 0, 7, 0, 0, 0, 0,
- /*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 7, 0, 0, 0, 0,
- /*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
- /*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /*MEDB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, -1,
+ /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, -1,
+ /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, -1,
+ /*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 0, 7, 0, 0, 0, -1,
+ /*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 7, 0, 0, 0, -1,
+ /*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, -1,
+ /*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
+ /*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
+ /*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
+ /*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
+ /*MEDB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
/*GEOM*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int32_t vectorGetConvertType(int32_t type1, int32_t type2) {
@@ -1010,6 +1046,11 @@ int32_t vectorConvertCols(SScalarParam *pLeft, SScalarParam *pRight, SScalarPara
if (0 == type) {
return TSDB_CODE_SUCCESS;
}
+ if (-1 == type) {
+ sclError("invalid convert type1:%d, type2:%d", GET_PARAM_TYPE(param1), GET_PARAM_TYPE(param2));
+ terrno = TSDB_CODE_SCALAR_CONVERT_ERROR;
+ return TSDB_CODE_SCALAR_CONVERT_ERROR;
+ }
}
if (type != GET_PARAM_TYPE(param1)) {
@@ -1753,7 +1794,9 @@ void vectorCompareImpl(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *
param1 = pLeft;
param2 = pRight;
} else {
- vectorConvertCols(pLeft, pRight, &pLeftOut, &pRightOut, startIndex, numOfRows);
+ if (vectorConvertCols(pLeft, pRight, &pLeftOut, &pRightOut, startIndex, numOfRows)) {
+ return;
+ }
param1 = (pLeftOut.columnData != NULL) ? &pLeftOut : pLeft;
param2 = (pRightOut.columnData != NULL) ? &pRightOut : pRight;
}
diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c
index e7561ccb7ecf68c5030e12a19c04f3d1a9661cb5..841066a4c9caebc3c69ea86a8fc8533f17289999 100644
--- a/source/libs/scheduler/src/scheduler.c
+++ b/source/libs/scheduler/src/scheduler.c
@@ -35,7 +35,7 @@ int32_t schedulerInit() {
schMgmt.cfg.schPolicy = SCHEDULE_DEFAULT_POLICY;
schMgmt.cfg.enableReSchedule = true;
- qDebug("schedule init, policy: %d, maxNodeTableNum: %" PRId64", reSchedule:%d",
+ qDebug("schedule init, policy: %d, maxNodeTableNum: %" PRId64", reSchedule:%d",
schMgmt.cfg.schPolicy, schMgmt.cfg.maxNodeTableNum, schMgmt.cfg.enableReSchedule);
schMgmt.jobRef = taosOpenRef(schMgmt.cfg.maxJobNum, schFreeJobImpl);
@@ -57,11 +57,11 @@ int32_t schedulerInit() {
}
if (taosGetSystemUUID((char *)&schMgmt.sId, sizeof(schMgmt.sId))) {
- qError("generate schdulerId failed, errno:%d", errno);
+ qError("generate schedulerId failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_SYS_ERROR);
}
- qInfo("scheduler 0x%" PRIx64 " initizlized, maxJob:%u", schMgmt.sId, schMgmt.cfg.maxJobNum);
+ qInfo("scheduler 0x%" PRIx64 " initialized, maxJob:%u", schMgmt.sId, schMgmt.cfg.maxJobNum);
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index fc0003e20a736ea4f68d3815cdad16c4c4d4e10c..298d585481d77ecc903dbf4392f38c5f67e42e8c 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -404,7 +404,12 @@ int32_t streamExecForAll(SStreamTask* pTask) {
while (pTask->taskLevel == TASK_LEVEL__SOURCE) {
int8_t status = atomic_load_8(&pTask->status.taskStatus);
if (status == TASK_STATUS__DROPPING) {
- break;
+ if (pInput != NULL) {
+ streamFreeQitem(pInput);
+ }
+
+ qError("s-task:%s task is dropped, abort exec", id);
+ return TSDB_CODE_SUCCESS;
}
if (status != TASK_STATUS__NORMAL && status != TASK_STATUS__PAUSE && status != TASK_STATUS__STOP) {
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 4c52f89bdc01c969353815b35d686c281fbc83d7..8231fad3a78ccb8f0114544d82eb068803ff9486 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -626,6 +626,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FS_UPDATE, "Rsma fs update erro
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
+//scalar
+TAOS_DEFINE_ERROR(TSDB_CODE_SCALAR_CONVERT_ERROR, "Cannot convert to specific type")
+
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_SNAPSHOT_ERROR, "Can not operate in snapshot mode")
@@ -679,7 +682,7 @@ const char* tstrerror(int32_t err) {
if ((err & 0x00ff0000) == 0x00ff0000) {
int32_t code = err & 0x0000ffff;
// strerror can handle any invalid code
- // invalid code return Unknown error
+ // invalid code return Unknown error
return strerror(code);
}
int32_t s = 0;
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 90e055799704e8c4f89df3c75cce774ed3e60c11..85582e68c483b59ef06c82a2cebd48b537ec74b5 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -755,6 +755,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/odbc.py
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-20582.py
+,,n,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3
+,,n,system-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3
#tsim test
,,y,script,./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim
diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py
index 78020cb9586e6c59f7c0f84e5aeacdbb596b421c..88d0d420f73f9c06e633ebcf2dbc14a454f6e878 100644
--- a/tests/system-test/0-others/udfTest.py
+++ b/tests/system-test/0-others/udfTest.py
@@ -234,6 +234,11 @@ class TDTestCase:
tdSql.checkData(20,6,88)
tdSql.checkData(20,7,1)
+ tdSql.query("select udf1(1) from (select 1)")
+ tdSql.checkData(0,0,1)
+
+ tdSql.query("select udf1(n) from (select 1 n)")
+ tdSql.checkData(0,0,1)
# aggregate functions
tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
diff --git a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e796882c8d2c11d4ac93f46f4a7a5d660412a22
--- /dev/null
+++ b/tests/system-test/0-others/user_privilege_all.py
@@ -0,0 +1,409 @@
+from itertools import product
+import taos
+import time
+from taos.tmq import *
+from util.cases import *
+from util.common import *
+from util.log import *
+from util.sql import *
+from util.sqlset import *
+
+
+class TDTestCase:
+ """This test case is used to verify the user privileges for insert and select operations on
+ stable, child table and regular table
+ """
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ # init the tdsql
+ tdSql.init(conn.cursor())
+ self.setsql = TDSetSql()
+ # user info
+ self.username = 'test'
+ self.password = 'test'
+ # db info
+ self.dbname = "user_privilege_all_db"
+ self.stbname = 'stb'
+ self.common_tbname = "tb"
+ self.ctbname_list = ["ct1", "ct2"]
+ self.common_table_dict = {
+ 'ts':'timestamp',
+ 'col1':'float',
+ 'col2':'int'
+ }
+ self.stable_column_dict = {
+ 'ts': 'timestamp',
+ 'col1': 'float',
+ 'col2': 'int',
+ }
+ self.tag_dict = {
+ 'ctbname': 'binary(10)'
+ }
+
+ # case list
+ self.cases = {
+ "test_db_table_both_no_permission": {
+ "db_privilege": "none",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, False, False, False, False, False]
+ },
+ "test_db_no_permission_table_read": {
+ "db_privilege": "none",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "read",
+ "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, False, False, False, False, True]
+ },
+ "test_db_no_permission_childtable_read": {
+ "db_privilege": "none",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "read",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, True, True, False, False, False]
+ },
+ "test_db_no_permission_table_write": {
+ "db_privilege": "none",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "write",
+ "sql": ["insert into ct1 using stb tags('ct1') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, False, False, False, True, False]
+ },
+ "test_db_no_permission_childtable_write": {
+ "db_privilege": "none",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "write",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [True, False, False, False, False, False]
+ },
+ "test_db_read_table_no_permission": {
+ "db_privilege": "read",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, True, True, True, False, True]
+ },
+ "test_db_read_table_read": {
+ "db_privilege": "read",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "read",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, True, True, True, False, True]
+ },
+ "test_db_read_childtable_read": {
+ "db_privilege": "read",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "read",
+ "child_table_ct2_privilege": "read",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 3.3, 3);",
+ "select * from tb;"],
+ "res": [False, True, True, True, False, True]
+ },
+ "test_db_read_table_write": {
+ "db_privilege": "read",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "write",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 4.4, 4);",
+ "select * from tb;"],
+ "res": [False, True, True, True, True, True]
+ },
+ "test_db_read_childtable_write": {
+ "db_privilege": "read",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "write",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 1.1, 1)",
+ "insert into ct1 using stb tags('ct1') values(now, 5.5, 5)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 4.4, 4);",
+ "select * from tb;"],
+ "res": [False, True, True, True, True, False, True]
+ },
+ "test_db_write_table_no_permission": {
+ "db_privilege": "write",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 6.6, 6)",
+ "insert into ct1 using stb tags('ct1') values(now, 7.7, 7)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 8.8, 8);",
+ "select * from tb;"],
+ "res": [True, True, False, False, False, True, False]
+ },
+ "test_db_write_table_write": {
+ "db_privilege": "write",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 9.9, 9)",
+ "insert into ct1 using stb tags('ct1') values(now, 10.0, 10)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 11.1, 11);",
+ "select * from tb;"],
+ "res": [True, True, False, False, False, True, False]
+ },
+ "test_db_write_childtable_write": {
+ "db_privilege": "write",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 12.2, 12)",
+ "insert into ct1 using stb tags('ct1') values(now, 13.3, 13)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 14.4, 14);",
+ "select * from tb;"],
+ "res": [True, True, False, False, False, True, False]
+ },
+ "test_db_write_table_read": {
+ "db_privilege": "write",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "none",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "read",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 15.5, 15)",
+ "insert into ct1 using stb tags('ct1') values(now, 16.6, 16)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 17.7, 17);",
+ "select * from tb;"],
+ "res": [True, True, False, False, False, True, True]
+ },
+ "test_db_write_childtable_read": {
+ "db_privilege": "write",
+ "stable_priviege": "none",
+ "child_table_ct1_privilege": "read",
+ "child_table_ct2_privilege": "none",
+ "table_tb_privilege": "none",
+ "sql": ["insert into ct2 using stb tags('ct2') values(now, 18.8, 18)",
+ "insert into ct1 using stb tags('ct1') values(now, 19.9, 19)",
+ "select * from stb;",
+ "select * from ct1;",
+ "select * from ct2;",
+ "insert into tb values(now, 20.0, 20);",
+ "select * from tb;"],
+ "res": [True, True, True, True, False, True, False]
+ }
+ }
+
+ def prepare_data(self):
+ """Create the db and data for test
+ """
+ tdLog.debug("Start to prepare the data for test")
+        # create database
+ tdSql.execute(f"create database {self.dbname}")
+ tdSql.execute(f"use {self.dbname}")
+
+ # create stable
+ tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.stable_column_dict, self.tag_dict))
+ tdLog.debug("Create stable {} successfully".format(self.stbname))
+
+ # insert data into child table
+ for ctname in self.ctbname_list:
+ tdSql.execute(f"insert into {ctname} using {self.stbname} tags('{ctname}') values(now, 1.1, 1)")
+ tdSql.execute(f"insert into {ctname} using {self.stbname} tags('{ctname}') values(now, 2.1, 2)")
+
+ # create common table
+ tdSql.execute(self.setsql.set_create_normaltable_sql(self.common_tbname, self.common_table_dict))
+ tdLog.debug("Create common table {} successfully".format(self.common_tbname))
+
+ # insert data into common table
+ tdSql.execute(f"insert into {self.common_tbname} values(now, 1.1, 1)")
+ tdSql.execute(f"insert into {self.common_tbname} values(now, 2.2, 2)")
+ tdLog.debug("Finish to prepare the data")
+
+ def create_user(self):
+ """Create the user for test
+ """
+ tdSql.execute(f'create user {self.username} pass "{self.password}"')
+ tdLog.debug("sql:" + f'create user {self.username} pass "{self.password}" successfully')
+
+ def grant_privilege(self, username, privilege, table, tag_condition=None):
+ """Add the privilege for the user
+ """
+ try:
+ if tag_condition:
+ tdSql.execute(f'grant {privilege} on {self.dbname}.{table} with {tag_condition} to {username}')
+ else:
+ tdSql.execute(f'grant {privilege} on {self.dbname}.{table} to {username}')
+ time.sleep(2)
+ tdLog.debug("Grant {} privilege on {}.{} with condition {} to {} successfully".format(privilege, self.dbname, table, tag_condition, username))
+ except Exception as ex:
+ tdLog.exit(ex)
+
+ def remove_privilege(self, username, privilege, table, tag_condition=None):
+ """Remove the privilege for the user
+ """
+ try:
+ if tag_condition:
+ tdSql.execute(f'revoke {privilege} on {self.dbname}.{table} with {tag_condition} from {username}')
+ else:
+ tdSql.execute(f'revoke {privilege} on {self.dbname}.{table} from {username}')
+ tdLog.debug("Revoke {} privilege on {}.{} with condition {} from {} successfully".format(privilege, self.dbname, table, tag_condition, username))
+ except Exception as ex:
+ tdLog.exit(ex)
+
+ def run(self):
+ self.create_user()
+ # prepare the test data
+ self.prepare_data()
+
+ for case_name in self.cases.keys():
+ tdLog.debug("Execute the case {} with params {}".format(case_name, str(self.cases[case_name])))
+ # grant privilege for user test if case need
+ if self.cases[case_name]["db_privilege"] != "none":
+ self.grant_privilege(self.username, self.cases[case_name]["db_privilege"], "*")
+ if self.cases[case_name]["stable_priviege"] != "none":
+ self.grant_privilege(self.username, self.cases[case_name]["stable_priviege"], self.stbname)
+ if self.cases[case_name]["child_table_ct1_privilege"] != "none" and self.cases[case_name]["child_table_ct2_privilege"] != "none":
+ self.grant_privilege(self.username, self.cases[case_name]["child_table_ct1_privilege"], self.stbname, "ctbname='ct1' or ctbname='ct2'")
+ elif self.cases[case_name]["child_table_ct1_privilege"] != "none":
+ self.grant_privilege(self.username, self.cases[case_name]["child_table_ct1_privilege"], self.stbname, "ctbname='ct1'")
+ elif self.cases[case_name]["child_table_ct2_privilege"] != "none":
+ self.grant_privilege(self.username, self.cases[case_name]["child_table_ct2_privilege"], self.stbname, "ctbname='ct2'")
+ if self.cases[case_name]["table_tb_privilege"] != "none":
+ self.grant_privilege(self.username, self.cases[case_name]["table_tb_privilege"], self.common_tbname)
+ # connect db with user test
+ testconn = taos.connect(user=self.username, password=self.password)
+ if case_name != "test_db_table_both_no_permission":
+ testconn.execute("use %s;" % self.dbname)
+ # check privilege of user test from ins_user_privileges table
+ res = testconn.query("select * from information_schema.ins_user_privileges;")
+ tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(res.fetch_all()))
+ # check privilege of user test by executing sql query
+ for index in range(len(self.cases[case_name]["sql"])):
+ tdLog.debug("Execute sql: {}".format(self.cases[case_name]["sql"][index]))
+ try:
+ # for write privilege
+ if "insert " in self.cases[case_name]["sql"][index]:
+ testconn.execute(self.cases[case_name]["sql"][index])
+ # check the expected result
+ if self.cases[case_name]["res"][index]:
+ tdLog.debug("Write data with sql {} successfully".format(self.cases[case_name]["sql"][index]))
+ # for read privilege
+ elif "select " in self.cases[case_name]["sql"][index]:
+ res = testconn.query(self.cases[case_name]["sql"][index])
+ data = res.fetch_all()
+ tdLog.debug("query result: {}".format(data))
+ # check query results by cases
+ if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read"] and self.cases[case_name]["sql"][index] == "select * from ct2;":
+ if not self.cases[case_name]["res"][index]:
+ if 0 == len(data):
+ tdLog.debug("Query with sql {} successfully as expected with empty result".format(self.cases[case_name]["sql"][index]))
+ continue
+ else:
+ tdLog.exit("Query with sql {} failed with result {}".format(self.cases[case_name]["sql"][index], data))
+ # check the expected result
+ if self.cases[case_name]["res"][index]:
+ if len(data) > 0:
+ tdLog.debug("Query with sql {} successfully".format(self.cases[case_name]["sql"][index]))
+ else:
+ tdLog.exit("Query with sql {} failed with result {}".format(self.cases[case_name]["sql"][index], data))
+ else:
+ tdLog.exit("Execute query sql {} successfully, but expected failed".format(self.cases[case_name]["sql"][index]))
+ except BaseException as ex:
+ # check the expect false result
+ if not self.cases[case_name]["res"][index]:
+ tdLog.debug("Execute sql {} failed with {} as expected".format(self.cases[case_name]["sql"][index], str(ex)))
+ continue
+ # unexpected exception
+ else:
+ tdLog.exit(ex)
+ # remove the privilege
+ if self.cases[case_name]["db_privilege"] != "none":
+ self.remove_privilege(self.username, self.cases[case_name]["db_privilege"], "*")
+ if self.cases[case_name]["stable_priviege"] != "none":
+ self.remove_privilege(self.username, self.cases[case_name]["stable_priviege"], self.stbname)
+ if self.cases[case_name]["child_table_ct1_privilege"] != "none":
+ self.remove_privilege(self.username, self.cases[case_name]["child_table_ct1_privilege"], self.stbname, "ctbname='ct1'")
+ if self.cases[case_name]["child_table_ct2_privilege"] != "none":
+ self.remove_privilege(self.username, self.cases[case_name]["child_table_ct2_privilege"], self.stbname, "ctbname='ct2'")
+ if self.cases[case_name]["table_tb_privilege"] != "none":
+ self.remove_privilege(self.username, self.cases[case_name]["table_tb_privilege"], self.common_tbname)
+ # close the connection of user test
+ testconn.close()
+
+ def stop(self):
+ # remove the user
+ tdSql.execute(f'drop user {self.username}')
+ # close the connection
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 47a4bc4dcf4cdb71c1a4ee87662f1c8af433dc84..b6cefbe36fda9954188d59f813db9be4069a1af8 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -44,7 +44,7 @@ class TDTestCase:
tdSql.execute(
f'''create table if not exists {dbname}.{tbname}
- (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10))
+ (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10), c9 tinyint unsigned, c10 smallint unsigned, c11 int unsigned, c12 bigint unsigned)
'''
)
@@ -52,9 +52,9 @@ class TDTestCase:
tdSql.execute(f"use db")
- tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
- tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')")
- tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
+ tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar', 5, 5, 5, 5)")
+ tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar', 10, 10, 10, 10)")
+ tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar', 15, 15, 15, 15)")
tdLog.printNoPrefix("==========step3:fill null")
@@ -129,21 +129,71 @@ class TDTestCase:
tdLog.printNoPrefix("==========step4:fill value")
## {. . .}
- tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
+ col_list = {'c0', 'c1', 'c2', 'c3', 'c9', 'c10', 'c11', 'c12'}
+ for col in col_list:
+ tdSql.query(f"select interp({col}) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
+ tdSql.checkRows(13)
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(1, 0, 5)
+ tdSql.checkData(2, 0, 1)
+ tdSql.checkData(3, 0, 1)
+ tdSql.checkData(4, 0, 1)
+ tdSql.checkData(5, 0, 1)
+ tdSql.checkData(6, 0, 10)
+ tdSql.checkData(7, 0, 1)
+ tdSql.checkData(8, 0, 1)
+ tdSql.checkData(9, 0, 1)
+ tdSql.checkData(10, 0, 1)
+ tdSql.checkData(11, 0, 15)
+ tdSql.checkData(12, 0, 1)
+
+ tdSql.query(f"select interp(c4) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
tdSql.checkRows(13)
- tdSql.checkData(0, 0, 1)
- tdSql.checkData(1, 0, 5)
- tdSql.checkData(2, 0, 1)
- tdSql.checkData(3, 0, 1)
- tdSql.checkData(4, 0, 1)
- tdSql.checkData(5, 0, 1)
- tdSql.checkData(6, 0, 10)
- tdSql.checkData(7, 0, 1)
- tdSql.checkData(8, 0, 1)
- tdSql.checkData(9, 0, 1)
- tdSql.checkData(10, 0, 1)
- tdSql.checkData(11, 0, 15)
- tdSql.checkData(12, 0, 1)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, 5.0)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, 1.0)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 1.0)
+ tdSql.checkData(6, 0, 10.0)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(9, 0, 1.0)
+ tdSql.checkData(10, 0, 1.0)
+ tdSql.checkData(11, 0, 15.0)
+ tdSql.checkData(12, 0, 1.0)
+
+ tdSql.query(f"select interp(c5) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
+ tdSql.checkRows(13)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, 5.0)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, 1.0)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 1.0)
+ tdSql.checkData(6, 0, 10.0)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(9, 0, 1.0)
+ tdSql.checkData(10, 0, 1.0)
+ tdSql.checkData(11, 0, 15.0)
+ tdSql.checkData(12, 0, 1.0)
+
+ tdSql.query(f"select interp(c6) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
+ tdSql.checkRows(13)
+ tdSql.checkData(0, 0, True)
+ tdSql.checkData(1, 0, True)
+ tdSql.checkData(2, 0, True)
+ tdSql.checkData(3, 0, True)
+ tdSql.checkData(4, 0, True)
+ tdSql.checkData(5, 0, True)
+ tdSql.checkData(6, 0, True)
+ tdSql.checkData(7, 0, True)
+ tdSql.checkData(8, 0, True)
+ tdSql.checkData(9, 0, True)
+ tdSql.checkData(10, 0, True)
+ tdSql.checkData(11, 0, True)
+ tdSql.checkData(12, 0, True)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(value, 1)")
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/insertMix.py b/tests/system-test/5-taos-tools/taosbenchmark/insertMix.py
new file mode 100644
index 0000000000000000000000000000000000000000..60daa8cdc27f5e683239d6722e4f28ae1b8d90d1
--- /dev/null
+++ b/tests/system-test/5-taos-tools/taosbenchmark/insertMix.py
@@ -0,0 +1,102 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+import os
+import subprocess
+import time
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def caseDescription(self):
+ """
+ [TD-13823] taosBenchmark test cases
+ """
+ return
+
+ def init(self, conn, logSql, replicaVar=1):
+ # comment off by Shuduo for CI self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getPath(self, tool="taosBenchmark"):
+ if (platform.system().lower() == 'windows'):
+ tool = tool + ".exe"
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if "community" in selfPath:
+ projPath = selfPath[: selfPath.find("community")]
+ else:
+ projPath = selfPath[: selfPath.find("tests")]
+
+ paths = []
+ for root, dirs, files in os.walk(projPath):
+ if (tool) in files:
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if "packaging" not in rootRealPath:
+ paths.append(os.path.join(root, tool))
+ break
+ if len(paths) == 0:
+ tdLog.exit("taosBenchmark not found!")
+ return
+ else:
+ tdLog.info("taosBenchmark found in %s" % paths[0])
+ return paths[0]
+
+ def checkDataCorrect(self):
+ sql = "select count(*) from meters"
+ tdSql.query(sql)
+ allCnt = tdSql.getData(0, 0)
+ if allCnt < 2000000:
+ tdLog.exit(f"taosbenchmark insert row small. row count={allCnt} sql={sql}")
+ return
+
+        # group by 10 child tables
+ rowCnt = tdSql.query("select count(*),tbname from meters group by tbname")
+ tdSql.checkRows(10)
+
+ # interval
+ sql = "select count(*),max(ic),min(dc),last(*) from meters interval(1s)"
+ rowCnt = tdSql.query(sql)
+ if rowCnt < 10:
+            tdLog.exit(f"taosbenchmark interval(1s) count small. row count={rowCnt} sql={sql}")
+ return
+
+ # nest query
+ tdSql.query("select count(*) from (select * from meters order by ts desc)")
+ tdSql.checkData(0, 0, allCnt)
+
+
+ def run(self):
+ binPath = self.getPath()
+ cmd = "%s -f ./5-taos-tools/taosbenchmark/json/insertMix.json" % binPath
+ tdLog.info("%s" % cmd)
+ errcode = os.system("%s" % cmd)
+ if errcode != 0:
+ tdLog.exit(f"execute taosBenchmark ret error code={errcode}")
+ return
+
+ tdSql.execute("use mixdb")
+ self.checkDataCorrect()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/json/insertMix.json b/tests/system-test/5-taos-tools/taosbenchmark/json/insertMix.json
new file mode 100644
index 0000000000000000000000000000000000000000..7f3b2103cc607c53233acb04687a209612632e5c
--- /dev/null
+++ b/tests/system-test/5-taos-tools/taosbenchmark/json/insertMix.json
@@ -0,0 +1,81 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "connection_pool_size": 8,
+ "num_of_records_per_req": 3000,
+ "thread_count": 10,
+ "create_table_thread_count": 2,
+ "result_file": "./insert_res_mix.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "check_sql": "yes",
+ "continue_if_fail": "no",
+ "databases": [
+ {
+ "dbinfo": {
+ "name": "mixdb",
+ "drop": "yes",
+ "vgroups": 6,
+ "replica": 3,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096
+ },
+ "super_tables": [
+ {
+ "name": "meters",
+ "child_table_exists": "no",
+ "childtable_count": 10,
+ "insert_rows": 300000,
+ "childtable_prefix": "d",
+ "insert_mode": "taosc",
+ "insert_interval": 0,
+ "timestamp_step": 100,
+ "start_timestamp":1500000000000,
+ "disorder_ratio": 10,
+ "update_ratio": 5,
+ "delete_ratio": 1,
+ "disorder_fill_interval": 300,
+ "update_fill_interval": 25,
+ "generate_row_rule": 2,
+ "columns": [
+ { "type": "bool", "name": "bc"},
+ { "type": "float", "name": "fc", "max": 1, "min": 0 },
+ { "type": "double", "name": "dc", "max": 1, "min": 0 },
+ { "type": "tinyint", "name": "ti", "max": 100, "min": 0 },
+ { "type": "smallint", "name": "si", "max": 100, "min": 0 },
+ { "type": "int", "name": "ic", "max": 100, "min": 0 },
+ { "type": "bigint", "name": "bi", "max": 100, "min": 0 },
+ { "type": "utinyint", "name": "uti", "max": 100, "min": 0 },
+ { "type": "usmallint", "name": "usi", "max": 100, "min": 0 },
+ { "type": "uint", "name": "ui", "max": 100, "min": 0 },
+ { "type": "ubigint", "name": "ubi", "max": 100, "min": 0 },
+ { "type": "binary", "name": "bin", "len": 32},
+ { "type": "nchar", "name": "nch", "len": 64}
+ ],
+ "tags": [
+ {
+ "type": "tinyint",
+ "name": "groupid",
+ "max": 10,
+ "min": 1
+ },
+ {
+ "name": "location",
+ "type": "binary",
+ "len": 16,
+ "values": ["San Francisco", "Los Angles", "San Diego",
+ "San Jose", "Palo Alto", "Campbell", "Mountain View",
+ "Sunnyvale", "Santa Clara", "Cupertino"]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/json/stt.json b/tests/system-test/5-taos-tools/taosbenchmark/json/stt.json
new file mode 100644
index 0000000000000000000000000000000000000000..27f32010ed5d07647b76c7d2ea797c809101aaf3
--- /dev/null
+++ b/tests/system-test/5-taos-tools/taosbenchmark/json/stt.json
@@ -0,0 +1,81 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "connection_pool_size": 8,
+ "num_of_records_per_req": 3000,
+ "thread_count": 20,
+ "create_table_thread_count": 5,
+ "result_file": "./insert_res_wal.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "databases": [
+ {
+ "dbinfo": {
+ "name": "db",
+ "drop": "yes",
+ "flush_each_batch": "yes",
+ "vgroups": 2,
+ "replica": 1,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096
+ },
+ "super_tables": [
+ {
+ "name": "meters",
+ "child_table_exists": "no",
+ "childtable_count": 1000,
+ "insert_rows": 2850,
+ "childtable_prefix": "d",
+ "insert_mode": "taosc",
+ "insert_interval": 0,
+ "timestamp_step": 10,
+ "disorder_ratio": 10,
+ "update_ratio": 5,
+ "delete_ratio": 1,
+ "disorder_fill_interval": 30,
+ "update_fill_interval": 25,
+ "generate_row_rule": 2,
+ "start_timestamp":"2022-01-01 10:00:00",
+ "columns": [
+ { "type": "bool", "name": "bc"},
+ { "type": "float", "name": "fc", "max": 1, "min": 0 },
+ { "type": "double", "name": "dc", "max": 1, "min": 0 },
+ { "type": "tinyint", "name": "ti", "max": 100, "min": 0 },
+ { "type": "smallint", "name": "si", "max": 100, "min": 0 },
+ { "type": "int", "name": "ic", "max": 100, "min": 0 },
+ { "type": "bigint", "name": "bi", "max": 100, "min": 0 },
+ { "type": "utinyint", "name": "uti", "max": 100, "min": 0 },
+ { "type": "usmallint", "name": "usi", "max": 100, "min": 0 },
+ { "type": "uint", "name": "ui", "max": 100, "min": 0 },
+ { "type": "ubigint", "name": "ubi", "max": 100, "min": 0 },
+ { "type": "binary", "name": "bin", "len": 32},
+ { "type": "nchar", "name": "nch", "len": 64}
+ ],
+ "tags": [
+ {
+ "type": "tinyint",
+ "name": "groupid",
+ "max": 10,
+ "min": 1
+ },
+ {
+ "name": "location",
+ "type": "binary",
+ "len": 16,
+ "values": ["San Francisco", "Los Angles", "San Diego",
+ "San Jose", "Palo Alto", "Campbell", "Mountain View",
+ "Sunnyvale", "Santa Clara", "Cupertino"]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+
diff --git a/tests/system-test/5-taos-tools/taosbenchmark/stt.py b/tests/system-test/5-taos-tools/taosbenchmark/stt.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b86bd8e40b22f22959e9d4b218caf5a210b7f9e
--- /dev/null
+++ b/tests/system-test/5-taos-tools/taosbenchmark/stt.py
@@ -0,0 +1,102 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+import os
+import subprocess
+import time
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def caseDescription(self):
+ """
+ [TD-13823] taosBenchmark test cases
+ """
+ return
+
+ def init(self, conn, logSql, replicaVar=1):
+ # comment off by Shuduo for CI self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def getPath(self, tool="taosBenchmark"):
+ if (platform.system().lower() == 'windows'):
+ tool = tool + ".exe"
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if "community" in selfPath:
+ projPath = selfPath[: selfPath.find("community")]
+ else:
+ projPath = selfPath[: selfPath.find("tests")]
+
+ paths = []
+ for root, dirs, files in os.walk(projPath):
+ if (tool) in files:
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if "packaging" not in rootRealPath:
+ paths.append(os.path.join(root, tool))
+ break
+ if len(paths) == 0:
+ tdLog.exit("taosBenchmark not found!")
+ return
+ else:
+ tdLog.info("taosBenchmark found in %s" % paths[0])
+ return paths[0]
+
+ def checkDataCorrect(self):
+ sql = "select count(*) from meters"
+ tdSql.query(sql)
+ allCnt = tdSql.getData(0, 0)
+ if allCnt < 2000000:
+ tdLog.exit(f"taosbenchmark insert row small. row count={allCnt} sql={sql}")
+ return
+
+        # group by 1000 child tables
+ rowCnt = tdSql.query("select count(*),tbname from meters group by tbname")
+ tdSql.checkRows(1000)
+
+ # interval
+ sql = "select count(*),max(ic),min(dc),last(*) from meters interval(1s)"
+ rowCnt = tdSql.query(sql)
+ if rowCnt < 10:
+            tdLog.exit(f"taosbenchmark interval(1s) count small. row count={rowCnt} sql={sql}")
+ return
+
+ # nest query
+ tdSql.query("select count(*) from (select * from meters order by ts desc)")
+ tdSql.checkData(0, 0, allCnt)
+
+
+ def run(self):
+ binPath = self.getPath()
+ cmd = "%s -f ./5-taos-tools/taosbenchmark/json/stt.json" % binPath
+ tdLog.info("%s" % cmd)
+ errcode = os.system("%s" % cmd)
+ if errcode != 0:
+ tdLog.exit(f"execute taosBenchmark ret error code={errcode}")
+ return
+
+ tdSql.execute("use db")
+ self.checkDataCorrect()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py
index 3741f230012b333bf8106c37db6f5adfb4a3854b..6c8253c54286d6a8074e1361acba67e8c156a5ad 100644
--- a/tests/system-test/7-tmq/tmqParamsTest.py
+++ b/tests/system-test/7-tmq/tmqParamsTest.py
@@ -21,7 +21,7 @@ class TDTestCase:
self.commit_value_list = ["true", "false"]
self.offset_value_list = ["", "earliest", "latest", "none"]
self.tbname_value_list = ["true", "false"]
- self.snapshot_value_list = ["true", "false"]
+ self.snapshot_value_list = ["false"]
# self.commit_value_list = ["true"]
# self.offset_value_list = [""]