diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b873e47b7405bd73b8cb9ef5f90e58c72409977b --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,25 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: check-yaml + - id: check-json + - id: end-of-file-fixer + - id: trailing-whitespace + + - repo: https://github.com/psf/black + rev: stable + hooks: + - id: black + + - repo: https://github.com/pocc/pre-commit-hooks + rev: master + hooks: + - id: cppcheck + args: ["--error-exitcode=0"] + + - repo: https://github.com/crate-ci/typos + rev: v1.15.7 + hooks: + - id: typos + diff --git a/cmake/cmake.version b/cmake/cmake.version index edc51f206cfeb0ee6eb3d4c92bda64b5af3d6814..fe35fbe7bd7822b447828f34627c1a9421393a62 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.6.0.alpha") + SET(TD_VER_NUMBER "3.1.0.0.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx index 3731882fb23677588e72ba5e9d39049af2dfd97d..4d1b67e451ecf50697156c2a838f83b31262b0b9 100644 --- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx @@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001". INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31); ``` -`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. 
For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert Multiple Rows @@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25); ``` -`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert into Multiple Tables @@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31); ``` -`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). 
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index 578f38e73d02efa0da04531986c037176d68482b..f5e0378a00eaeae5b2ff00f80bd8bc3e858a6e88 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -244,6 +244,8 @@ The following SQL statement creates a topic in TDengine: CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- There is an upper limit to the number of topics created, controlled by the parameter tmqMaxTopicNum, with a default of 20 + Multiple subscription types are supported. #### Subscribe to a Column @@ -265,14 +267,15 @@ You can subscribe to a topic through a SELECT statement. Statements that specify Syntax: ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows: - The table schema can be modified. - Unstructured data is returned. The format of the data returned changes based on the supertable schema. -- A different table schema may exist for every data block to be processed. +- The 'with meta' parameter is optional. When selected, statements such as creating super tables and sub tables will be returned, mainly used for Taosx to perform super table migration +- The 'where_condition' parameter is optional and will be used to filter and subscribe to sub tables that meet the criteria. Where conditions cannot have ordinary columns, only tags or tbnames. Functions can be used in where conditions to filter tags, but cannot be aggregate functions because sub table tag values cannot be aggregated. It can also be a constant expression, such as 2>1 (subscribing to all child tables), Or false (subscribe to 0 sub tables) - The data returned does not include tags. 
### Subscribe to a Database @@ -280,10 +283,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement Syntax: ```sql -CREATE TOPIC topic_name [WITH META] AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` -This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka. +This SQL statement creates a subscription to all tables in the database. + +- The 'with meta' parameter is optional. When selected, it will return statements for creating all super tables and sub tables in the database, mainly used for Taosx database migration ## Create a Consumer @@ -295,7 +300,7 @@ You configure the following parameters when creating a consumer: | `td.connect.user` | string | User Name | | | `td.connect.pass` | string | Password | | | `td.connect.port` | string | Port of the server side | | -| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | +| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. | | `client.id` | string | Client ID | Maximum length: 192. 
| | `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true | diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 825d3c6f8b9faf1d9147efed03697648d5c99ae0..5137e35c0a83ec972fb45b6aa37ee10d434bbfad 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f - For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions. - To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function. -There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. +There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be `_start`, `_finish`, `_init`, and `_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. 
### Implementing a Scalar Function in C The implementation of a scalar function is described as follows: @@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows: def process(input: datablock) -> tuple[output_type]: ``` -Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype +Description: this function processes datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype #### Aggregate UDF Interface @@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]: # return tuple object consisted of object of type outputtype ``` -Noteļ¼šprocess() must be implemeted, init() and destroy() must be defined too but they can do nothing. +Noteļ¼šprocess() must be implemented, init() and destroy() must be defined too but they can do nothing. #### Aggregate Template @@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type: #return obj of type outputtype ``` -Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. +Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. 
### Data Mapping between TDengine SQL and Python UDF @@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart #### Sample 3: UDF with n arguments -A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py: +A UDF which accepts n integers, like (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py: ```python def init(): @@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s) #### Sample 4: Utilize 3rd party package -A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty. +A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firstly. ```shell pip3 install moment @@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s) #### Sample 5: Aggregate Function -An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). 
The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`. +An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate function takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`. ```python import io @@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging. 2. log() is the function for logging, it converts the input object to string and output with an end of line 3. destroy() closes the log file \ 4. start() returns the initial buffer for storing the intermediate result -5. reduce() processes each daa block and aggregates the result +5. reduce() processes each data block and aggregates the result 6. finish() converts the final buffer() to final result\ Create the UDF. diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index b517bcb3ccdd90b52d778914ba77db3dba71d393..afc1581c226b0c0fda57261d5f0c435da37874ee 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r ELAPSED(ts_primary_key [, time_unit]) ``` -**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. 
If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. +**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. **Return value type**: Double if the input value is not NULL; @@ -999,18 +999,14 @@ SAMPLE(expr, k) **Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. -**Return value type**: Same as the column being operated plus the associated timestamp +**Return value type**: Same as the column being operated -**Applicable data types**: Any data type except for tags of STable +**Applicable data types**: Any data type **Applicable nested query**: Inner query and Outer query **Applicable table types**: standard tables and supertables -**More explanations**: - -- This function cannot be used in expression calculation. - ### TAIL @@ -1055,11 +1051,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. +**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. 
**Return value type**:Same as the data type of the column being operated upon -**Applicable column types**: Any data types except for timestamp +**Applicable column types**: Any data types **Applicable table types**: table, STable diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index b082f7b888dc2e09e4a6ae5c38e7ece591d667e3..7f0b8c7769298b460ec7102d5e3fc0b8f2637ca7 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct A PARTITION BY clause is processed as follows: - The PARTITION BY clause must occur after the WHERE clause -- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. +- The PARTITION BY clause partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. - The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index eb70a7664b857a7971f24ea3ec7bc7f707f9fe38..bd4a60b20e87e21f8948aa64ed0f9bb86da6b6c6 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ Shows information about connections to the system. SHOW CONSUMERS; ``` -Shows information about all active consumers in the system. 
+Shows information about all consumers in the system. ## SHOW CREATE DATABASE diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index ebd2891a9ee444b4c1649385bb94b35b698cc52d..e8c407b125ab4d44a6b9512352fea9abb196ddcb 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java. | taos-jdbcdriver version | major changes | TDengine version | | :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: | -| 3.2.1 | subscription add seek function | 3.0.5.0 or later | +| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later | +| 3.2.2 | subscription add seek function | 3.0.5.0 or later | | 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later | | 3.2.0 | This version has been deprecated | - | | 3.1.0 | JDBC REST connection supports subscription over WebSocket | - | @@ -284,9 +285,9 @@ The configuration parameters in the URL are as follows: - batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. - charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. 
-- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms. -- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false. -- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true. +- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms. +- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false. +- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true. - useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. - httpPoolSize: size of REST concurrent requests. The default value is 20. @@ -352,9 +353,9 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. - TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone. -- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection. -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. 
It only takes effect when using JDBC REST connection and batchfetch is true. +- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection. +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx index 06d643c6c83e677a0cdcade91296ae2339f80fda..b3d4857d75e22f18c0dbcb4f2798c268f6fbcd3a 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -31,63 +31,78 @@ REST connections are supported on all platforms that can run Go. Please refer to [version support list](https://github.com/taosdata/driver-go#remind) -## Supported features +## Handling exceptions -### Native connections - -A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). 
The supported functional features are: - -* Normal queries -* Continuous queries -* Subscriptions -* Schemaless interface -* Parameter binding interface - -### REST connection - -A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported: +If it is a TDengine error, you can get the error code and error information in the following ways. +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -* Normal queries -* Continuous queries +## TDengine DataType vs. Go DataType + +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | + +**Note**: Only TAG supports JSON types ## Installation Steps ### Pre-installation preparation * Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) -- If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps +* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps Configure the environment variables and check the command. * ```go env``` * ```gcc -v``` -### Use go get to install - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### Manage with go mod +### Install the connectors 1. 
Initialize the project with the `go mod` command. - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. Introduce taosSql - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. Update the dependency packages with `go mod tidy`. - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. @@ -98,8 +113,6 @@ Configure the environment variables and check the command. ## Establishing a connection -### Data source name (DSN) - Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally): ``` text @@ -111,9 +124,7 @@ DSN in full form. ```text username:password@protocol(address)/dbname?param=value ``` -### Connecting via connector - - + _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver. 
@@ -209,340 +220,902 @@ func main() { -## Usage examples +### Specify the URL and Properties to get the connection -### Write data +The Go connector does not support this feature -#### SQL Write +### Priority of configuration parameters - +The Go connector does not support this feature -#### InfluxDB line protocol write - - +## Usage examples -#### OpenTSDB Telnet line protocol write +### Create database and tables - +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` -#### OpenTSDB JSON line protocol write +### Insert data - + -### Query data +### Querying data -### More sample programs - -* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) +### execute SQL with reqId +This reqId can be used to request link tracing. -## Usage limitations - -Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. 
+```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +### Writing data via parameter binding -The complete example is as follows. + + ```go package main import ( - "database/sql" - "fmt" "time" - _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := af.Open("", "root", "taosdata", "", 0) if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return + panic(err) } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") if err != nil { - fmt.Println("failed to insert, err:", err) - return + panic(err) } - rows, err := taos.Query("select * from tb1") + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 
binary(20)," + + "c13 nchar(20)" + + ")") if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } - - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) + stmt := db.InsertStmt() + err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). + AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). 
+ AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 } ``` -## Frequently Asked Questions - -1. bind interface in database/sql crashes - - REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. - -2. error `[0x217] Database not specified or available` after executing other statements with `use db` statement - - The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. - -3. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` - - Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. - -4. `readBufferSize` parameter has no significant effect after being increased - - Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. - -5. `disableCompression` parameter is set to `false` when the query efficiency is reduced - - When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. - -6. `go get` command can't get the package, or timeout to get the package - - Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. 
- -## Common APIs - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - Use This API to open a DB, returning an object of type \*DB. - -:::info -This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal. -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` built-in method to execute non-query related SQL. - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` Built-in method to execute query statements. - -### Advanced functions (af) API - -The `af` package encapsulates TDengine advanced functions such as connection management, subscriptions, schemaless, parameter binding, etc. - -#### Connection management - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - This API creates a connection to taosd via cgo. - -* `func (conn *Connector) Close() error` - - Closes the connection. - -#### Subscribe - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - -Creates consumer group. - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes a topic. - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes to topics. - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - -Polling information. - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose - -Commit information. - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). 
- -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose - -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Unsubscribe() error` - -Unsubscribe. - -* `func (c *Consumer) Close() error` - -Close consumer. - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` + + - Write to InfluxDB line protocol. +```go +package main -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` +import ( + "database/sql" + "fmt" + "time" - Write OpenTDSB telnet protocol data. + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" +) -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_json tags(?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } - Writes OpenTSDB JSON protocol data. + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } -#### parameter binding + } +} -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} - Parameter bound single row insert. +``` -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` + + - Initialize the parameters. -* `func (stmt *InsertStmt) Prepare(sql string) error` +### Schemaless Writing - Parameter binding preprocessing SQL statement. 
+ + -* `func (stmt *InsertStmt) SetTableName(name string) error` +```go +import ( + "fmt" - Bind the table name parameter. + "github.com/taosdata/driver-go/v3/af" +) -* `func (stmt *InsertStmt) SetSubTableName(name string) error` +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` - Parameter binding to set the sub table name. + + -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` +```go +import ( + "database/sql" + "log" + "time" - Parameter bind multiple rows of data. 
+ "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) -* `func (stmt *InsertStmt) AddBatch() error` +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" - Add to a parameter-bound batch. + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } +} +``` -* `func (stmt *InsertStmt) Execute() error` + + - Execute a parameter binding. -* `func (stmt *InsertStmt) GetAffectedRows() int` +### Schemaless with reqId - Gets the number of affected rows inserted by the parameter binding. 
+```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` -* `func (stmt *InsertStmt) Close() error` +You can get the unique id by `common.GetReqID()`. - Closes the parameter binding. +### Data Subscription -### Subscribe via WebSocket +The TDengine Go Connector supports subscription functionality with the following application API. -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` +#### Create a Topic -Creates consumer group. +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose +#### Create a Consumer -Subscribes a topic. +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose +#### Subscribe to consume data -Subscribes to topics. 
+```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` +#### Assignment subscription Offset -Polling information. +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose +#### Close subscriptions -Commit information. +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` +#### Full Sample Code -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). + + -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose +```go +package main -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). +import ( + "fmt" + "os" -* `func (c *Consumer) Unsubscribe() error` + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) -Unsubscribe. 
+func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } -* `func (c *Consumer) Close() error` + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } -Close consumer. 
+ err = consumer.Close() + if err != nil { + panic(err) + } +} +``` -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) + + -### parameter binding via WebSocket +```go +package main -* `func NewConnector(config *Config) (*Connector, error)` +import ( + "database/sql" + "fmt" - Create a connection. + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) -* `func (c *Connector) Init() (*Stmt, error)` +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + 
fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } - Initialize the parameters. + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } -* `func (c *Connector) Close() error` + err = consumer.Close() + if err != nil { + panic(err) + } +} - Close the connection. +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` -* `func (s *Stmt) Prepare(sql string) error` + + - Parameter binding preprocessing SQL statement. +### More sample programs -* `func (s *Stmt) SetTableName(name string) error` +* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) - Bind the table name parameter. -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` +## Frequently Asked Questions - Set tags. +1. bind interface in database/sql crashes -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` + REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. - Parameter bind multiple rows of data. +2. 
error `[0x217] Database not specified or available` after executing other statements with `use db` statement -* `func (s *Stmt) AddBatch() error` + The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. - Add to a parameter-bound batch. +3. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` -* `func (s *Stmt) Exec() error` + Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. - Execute a parameter binding. +4. `readBufferSize` parameter has no significant effect after being increased -* `func (s *Stmt) GetAffectedRows() int` + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. - Gets the number of affected rows inserted by the parameter binding. +5. `disableCompression` parameter is set to `false` when the query efficiency is reduced -* `func (s *Stmt) Close() error` + When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. - Closes the parameter binding. +6. `go get` command can't get the package, or timeout to get the package -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) + Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. 
## API Reference diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 344bd3590ec3e970055f9d2bf3381a20de77534e..986b5cd104e0aef2dadefb60efd6f574576e7a4d 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -31,21 +31,57 @@ Websocket connections are supported on all platforms that can run Go. | connector-rust version | TDengine version | major features | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | +| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | | v0.8.0 | 3.0.4.0 | Support schemaless insert. | | v0.7.6 | 3.0.3.0 | Support req_id in query. | | v0.6.0 | 3.0.0.0 | Base features. | The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. -## Installation +## Handling exceptions + +After the error is reported, the specific information of the error can be obtained: + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType vs. 
Rust DataType + +TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Rust is as follows: + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +Note: Only TAG supports JSON types + +## Installation Steps ### Pre-installation preparation * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver) -### Add taos dependency +### Install the connectors Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: @@ -146,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` After the connection is established, you can perform operations on your database. 
@@ -228,41 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se ## Usage examples -### Write data +### Create database and tables + +```rust +use taos::*; -#### SQL Write +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. + +### Insert data -#### STMT Write +### Query data + + + +### execute SQL with req_id + +This req_id can be used to request link tracing. + +```rust +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; +``` + +### Writing data via parameter binding + +TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. + +Parameter binding details see [API Reference](#stmt-api) -#### Schemaless Write +### Schemaless Writing + +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). 
-### Query data +### Schemaless with req_id - +This req_id can be used to request link tracing. -## API Reference +```rust +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; + +client.put(&sml_data)? +``` + +### Data Subscription + +TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). + +#### Create a Topic + +```rust +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; +``` + +#### Create a Consumer + +You create a TMQ connector by using a DSN. + +```rust +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; +``` + +Create a consumer: + +```rust +let mut consumer = tmq.build()?; +``` + +#### Subscribe to consume data + +A single consumer can subscribe to one or more topics. + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? 
{ + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +Get assignmentsļ¼š + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` -### Connector Constructor +#### Assignment subscription Offset -You create a connector constructor by using a DSN. +Seek offsetļ¼š + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 ```rust -let cfg = TaosBuilder::default().build()?; +consumer.offset_seek(topic, vgroup_id, offset).await; ``` -You use the builder object to create multiple connections. +#### Close subscriptions ```rust -let conn: Taos = cfg.build(); +consumer.unsubscribe().await; ``` -### Connection pooling +The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. + +- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. +- `client.id`: Subscriber client ID. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. +- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. +- `auto.commit.interval.ms`: Interval for automatic commits. + +#### Full Sample Code + +For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). + +### Use with connection pool In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2]. 
@@ -292,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos]. let taos = pool.get()?; ``` -### Connectors +### More sample programs + +The source code of the sample application is under `TDengine/examples/rust` : + +[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## Frequently Asked Questions + +For additional troubleshooting, see [FAQ](../../../train-faq/faq). + +## API Reference The [Taos][struct.Taos] object provides an API to perform operations on multiple databases. @@ -378,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. - `.use_database(database: &str)`: Executes the `USE` statement. -In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage. +In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage. -### Bind Interface +

+ +Bind Interface + +

Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement. @@ -391,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; The bind object provides a set of interfaces for implementing parameter binding. -#### `.set_tbname(name)` +`.set_tbname(name)` To bind table names. @@ -400,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` Bind sub-table table names and tag values when the SQL statement uses a super table. @@ -410,7 +611,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("taos".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` Bind value types. Use the [ColumnView] structure to create and bind the required types. @@ -434,7 +635,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` Execute SQL. [Stmt] objects can be reused, re-binded, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`. @@ -449,92 +650,6 @@ stmt.execute()?; For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs). -### Subscriptions - -TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). - -You create a TMQ connector by using a DSN. - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -Create a consumer: - -```rust -let mut consumer = tmq.build()?; -``` - -A single consumer can subscribe to one or more topics. - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. 
- -```rust -{ - let mut stream = consumer.stream(); - - while let Some((offset, message)) = stream.try_next().await? { - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -Get assignmentsļ¼š - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -Seek offsetļ¼š - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -Unsubscribe: - -```rust -consumer.unsubscribe().await; -``` - -The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. - -- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. -- `client.id`: Subscriber client ID. -- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. -- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. -- `auto.commit.interval.ms`: Interval for automatic commits. - -For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos). diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 461bdfbf162e696b430c1edb9b09ada70e086fb9..2a6cd9ecf77febdcc56528f34112944dc25f0aec 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -20,10 +20,25 @@ The source code for the Python connector is hosted on [GitHub](https://github.co - The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. +### Supported features + +- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. +- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.). + ## Version selection We recommend using the latest version of `taospy`, regardless of the version of TDengine. +|Python Connector Version|major changes| +|:-------------------:|:----:| +|2.7.9|support for getting assignment and seek function on subscription| +|2.7.8|add `execute_many` method| + +|Python Websocket Connector Version|major changes| +|:----------------------------:|:-----:| +|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| +|0.2.4|support `unsubscribe` on subscription| + ## Handling Exceptions There are 4 types of exception in python connector. @@ -54,10 +69,23 @@ All exceptions from the Python Connector are thrown directly. Applications shoul {{#include docs/examples/python/handle_exception.py}} ``` -## Supported features +## TDengine DataType vs. Python DataType -- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. -- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.). +TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Python is as follows: + +|TDengine DataType|Python DataType| +|:---------------:|:-------------:| +|TIMESTAMP|datetime| +|INT|int| +|BIGINT|int| +|FLOAT|float| +|DOUBLE|int| +|SMALLINT|int| +|TINYINT|int| +|BOOL|bool| +|BINARY|str| +|NCHAR|str| +|JSON|str| ## Installation @@ -534,7 +562,7 @@ Connector support data subscription. For more information about subscroption, pl The `consumer` in the connector contains the subscription api. -#### Create Consumer +##### Create Consumer The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/). @@ -544,7 +572,7 @@ from taos.tmq import Consumer consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -#### Subscribe topics +##### Subscribe topics The `subscribe` function is used to subscribe to a list of topics. @@ -552,7 +580,7 @@ The `subscribe` function is used to subscribe to a list of topics. consumer.subscribe(['topic1', 'topic2']) ``` -#### Consume +##### Consume The `poll` function is used to consume data in tmq. 
The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data. @@ -570,7 +598,7 @@ while True: print(block.fetchall()) ``` -#### assignment +##### assignment The `assignment` function is used to get the assignment of the topic. @@ -578,7 +606,7 @@ The `assignment` function is used to get the assignment of the topic. assignments = consumer.assignment() ``` -#### Seek +##### Seek The `seek` function is used to reset the assignment of the topic. @@ -587,7 +615,7 @@ tp = TopicPartition(topic='topic1', partition=0, offset=0) consumer.seek(tp) ``` -#### After consuming data +##### After consuming data You should unsubscribe to the topics and close the consumer after consuming. @@ -596,13 +624,13 @@ consumer.unsubscribe() consumer.close() ``` -#### Tmq subscription example +##### Tmq subscription example ```python {{#include docs/examples/python/tmq_example.py}} ``` -#### assignment and seek example +##### assignment and seek example ```python {{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}} @@ -614,7 +642,7 @@ consumer.close() In addition to native connections, the connector also supports subscriptions via websockets. -#### Create Consumer +##### Create Consumer The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer). @@ -624,7 +652,7 @@ import taosws consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"}) ``` -#### subscribe topics +##### subscribe topics The `subscribe` function is used to subscribe to a list of topics. 
@@ -632,7 +660,7 @@ The `subscribe` function is used to subscribe to a list of topics. consumer.subscribe(['topic1', 'topic2']) ``` -#### Consume +##### Consume The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data. @@ -649,7 +677,7 @@ while True: print(row) ``` -#### assignment +##### assignment The `assignment` function is used to get the assignment of the topic. @@ -657,7 +685,7 @@ The `assignment` function is used to get the assignment of the topic. assignments = consumer.assignment() ``` -#### Seek +##### Seek The `seek` function is used to reset the assignment of the topic. @@ -665,7 +693,7 @@ The `seek` function is used to reset the assignment of the topic. consumer.seek(topic='topic1', partition=0, offset=0) ``` -#### After consuming data +##### After consuming data You should unsubscribe to the topics and close the consumer after consuming. @@ -674,13 +702,13 @@ consumer.unsubscribe() consumer.close() ``` -#### Subscription example +##### Subscription example ```python {{#include docs/examples/python/tmq_websocket_example.py}} ``` -#### Assignment and seek example +##### Assignment and seek example ```python {{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}} @@ -696,19 +724,19 @@ Connector support schemaless insert. 
-Simple insert +##### Simple insert ```python {{#include docs/examples/python/schemaless_insert.py}} ``` -Insert with ttl argument +##### Insert with ttl argument ```python {{#include docs/examples/python/schemaless_insert_ttl.py}} ``` -Insert with req_id argument +##### Insert with req_id argument ```python {{#include docs/examples/python/schemaless_insert_req_id.py}} @@ -718,19 +746,19 @@ Insert with req_id argument -Simple insert +##### Simple insert ```python {{#include docs/examples/python/schemaless_insert_raw.py}} ``` -Insert with ttl argument +##### Insert with ttl argument ```python {{#include docs/examples/python/schemaless_insert_raw_ttl.py}} ``` -Insert with req_id argument +##### Insert with req_id argument ```python {{#include docs/examples/python/schemaless_insert_raw_req_id.py}} @@ -746,7 +774,7 @@ The Python connector provides a parameter binding api for inserting data. Simila -#### Create Stmt +##### Create Stmt Call the `statement` method in `Connection` to create the `stmt` for parameter binding. @@ -757,7 +785,7 @@ conn = taos.connect() stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") ``` -#### parameter binding +##### parameter binding Call the `new_multi_binds` function to create the parameter list for parameter bindings. @@ -787,7 +815,7 @@ Call the `bind_param` (for a single row) method or the `bind_param_batch` (for m stmt.bind_param_batch(params) ``` -#### execute sql +##### execute sql Call `execute` method to execute sql. @@ -795,13 +823,13 @@ Call `execute` method to execute sql. stmt.execute() ``` -#### Close Stmt +##### Close Stmt ``` stmt.close() ``` -#### Example +##### Example ```python {{#include docs/examples/python/stmt_example.py}} @@ -810,7 +838,7 @@ stmt.close() -#### Create Stmt +##### Create Stmt Call the `statement` method in `Connection` to create the `stmt` for parameter binding. 
@@ -821,7 +849,7 @@ conn = taosws.connect('taosws://localhost:6041/test') stmt = conn.statement() ``` -#### Prepare sql +##### Prepare sql Call `prepare` method in stmt to prepare sql. @@ -829,7 +857,7 @@ Call `prepare` method in stmt to prepare sql. stmt.prepare("insert into t1 values (?, ?, ?, ?)") ``` -#### parameter binding +##### parameter binding Call the `bind_param` method to bind parameters. @@ -848,7 +876,7 @@ Call the `add_batch` method to add parameters to the batch. stmt.add_batch() ``` -#### execute sql +##### execute sql Call `execute` method to execute sql. @@ -856,13 +884,13 @@ Call `execute` method to execute sql. stmt.execute() ``` -#### Close Stmt +##### Close Stmt ``` stmt.close() ``` -#### Example +##### Example ```python {{#include docs/examples/python/stmt_websocket_example.py}} diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index f4d9ba8e428792cbd525f15632eff5e14a3ba83a..a5c1553402a75f902197c5e466d12aaf663eedb8 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.6.0 + + + ## 3.0.5.1 diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index a87a1f64f80223a8b19b21bd277973952cf8dfc8..54a8af2287b517ee7a8792fc427731cbe9e0500f 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -243,6 +243,7 @@ TDengine ä½æē”Ø SQL 创å»ŗäø€äøŖ topicļ¼š ```sql CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- topic创å»ŗäøŖꕰ꜉äøŠé™ļ¼Œé€ščæ‡å‚ę•° tmqMaxTopicNum ęŽ§åˆ¶ļ¼Œé»˜č®¤ 20 äøŖ TMQ ę”ÆęŒå¤šē§č®¢é˜…ē±»åž‹ļ¼š @@ -265,14 +266,15 @@ CREATE TOPIC topic_name as subquery čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` äøŽ `SELECT * from stbName` č®¢é˜…ēš„åŒŗ别ę˜Æļ¼š - 
äøä¼šé™åˆ¶ē”Øꈷēš„č”Øē»“ęž„å˜ę›“怂 - čæ”回ēš„ę˜Æ非ē»“ęž„åŒ–ēš„ę•°ę®ļ¼ščæ”å›žę•°ę®ēš„ē»“ęž„ä¼šéšä¹‹č¶…ēŗ§č”Øēš„č”Øē»“ęž„å˜åŒ–č€Œå˜åŒ–ć€‚ -- ē”ØꈷåƹäŗŽč¦å¤„ē†ēš„ęƏäø€äøŖę•°ę®å—éƒ½åÆčƒ½ęœ‰äøåŒēš„č”Øē»“Ꞅ怂 +- with meta å‚ę•°åÆ选ļ¼Œé€‰ę‹©ę—¶å°†čæ”回创å»ŗ超ēŗ§č”Øļ¼Œå­č”Øē­‰čƭ叄ļ¼Œäø»č¦ē”ØäŗŽtaosxåšč¶…ēŗ§č”Øčæē§» +- where_condition å‚ę•°åÆ选ļ¼Œé€‰ę‹©ę—¶å°†ē”Øę„čæ‡ę»¤ē¬¦åˆę”件ēš„子č”Øļ¼Œč®¢é˜…čæ™äŗ›å­č”Ø怂where ę”ä»¶é‡Œäøčƒ½ęœ‰ę™®é€šåˆ—ļ¼ŒåŖčƒ½ę˜Ætagꈖtbnameļ¼Œwhereę”ä»¶é‡ŒåÆ仄ē”Øå‡½ę•°ļ¼Œē”Øę„čæ‡ę»¤tagļ¼Œä½†ę˜Æäøčƒ½ę˜Æčšåˆå‡½ę•°ļ¼Œå› äøŗ子č”Øtagå€¼ę— ę³•åščšåˆć€‚ä¹ŸåÆ仄ę˜Æåøø量č”Øč¾¾å¼ļ¼ŒęƔ如 2 > 1ļ¼ˆč®¢é˜…å…ØéƒØ子č”Øļ¼‰ļ¼Œęˆ–者 falseļ¼ˆč®¢é˜…0äøŖ子č”Øļ¼‰ - čæ”å›žę•°ę®äøåŒ…å«ę ‡ē­¾ć€‚ ### ę•°ę®åŗ“č®¢é˜… @@ -280,11 +282,13 @@ CREATE TOPIC topic_name AS STABLE stb_name čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` 通čæ‡čÆ„čƭ叄åÆ创å»ŗäø€äøŖåŒ…å«ę•°ę®åŗ“ę‰€ęœ‰č”Øę•°ę®ēš„č®¢é˜… +- with meta å‚ę•°åÆ选ļ¼Œé€‰ę‹©ę—¶å°†čæ”回创å»ŗę•°ę®åŗ“é‡Œę‰€ęœ‰č¶…ēŗ§č”Øļ¼Œå­č”Øēš„čƭ叄ļ¼Œäø»č¦ē”ØäŗŽtaosxåšę•°ę®åŗ“čæē§» + ## 创å»ŗę¶ˆč“¹č€… *consumer* ę¶ˆč“¹č€…éœ€č¦é€ščæ‡äø€ē³»åˆ—配ē½®é€‰é”¹åˆ›å»ŗļ¼ŒåŸŗē”€é…ē½®é”¹å¦‚äø‹č”Øꉀē¤ŗļ¼š @@ -295,7 +299,7 @@ CREATE TOPIC topic_name AS DATABASE db_name; | `td.connect.user` | string | ē”Øęˆ·å | | | `td.connect.pass` | string | åƆē  | | | `td.connect.port` | integer | ęœåŠ”ē«Æēš„ē«Æ口号 | | -| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ | **åæ…唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂 | +| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ |
**åæ…唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂
ęƏäøŖtopicęœ€å¤šåÆå»ŗē«‹100äøŖ consumer group | | `client.id` | string | å®¢ęˆ·ē«Æ ID | ęœ€å¤§é•æåŗ¦ļ¼š192怂 | | `auto.offset.reset` | enum | ę¶ˆč“¹ē»„č®¢é˜…ēš„初始位ē½® |
`earliest`ļ¼ˆé»˜č®¤å€¼ļ¼‰ļ¼šä»Žå¤“å¼€å§‹č®¢é˜…ļ¼›
`latest`ļ¼šä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼›
`none`: ę²”ęœ‰ęäŗ¤ēš„ offset ę— ę³•č®¢é˜… | | `enable.auto.commit` | boolean | ę˜Æ否åÆē”Øę¶ˆč“¹ä½ē‚¹č‡ŖåŠØꏐäŗ¤ļ¼Œtrue: č‡ŖåŠØꏐäŗ¤ļ¼Œå®¢ęˆ·ē«Æåŗ”ē”Øꗠ需commitļ¼›falseļ¼šå®¢ęˆ·ē«Æåŗ”ē”Øéœ€č¦č‡Ŗč”Œcommit | é»˜č®¤å€¼äøŗ true | diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index ae11273a39779bd5cc83968f48767cace7ff346a..ff464376873767f1d6bee28b254d1f58640abffb 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -17,7 +17,7 @@ TDengine ę”Æꌁ通čæ‡ C/Python čÆ­č؀čæ›č”Œ UDF å®šä¹‰ć€‚ęŽ„äø‹ę„ē»“合ē¤ŗ例 - čšåˆå‡½ę•°éœ€č¦å®žēŽ°čšåˆęŽ„å£å‡½ę•° aggfn_start ļ¼Œ aggfn ļ¼Œ aggfn_finish怂 - å¦‚ęžœéœ€č¦åˆå§‹åŒ–ļ¼Œå®žēŽ° udf_initļ¼›å¦‚ęžœéœ€č¦ęø…ē†å·„作ļ¼Œå®žēŽ°udf_destroy怂 -ęŽ„å£å‡½ę•°ēš„名ē§°ę˜Æ UDF 名ē§°ļ¼Œęˆ–者ę˜Æ UDF 名ē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ_start, _finish, _init, _destroy)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfnļ¼Œaggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ +ęŽ„å£å‡½ę•°ēš„名ē§°ę˜Æ UDF 名ē§°ļ¼Œęˆ–者ę˜Æ UDF 名ē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ`_start`, `_finish`, `_init`, `_destroy`)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfnļ¼Œaggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ ### ē”Ø C čÆ­č؀实ēŽ°ę ‡é‡å‡½ę•° ę ‡é‡å‡½ę•°å®žēŽ°ęØ”ęæ如äø‹ diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 27b732b8835c2290c6cc1e55c35cb6e69f3b957d..c7da2bd4f545155e62a8bb83ff7554021aa16864 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -36,14 +36,15 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Java ēš„å¹³å°ć€‚ | taos-jdbcdriver ē‰ˆęœ¬ | äø»č¦å˜åŒ– | TDengine ē‰ˆęœ¬ | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | +| 3.2.3 | äæ®å¤ ResultSet åœØäø€äŗ›ęƒ…å†µę•°ę®č§£ęžå¤±č“„ | - | | 3.2.2 | ę–°å¢žåŠŸčƒ½ļ¼šę•°ę®č®¢é˜…ę”Æꌁ seek åŠŸčƒ½ć€‚ | 3.0.5.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.1 | ę–°å¢žåŠŸčƒ½ļ¼šWebSocket čæžęŽ„ę”Æꌁ schemaless äøŽ prepareStatement å†™å…„ć€‚å˜ę›“ļ¼šconsumer poll 
čæ”回ē»“ęžœé›†äøŗ ConsumerRecordļ¼ŒåÆ通čæ‡ value() čŽ·å–ęŒ‡å®šē»“ęžœé›†ę•°ę®ć€‚ | 3.0.3.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.0 | 存åœØčæžęŽ„问题ļ¼ŒäøęŽØ荐ä½æē”Ø | - | | 3.1.0 | WebSocket čæžęŽ„ę”ÆęŒč®¢é˜…åŠŸčƒ½ | - | | 3.0.1 - 3.0.4 | äæ®å¤äø€äŗ›ęƒ…况äø‹ē»“ęžœé›†ę•°ę®č§£ęžé”™čÆÆēš„é—®é¢˜ć€‚3.0.1 åœØ JDK 11 ēŽÆ境ē¼–čƑļ¼ŒJDK 8 ēŽÆ境äø‹å»ŗč®®ä½æē”Ø其他ē‰ˆęœ¬ | - | | 3.0.0 | ę”Æꌁ TDengine 3.0 | 3.0.0.0 åŠę›“é«˜ē‰ˆęœ¬ | -| 2.0.42 | äæ®åœØ WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”回值 | - | -| 2.0.41 | äæ®ę­£ REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåƆē č½¬ē ę–¹å¼ | - | +| 2.0.42 | äæ®å¤ WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”回值 | - | +| 2.0.41 | äæ®å¤ REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåƆē č½¬ē ę–¹å¼ | - | | 2.0.39 - 2.0.40 | 增加 REST čæžęŽ„/čÆ·ę±‚ č¶…ę—¶č®¾ē½® | - | | 2.0.38 | JDBC REST čæžęŽ„å¢žåŠ ę‰¹é‡ę‹‰å–åŠŸčƒ½ | - | | 2.0.37 | 增加åƹ json tag ę”Æꌁ | - | @@ -287,9 +288,9 @@ url äø­ēš„配ē½®å‚ę•°å¦‚äø‹ļ¼š - batchfetch: trueļ¼šåœØę‰§č”ŒęŸ„čÆ¢ę—¶ę‰¹é‡ę‹‰å–ē»“ęžœé›†ļ¼›falseļ¼šé€č”Œę‹‰å–ē»“ęžœé›†ć€‚é»˜č®¤å€¼äøŗļ¼šfalseć€‚é€č”Œę‹‰å–ē»“ęžœé›†ä½æē”Ø HTTP ę–¹å¼čæ›č”Œę•°ę®ä¼ č¾“怂JDBC REST čæžęŽ„ę”ÆęŒę‰¹é‡ę‹‰å–ę•°ę®åŠŸčƒ½ć€‚taos-jdbcdriver äøŽ TDengine 之闓通čæ‡ WebSocket čæžęŽ„čæ›č”Œę•°ę®ä¼ č¾“怂ē›øč¾ƒäŗŽ HTTPļ¼ŒWebSocket åÆ仄ä½æ JDBC REST čæžęŽ„ę”ÆęŒå¤§ę•°ę®é‡ęŸ„čÆ¢ļ¼Œå¹¶ęå‡ęŸ„čÆ¢ę€§čƒ½ć€‚ - charset: 当开åÆę‰¹é‡ę‹‰å–ę•°ę®ę—¶ļ¼ŒęŒ‡å®šč§£ęžå­—ē¬¦äø²ę•°ę®ēš„å­—ē¬¦é›†ć€‚ - batchErrorIgnoreļ¼štrueļ¼šåœØę‰§č”Œ Statement ēš„ executeBatch ę—¶ļ¼Œå¦‚ęžœäø­é—“꜉äø€ę” SQL ę‰§č”Œå¤±č“„ļ¼Œē»§ē»­ę‰§č”Œäø‹é¢ēš„ SQL äŗ†ć€‚falseļ¼šäøå†ę‰§č”Œå¤±č“„ SQL 后ēš„任何čÆ­å„ć€‚é»˜č®¤å€¼äøŗļ¼šfalse怂 -- httpConnectTimeout: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 5000怂 -- httpSocketTimeout: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000怂仅åœØ batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ -- messageWaitTimeout: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 3000怂 仅åœØ batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ +- httpConnectTimeout: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂 +- httpSocketTimeout: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000怂仅åœØ 
batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ +- messageWaitTimeout: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂 仅åœØ batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ - useSSL: čæžęŽ„äø­ę˜Æ否ä½æē”Ø SSL怂 - httpPoolSize: REST 并发čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 @@ -355,9 +356,9 @@ properties äø­ēš„配ē½®å‚ę•°å¦‚äø‹ļ¼š - TSDBDriver.PROPERTY_KEY_CHARSETļ¼šå®¢ęˆ·ē«Æä½æē”Øēš„å­—ē¬¦é›†ļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå­—ē¬¦é›†ć€‚ - TSDBDriver.PROPERTY_KEY_LOCALEļ¼šä»…åœØä½æē”Ø JDBC 原ē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«ÆčÆ­č؀ēŽÆ境ļ¼Œé»˜č®¤å€¼ē³»ē»Ÿå½“前 locale怂 - TSDBDriver.PROPERTY_KEY_TIME_ZONEļ¼šä»…åœØä½æē”Ø JDBC 原ē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«Æä½æē”Øēš„ę—¶åŒŗļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå½“å‰ę—¶åŒŗ怂 -- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 5000怂仅åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000怂仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 3000怂 仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂仅åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000怂仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂 仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.PROPERTY_KEY_USE_SSL: čæžęŽ„äø­ę˜Æ否ä½æē”Ø SSL怂仅åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.HTTP_POOL_SIZE: REST 并发čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 ę­¤å¤–åƹ JDBC 原ē”ŸčæžęŽ„ļ¼Œé€ščæ‡ęŒ‡å®š URL 和 Properties čæ˜åÆä»„ęŒ‡å®šå…¶ä»–å‚ę•°ļ¼ŒęÆ”å¦‚ę—„åæ—ēŗ§åˆ«ć€SQL é•æåŗ¦ē­‰ć€‚ꛓ多čƦē»†é…ē½®čÆ·å‚č€ƒ[å®¢ęˆ·ē«Æ配ē½®](/reference/config/#ä»…å®¢ęˆ·ē«Æ适ē”Ø)怂 diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx index d431be35cb0b709cdc6c5cadd2c8043702acbe11..90ef4d83cafa604fbe531a9f9ea0ece5b11b2df4 100644 --- a/docs/zh/08-connector/20-go.mdx 
+++ b/docs/zh/08-connector/20-go.mdx @@ -32,24 +32,44 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ čÆ·å‚č€ƒ[ē‰ˆęœ¬ę”ÆęŒåˆ—č”Ø](https://github.com/taosdata/driver-go#remind) -## ę”Æꌁēš„åŠŸčƒ½ē‰¹ę€§ +## 处ē†å¼‚åøø -### 原ē”ŸčæžęŽ„ +å¦‚ęžœę˜Æ TDengine 错čÆÆåÆ仄通čæ‡ä»„äø‹ę–¹å¼čŽ·å–é”™čÆÆē å’Œé”™čÆÆäæ”ęÆ怂 -ā€œåŽŸē”ŸčæžęŽ„ā€ęŒ‡čæžęŽ„å™Ø通čæ‡ TDengine å®¢ęˆ·ē«Æ驱åŠØļ¼ˆtaoscļ¼‰ē›“ꎄäøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„怂ę”Æꌁēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š - -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ -* č®¢é˜… -* schemaless ęŽ„å£ -* å‚ę•°ē»‘å®šęŽ„口 - -### REST čæžęŽ„ - -"REST čæžęŽ„"ꌇčæžęŽ„å™Ø通čæ‡ taosAdapter ē»„ä»¶ęä¾›ēš„ REST API äøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„怂ę”Æꌁēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ +## TDengine DataType 和 Go DataType + +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | + +**ę³Øꄏ**ļ¼šJSON ē±»åž‹ä»…åœØ tag äø­ę”Æꌁ怂 ## å®‰č£…ę­„éŖ¤ @@ -63,32 +83,28 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ * ```go env``` * ```gcc -v``` -### ä½æē”Ø go get å®‰č£… - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### ä½æē”Ø go mod ē®”ē† +### å®‰č£…čæžęŽ„å™Ø 1. ä½æē”Ø `go mod` 命令初始化锹ē›®ļ¼š - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. 
引兄 taosSql ļ¼š - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. ä½æē”Ø `go mod tidy` ę›“ę–°ä¾čµ–åŒ…ļ¼š - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. ä½æē”Ø `go run taos-demo` čæč”Œē؋åŗęˆ–ä½æē”Ø `go build` 命令ē¼–čƑå‡ŗäŗŒčæ›åˆ¶ę–‡ä»¶ć€‚ @@ -99,8 +115,6 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ ## å»ŗē«‹čæžęŽ„ -### ę•°ę®ęŗåē§°ļ¼ˆDSNļ¼‰ - ę•°ę®ęŗåē§°å…·ęœ‰é€šē”Øę ¼å¼ļ¼Œä¾‹å¦‚ [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php)ļ¼Œä½†ę²”꜉ē±»åž‹å‰ē¼€ļ¼ˆę–¹ę‹¬å·č”Øē¤ŗåÆ选ļ¼‰ļ¼š ``` text @@ -113,9 +127,7 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ username:password@protocol(address)/dbname?param=value ``` -### ä½æē”ØčæžęŽ„å™Øčæ›č”ŒčæžęŽ„ - - + _taosSql_ 通čæ‡ cgo 实ēŽ°äŗ† Go ēš„ `database/sql/driver` ęŽ„å£ć€‚åŖéœ€č¦å¼•å…„é©±åŠØå°±åÆ仄ä½æē”Ø [`database/sql`](https://golang.org/pkg/database/sql/) ēš„ęŽ„å£ć€‚ @@ -213,332 +225,900 @@ func main() { -## ä½æē”Øē¤ŗ例 +### ęŒ‡å®š URL 和 Properties čŽ·å–čæžęŽ„ -### å†™å…„ę•°ę® +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ -#### SQL 写兄 - - +### 配ē½®å‚ę•°ēš„优先ēŗ§ -#### InfluxDB č”Œåč®®å†™å…„ +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ - +## ä½æē”Øē¤ŗ例 -#### OpenTSDB Telnet č”Œåč®®å†™å…„ +### 创å»ŗę•°ę®åŗ“å’Œč”Ø - +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` -#### OpenTSDB JSON č”Œåč®®å†™å…„ +### ę’å…„ę•°ę® - + ### ęŸ„čÆ¢ę•°ę® -### ę›“å¤šē¤ŗ例ē؋åŗ - -* 
[ē¤ŗ例ē؋åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) -* [č§†é¢‘ę•™ē؋](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 +### ę‰§č”Œåø¦ęœ‰ reqId ēš„ SQL -## ä½æē”Ø限制 +ę­¤ reqId åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖ怂 -ē”±äŗŽ REST ęŽ„å£ę— ēŠ¶ę€ę‰€ä»„ `use db` čÆ­ę³•äøä¼šē”Ÿę•ˆļ¼Œéœ€č¦å°† db 名ē§°ę”¾åˆ° SQL čƭ叄äø­ļ¼Œå¦‚ļ¼š`create table if not exists tb1 (ts timestamp, a int)`ę”¹äøŗ`create table if not exists test.tb1 (ts timestamp, a int)`å¦åˆ™å°†ęŠ„é”™`[0x217] Database not specified or available`怂 +```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -也åÆ仄将 db 名ē§°ę”¾åˆ° DSN äø­ļ¼Œå°† `root:taosdata@http(localhost:6041)/` ę”¹äøŗ `root:taosdata@http(localhost:6041)/test`ć€‚å½“ęŒ‡å®šēš„ db äøå­˜åœØę—¶ę‰§č”Œ `create database` čƭ叄äøä¼šęŠ„错ļ¼Œč€Œę‰§č”Œé’ˆåƹčÆ„ db ēš„å…¶ä»–ęŸ„čÆ¢ęˆ–å†™å…„ę“ä½œä¼šęŠ„é”™ć€‚ +### 通čæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® -å®Œę•“ē¤ŗ例如äø‹ļ¼š + + ```go package main import ( - "database/sql" - "fmt" "time" - _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := af.Open("", "root", "taosdata", "", 0) if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return + panic(err) } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") if err != nil { - 
fmt.Println("failed to insert, err:", err) - return + panic(err) } - rows, err := taos.Query("select * from tb1") + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } - - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) + stmt := db.InsertStmt() + err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). + AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). 
+ AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). + AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 } ``` -## åøøč§é—®é¢˜ - -1. database/sql äø­ stmtļ¼ˆå‚ę•°ē»‘定ļ¼‰ē›øå…³ęŽ„å£å“©ęŗƒ - - REST äøę”ÆęŒå‚ę•°ē»‘定ē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 - -2. ä½æē”Ø `use db` čÆ­å„åŽę‰§č”Œå…¶ä»–čÆ­å„ęŠ„é”™ `[0x217] Database not specified or available` - - åœØ REST ęŽ„å£äø­ SQL čƭ叄ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - -3. ä½æē”Ø taosSql äøęŠ„错ä½æē”Ø taosRestful ꊄ错 `[0x217] Database not specified or available` - - 因äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - -4. `readBufferSize` å‚ę•°č°ƒå¤§åŽę— ę˜Žę˜¾ę•ˆęžœ - - `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čƢ速åŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ - -5. `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ę•ˆēŽ‡é™ä½Ž - - 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` 压ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ - -6. 
`go get` å‘½ä»¤ę— ę³•čŽ·å–åŒ…ļ¼Œęˆ–č€…čŽ·å–åŒ…č¶…ę—¶ - - č®¾ē½® Go 代ē† `go env -w GOPROXY=https://goproxy.cn,direct`怂 - -## åøøē”Ø API - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - čÆ„ API ē”Øę„ę‰“å¼€ DBļ¼Œčæ”回äø€äøŖē±»åž‹äøŗ \*DB ēš„åÆ¹č±”ć€‚ - -:::info -čÆ„ API ęˆåŠŸåˆ›å»ŗēš„ę—¶å€™ļ¼Œå¹¶ę²”ęœ‰åšęƒé™ē­‰ę£€ęŸ„ļ¼ŒåŖ꜉åœØēœŸę­£ę‰§č”Œ Query ꈖ者 Exec ēš„ę—¶å€™ę‰čƒ½ēœŸę­£ēš„去创å»ŗčæžęŽ„ļ¼Œå¹¶åŒę—¶ę£€ęŸ„ user/password/host/port ę˜Æäøę˜Æåˆę³•ć€‚ -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` 内ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒéžęŸ„čÆ¢ē›ø关 SQL怂 - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` 内ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒęŸ„čÆ¢čÆ­å„ć€‚ - -### 高ēŗ§åŠŸčƒ½ļ¼ˆafļ¼‰API - -`af` åŒ…å°č£…äŗ†čæžęŽ„ē®”ē†ć€č®¢é˜…态schemalessć€å‚ę•°ē»‘定ē­‰ TDengine 高ēŗ§åŠŸčƒ½ć€‚ - -#### čæžęŽ„ē®”ē† - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - čÆ„ API 通čæ‡ cgo 创å»ŗäøŽ taosd ēš„čæžęŽ„怂 - -* `func (conn *Connector) Close() error` - - 关闭äøŽ taosd ēš„čæžęŽ„怂 - -#### č®¢é˜… - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - - 创å»ŗę¶ˆč“¹č€…ć€‚ - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - č®¢é˜…å•äøŖäø»é¢˜ć€‚ - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - č®¢é˜…äø»é¢˜ć€‚ - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - - č½®čÆ¢ę¶ˆęÆ怂 - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - ꏐäŗ¤ę¶ˆęÆ怂 - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) - -* `func (c *Consumer) 
Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) - -* `func (c *Consumer) Close() error` - - 关闭čæžęŽ„怂 - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - 写兄 InfluxDB č”Œåč®®ć€‚ + + -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` +```go +package main - 写兄 OpenTDSB telnet åč®®ę•°ę®ć€‚ +import ( + "database/sql" + "fmt" + "time" -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" +) - 写兄 OpenTSDB JSON åč®®ę•°ę®ć€‚ +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_json tags(?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } -#### å‚ę•°ē»‘定 + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` + } +} - å‚ę•°ē»‘å®šå•č”Œę’å…„怂 +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` +``` - åˆå§‹åŒ–å‚ę•°ć€‚ + + -* `func (stmt *InsertStmt) Prepare(sql string) error` +### ꗠęؔ式写兄 - å‚ę•°ē»‘定预处ē† SQL čÆ­å„ć€‚ + + -* `func (stmt *InsertStmt) SetTableName(name string) error` +```go +import ( + "fmt" - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ + 
"github.com/taosdata/driver-go/v3/af" +) -* `func (stmt *InsertStmt) SetSubTableName(name string) error` +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` - å‚ę•°ē»‘å®šč®¾ē½®å­č”Øåć€‚ + + -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` +```go +import ( + "database/sql" + "log" + "time" - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ + "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) -* `func (stmt *InsertStmt) AddBatch() error` +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + 
schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } +} +``` -* `func (stmt *InsertStmt) Execute() error` + + - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ +### ę‰§č”Œåø¦ęœ‰ reqId ēš„ę— ęؔ式写兄 -* `func (stmt *InsertStmt) GetAffectedRows() int` +```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ +åÆ仄通čæ‡ `common.GetReqID()` čŽ·å–å”Æäø€ id怂 -* `func (stmt *InsertStmt) Close() error` +### ę•°ę®č®¢é˜… - ē»“ęŸå‚ę•°ē»‘å®šć€‚ +TDengine Go čæžęŽ„å™Øę”ÆęŒč®¢é˜…åŠŸčƒ½ļ¼Œåŗ”ē”Ø API 如äø‹ļ¼š -### 通čæ‡ WebSocket č®¢é˜… +#### 创å»ŗ Topic -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` - 创å»ŗę¶ˆč“¹č€…ć€‚ +#### 创å»ŗ Consumer -* `func (c *Consumer) Subscribe(topic 
string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` - č®¢é˜…å•äøŖäø»é¢˜ć€‚ +#### č®¢é˜…ę¶ˆč“¹ę•°ę® -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` - č®¢é˜…äø»é¢˜ć€‚ +#### ęŒ‡å®šč®¢é˜… Offset -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` - č½®čÆ¢ę¶ˆęÆ怂 +#### å…³é—­č®¢é˜… -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` - ꏐäŗ¤ę¶ˆęÆ怂 +#### å®Œę•“ē¤ŗ例 -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` + + - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) +```go +package main -* `func 
(c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +import ( + "fmt" + "os" - ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) -* `func (c *Consumer) Close() error` +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = 
consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } - 关闭čæžęŽ„怂 + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) + err = consumer.Close() + if err != nil { + panic(err) + } +} +``` -### 通čæ‡ WebSocket čæ›č”Œå‚ę•°ē»‘定 + + -* `func NewConnector(config *Config) (*Connector, error)` +```go +package main - 创å»ŗčæžęŽ„怂 +import ( + "database/sql" + "fmt" -* `func (c *Connector) Init() (*Stmt, error)` + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) - åˆå§‹åŒ–å‚ę•°ć€‚ +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 
nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } -* `func (c *Connector) Close() error` + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } - 关闭čæžęŽ„怂 + err = consumer.Close() + if err != nil { + panic(err) + } +} -* `func (s *Stmt) Prepare(sql string) error` +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` - å‚ę•°ē»‘定预处ē† SQL čÆ­å„ć€‚ + + -* `func (s *Stmt) SetTableName(name string) error` +### ę›“å¤šē¤ŗ例ē؋åŗ - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ +* [ē¤ŗ例ē؋åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) +* [č§†é¢‘ę•™ē؋](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` +## åøøč§é—®é¢˜ - å‚ę•°ē»‘å®šč®¾ē½®ę ‡ē­¾ć€‚ +1. 
database/sql äø­ stmtļ¼ˆå‚ę•°ē»‘定ļ¼‰ē›øå…³ęŽ„å£å“©ęŗƒ -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` + REST äøę”ÆęŒå‚ę•°ē»‘定ē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ +2. ä½æē”Ø `use db` čÆ­å„åŽę‰§č”Œå…¶ä»–čÆ­å„ęŠ„é”™ `[0x217] Database not specified or available` -* `func (s *Stmt) AddBatch() error` + åœØ REST ęŽ„å£äø­ SQL čƭ叄ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ +3. ä½æē”Ø taosSql äøęŠ„错ä½æē”Ø taosRestful ꊄ错 `[0x217] Database not specified or available` -* `func (s *Stmt) Exec() error` + 因äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ +4. `readBufferSize` å‚ę•°č°ƒå¤§åŽę— ę˜Žę˜¾ę•ˆęžœ -* `func (s *Stmt) GetAffectedRows() int` + `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čƢ速åŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ +5. `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ę•ˆēŽ‡é™ä½Ž -* `func (s *Stmt) Close() error` + 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` 压ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ - ē»“ęŸå‚ę•°ē»‘å®šć€‚ +6. 
`go get` å‘½ä»¤ę— ę³•čŽ·å–åŒ…ļ¼Œęˆ–č€…čŽ·å–åŒ…č¶…ę—¶ -å®Œę•“å‚ę•°ē»‘定ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) + č®¾ē½® Go 代ē† `go env -w GOPROXY=https://goproxy.cn,direct`怂 ## API å‚č€ƒ diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx index c23228c8cf737c25bf3bce0269a7c08cb14a874d..79a6badfead70c27fc344b1e506aa8ea5afb624d 100644 --- a/docs/zh/08-connector/26-rust.mdx +++ b/docs/zh/08-connector/26-rust.mdx @@ -30,21 +30,57 @@ Websocket čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Rust ēš„å¹³å°ć€‚ | Rust čæžęŽ„å™Øē‰ˆęœ¬ | TDengine ē‰ˆęœ¬ | äø»č¦åŠŸčƒ½ | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | 궈ęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | +| v0.8.12 | 3.0.5.0 or later | 궈ęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | | v0.8.0 | 3.0.4.0 | ę”ÆꌁꗠęØ”å¼å†™å…„ć€‚ | | v0.7.6 | 3.0.3.0 | ę”ÆꌁåœØčÆ·ę±‚äø­ä½æē”Ø req_id怂 | | v0.6.0 | 3.0.0.0 | åŸŗē”€åŠŸčƒ½ć€‚ | Rust čæžęŽ„å™Ø仍ē„¶åœØåæ«é€Ÿå¼€å‘äø­ļ¼Œ1.0 ä¹‹å‰ę— ę³•äæčÆå…¶å‘åŽå…¼å®¹ć€‚å»ŗč®®ä½æē”Ø 3.0 ē‰ˆęœ¬ä»„äøŠēš„ TDengineļ¼Œä»„éæ免已ēŸ„é—®é¢˜ć€‚ -## å®‰č£… +## 处ē†é”™čÆÆ + +åœØęŠ„é”™åŽļ¼ŒåÆä»„čŽ·å–åˆ°é”™čÆÆēš„具体äæ”ęÆļ¼š + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType 和 Rust DataType + +TDengine ē›®å‰ę”ÆęŒę—¶é—“ęˆ³ć€ę•°å­—ć€å­—ē¬¦ć€åøƒå°”ē±»åž‹ļ¼ŒäøŽ Rust åƹåŗ”ē±»åž‹č½¬ę¢å¦‚äø‹ļ¼š + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +**ę³Øꄏ**ļ¼šJSON ē±»åž‹ä»…åœØ tag äø­ę”Æꌁ怂 + +## å®‰č£…ę­„éŖ¤ ### å®‰č£…å‰å‡†å¤‡ * å®‰č£… Rust 开发巄具链 * 
å¦‚ęžœä½æē”Ø原ē”ŸčæžęŽ„ļ¼ŒčÆ·å®‰č£… TDengine å®¢ęˆ·ē«Æ驱åŠØļ¼Œå…·ä½“ę­„éŖ¤čÆ·å‚č€ƒ[å®‰č£…å®¢ęˆ·ē«Æ驱åŠØ](../#å®‰č£…å®¢ęˆ·ē«Æ驱åŠØ) -### ę·»åŠ  taos ä¾čµ– +### å®‰č£…čæžęŽ„å™Ø ę ¹ę®é€‰ę‹©ēš„čæžęŽ„ę–¹å¼ļ¼ŒęŒ‰ē…§å¦‚äø‹čÆ“ę˜ŽåœØ [Rust](https://rust-lang.org) 锹ē›®äø­ę·»åŠ  [taos][taos] ä¾čµ–ļ¼š @@ -151,7 +187,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` å»ŗē«‹čæžęŽ„后ļ¼Œę‚ØåÆ仄čæ›č”Œē›øå…³ę•°ę®åŗ“ę“ä½œļ¼š @@ -233,41 +270,191 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { ## ä½æē”Øē¤ŗ例 -### å†™å…„ę•°ę® +### 创å»ŗę•°ę®åŗ“å’Œč”Ø + +```rust +use taos::*; -#### SQL 写兄 +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> **ę³Øꄏ**ļ¼šå¦‚ęžœäøä½æē”Ø `use db` ęŒ‡å®šę•°ę®åŗ“ļ¼Œåˆ™åŽē»­åƹč”Øēš„ę“ä½œéƒ½éœ€č¦å¢žåŠ ę•°ę®åŗ“名ē§°ä½œäøŗ前ē¼€ļ¼Œå¦‚ db.tb怂 + +### ę’å…„ę•°ę® -#### STMT 写兄 +### ęŸ„čÆ¢ę•°ę® + + + +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ SQL + +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖ怂 + +```rust +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; +``` + +### 通čæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® + +TDengine ēš„ Rust 
čæžęŽ„å™Ø实ēŽ°äŗ†å‚ę•°ē»‘å®šę–¹å¼åÆ¹ę•°ę®å†™å…„ļ¼ˆINSERTļ¼‰åœŗę™Æēš„ę”Æꌁ怂采ē”Øčæ™ē§ę–¹å¼å†™å…„ę•°ę®ę—¶ļ¼Œčƒ½éæ免 SQL čÆ­ę³•č§£ęžēš„资ęŗę¶ˆč€—ļ¼Œä»Žč€ŒåœØå¾ˆå¤šęƒ…å†µäø‹ę˜¾č‘—ęå‡å†™å…„ę€§čƒ½ć€‚ + +å‚ę•°ē»‘å®šęŽ„口čÆ¦č§[APIå‚č€ƒ](#stmt-api) -#### Schemaless 写兄 +### ꗠęؔ式写兄 + +TDengine ę”ÆꌁꗠęØ”å¼å†™å…„åŠŸčƒ½ć€‚ę— ęؔ式写兄兼容 InfluxDB ēš„ č”Œåč®®ļ¼ˆLine Protocolļ¼‰ć€OpenTSDB ēš„ telnet č”Œåč®®å’Œ OpenTSDB ēš„ JSON ę ¼å¼åč®®ć€‚čÆ¦ęƒ…čÆ·å‚č§[ꗠęؔ式写兄](../../reference/schemaless/)怂 -### ęŸ„čÆ¢ę•°ę® +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ę— ęؔ式写兄 - +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖ怂 -## API å‚č€ƒ +```rust +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; -### čæžęŽ„Ꞅ造å™Ø +client.put(&sml_data)? +``` + +### ę•°ę®č®¢é˜… + +TDengine 通čæ‡ę¶ˆęÆ队列 [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ -通čæ‡ DSN ę„ęž„å»ŗäø€äøŖčæžęŽ„å™ØꞄ造å™Ø怂 +#### 创å»ŗ Topic ```rust -let cfg = TaosBuilder::default().build()?; +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; ``` -ä½æē”Ø `builder` åÆ¹č±”åˆ›å»ŗ多äøŖčæžęŽ„ļ¼š +#### 创å»ŗ Consumer + +从 DSN 开始ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Ø怂 ```rust -let conn: Taos = cfg.build(); +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; ``` -### čæžęŽ„ę±  +创å»ŗę¶ˆč“¹č€…ļ¼š + +```rust +let mut consumer = tmq.build()?; +``` + +#### č®¢é˜…ę¶ˆč“¹ę•°ę® + +ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +TMQ 궈ęÆ队列ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆ仄ä½æē”Øē›øåŗ” API åƹęƏäøŖ궈ęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? 
{ + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` + +#### ęŒ‡å®šč®¢é˜… Offset + +ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 + +```rust +consumer.offset_seek(topic, vgroup_id, offset).await; +``` + +#### å…³é—­č®¢é˜… + +```rust +consumer.unsubscribe().await; +``` + +åƹäŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆ仄čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øꄏēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„怂 + +- `group.id`: 同äø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ +- `client.id`: åÆ选ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ +- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øꄏļ¼Œę­¤é€‰é”¹åœØ同äø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”怂 +- `enable.auto.commit`: å½“č®¾ē½®äøŗ `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØꠇ记ęؔ式ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ꀧäøę•ę„Ÿę—¶ļ¼ŒåÆ仄åÆē”Øę­¤ę–¹å¼ć€‚ +- `auto.commit.interval.ms`: č‡ŖåŠØꠇ记ēš„ꗶ闓闓隔怂 + +#### å®Œę•“ē¤ŗ例 + +å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
+ +### äøŽčæžęŽ„ę± ä½æē”Ø åœØå¤ę‚åŗ”ē”Øäø­ļ¼Œå»ŗč®®åÆē”ØčæžęŽ„걠怂[taos] ēš„čæžęŽ„걠默认ļ¼ˆå¼‚ę­„ęؔ式ļ¼‰ä½æē”Ø [deadpool] 实ēŽ°ć€‚ @@ -295,7 +482,17 @@ let pool: Pool = Pool::builder(Manager::from_dsn(self.dsn.clone()). let taos = pool.get()?; ``` -### čæžęŽ„ +### ę›“å¤šē¤ŗ例ē؋åŗ + +ē¤ŗ例ē؋åŗęŗē ä½äŗŽ `TDengine/examples/rust` äø‹: + +čÆ·å‚č€ƒļ¼š[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## åøøč§é—®é¢˜ + +čÆ·å‚č€ƒ [FAQ](../../../train-faq/faq) + +## API å‚č€ƒ [Taos][struct.Taos] åÆ¹č±”ęä¾›äŗ†å¤šäøŖę•°ę®åŗ“ę“ä½œēš„ APIļ¼š @@ -381,9 +578,13 @@ let taos = pool.get()?; - `.create_database(database: &str)`: ę‰§č”Œ `CREATE DATABASE` čÆ­å„ć€‚ - `.use_database(database: &str)`: ę‰§č”Œ `USE` čÆ­å„ć€‚ -é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æ [å‚ę•°ē»‘定](#å‚ę•°ē»‘å®šęŽ„口) 和 [č”Œåč®®ęŽ„å£](#č”Œåč®®ęŽ„å£) ēš„å…„口ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ +é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æå‚ę•°ē»‘å®šå’Œč”Œåč®®ęŽ„口ēš„å…„口ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ -### å‚ę•°ē»‘å®šęŽ„口 +

+ +å‚ę•°ē»‘å®šęŽ„口 + +

äøŽ C ęŽ„å£ē±»ä¼¼ļ¼ŒRust ęä¾›å‚ę•°ē»‘å®šęŽ„å£ć€‚é¦–å…ˆļ¼Œé€ščæ‡ [Taos][struct.Taos] åÆ¹č±”åˆ›å»ŗäø€äøŖ SQL čƭ叄ēš„å‚ę•°ē»‘定åÆ¹č±” [Stmt]ļ¼š @@ -394,7 +595,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; å‚ę•°ē»‘定åÆ¹č±”ęä¾›äŗ†äø€ē»„ꎄ口ē”ØäŗŽå®žēŽ°å‚ę•°ē»‘定ļ¼š -#### `.set_tbname(name)` +`.set_tbname(name)` ē”ØäŗŽē»‘定č”Øåć€‚ @@ -403,7 +604,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` 当 SQL čƭ叄ä½æē”Ø超ēŗ§č”Øę—¶ļ¼Œē”ØäŗŽē»‘定子č”Øč”Øåå’Œę ‡ē­¾å€¼ļ¼š @@ -413,7 +614,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("ę¶›ę€".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` ē”ØäŗŽē»‘定值ē±»åž‹ć€‚ä½æē”Ø [ColumnView] ē»“ęž„ä½“ęž„å»ŗéœ€č¦ēš„ē±»åž‹å¹¶ē»‘定ļ¼š @@ -437,7 +638,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` ę‰§č”Œ SQL怂[Stmt] åÆ¹č±”åÆ仄复ē”Øļ¼ŒåœØę‰§č”ŒåŽåÆä»„é‡ę–°ē»‘å®šå¹¶ę‰§č”Œć€‚ę‰§č”Œå‰čÆ·ē”®äæę‰€ęœ‰ę•°ę®å·²é€ščæ‡ `.add_batch` åŠ å…„åˆ°ę‰§č”Œé˜Ÿåˆ—äø­ć€‚ @@ -452,92 +653,6 @@ stmt.execute()?; äø€äøŖåÆčæč”Œēš„ē¤ŗ例čÆ·č§ [GitHub äøŠēš„ē¤ŗ例](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs)怂 -### č®¢é˜… - -TDengine 通čæ‡ę¶ˆęÆ队列 [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ - -从 DSN 开始ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Ø怂 - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -创å»ŗę¶ˆč“¹č€…ļ¼š - -```rust -let mut consumer = tmq.build()?; -``` - -ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -TMQ 궈ęÆ队列ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆ仄ä½æē”Øē›øåŗ” API åƹęƏäøŖ궈ęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ - -```rust -{ - let mut stream = consumer.stream(); - - while let Some((offset, message)) = stream.try_next().await? 
{ - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -åœę­¢č®¢é˜…ļ¼š - -```rust -consumer.unsubscribe().await; -``` - -åƹäŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆ仄čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øꄏēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„怂 - -- `group.id`: 同äø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ -- `client.id`: åÆ选ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ -- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øꄏļ¼Œę­¤é€‰é”¹åœØ同äø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”怂 -- `enable.auto.commit`: å½“č®¾ē½®äøŗ `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØꠇ记ęؔ式ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ꀧäøę•ę„Ÿę—¶ļ¼ŒåÆ仄åÆē”Øę­¤ę–¹å¼ć€‚ -- `auto.commit.interval.ms`: č‡ŖåŠØꠇ记ēš„ꗶ闓闓隔怂 - -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
其他ē›ø关ē»“ęž„ä½“ API ä½æē”ØčÆ“ę˜ŽčÆ·ē§»ę­„ Rust ę–‡ę”£ę‰˜ē®”ē½‘锵ļ¼šć€‚ diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 8752dc214565c7834cdc6903f5247cd4c64194a2..0b9f2d75a7779feb267f7a6eb4110ee3308957a0 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -21,10 +21,25 @@ Python čæžęŽ„å™Øēš„ęŗē ę‰˜ē®”åœØ [GitHub](https://github.com/taosdata/taos-con - 原ē”ŸčæžęŽ„[ę”Æꌁēš„平台](../#ę”Æꌁēš„平台)和 TDengine å®¢ęˆ·ē«Æę”Æꌁēš„平台äø€č‡“怂 - REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Python ēš„å¹³å°ć€‚ -## ē‰ˆęœ¬é€‰ę‹© +### ę”Æꌁēš„åŠŸčƒ½ + +- 原ē”ŸčæžęŽ„ę”Æꌁ TDengine ēš„ę‰€ęœ‰ę øåæƒåŠŸčƒ½ļ¼Œ åŒ…ę‹¬ļ¼š čæžęŽ„ē®”ē†ć€ę‰§č”Œ SQLć€å‚ę•°ē»‘å®šć€č®¢é˜…ć€ę— ęؔ式写兄ļ¼ˆschemalessļ¼‰ć€‚ +- REST čæžęŽ„ę”Æꌁēš„åŠŸčƒ½åŒ…ę‹¬ļ¼ščæžęŽ„ē®”ē†ć€ę‰§č”Œ SQL怂 (通čæ‡ę‰§č”Œ SQL åÆ仄ļ¼š ē®”ē†ę•°ę®åŗ“态ē®”ē†č”Øå’Œč¶…ēŗ§č”Øć€å†™å…„ę•°ę®ć€ęŸ„čÆ¢ę•°ę®ć€åˆ›å»ŗčæžē»­ęŸ„čÆ¢ē­‰)怂 + +## 历史ē‰ˆęœ¬ ꗠč®ŗä½æē”Ø什么ē‰ˆęœ¬ēš„ TDengine 都å»ŗč®®ä½æē”Ø꜀ꖰē‰ˆęœ¬ēš„ `taospy`怂 +|Python Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:-------------------:|:----:| +|2.7.9|ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦| +|2.7.8|ę–°å¢ž `execute_many`| + +|Python Websocket Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:----------------------------:|:-----:| +|0.2.5|1. ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦
2. ę”Æꌁ schemaless
3. ę”Æꌁ STMT| +|0.2.4|ę•°ę®č®¢é˜…ę–°å¢žå–ę¶ˆč®¢é˜…ę–¹ę³•| + ## 处ē†å¼‚åøø Python čæžęŽ„å™ØåÆčƒ½ä¼šäŗ§ē”Ÿ 4 ē§å¼‚åøøļ¼š @@ -55,12 +70,25 @@ Python Connector ēš„ę‰€ęœ‰ę•°ę®åŗ“ę“ä½œå¦‚ęžœå‡ŗēŽ°å¼‚åøøļ¼Œéƒ½ä¼šē›“ꎄꊛå‡ŗ {{#include docs/examples/python/handle_exception.py}} ``` -## ę”Æꌁēš„åŠŸčƒ½ +TDengine DataType 和 Python DataType -- 原ē”ŸčæžęŽ„ę”Æꌁ TDengine ēš„ę‰€ęœ‰ę øåæƒåŠŸčƒ½ļ¼Œ åŒ…ę‹¬ļ¼š čæžęŽ„ē®”ē†ć€ę‰§č”Œ SQLć€å‚ę•°ē»‘å®šć€č®¢é˜…ć€ę— ęؔ式写兄ļ¼ˆschemalessļ¼‰ć€‚ -- REST čæžęŽ„ę”Æꌁēš„åŠŸčƒ½åŒ…ę‹¬ļ¼ščæžęŽ„ē®”ē†ć€ę‰§č”Œ SQL怂 (通čæ‡ę‰§č”Œ SQL åÆ仄ļ¼š ē®”ē†ę•°ę®åŗ“态ē®”ē†č”Øå’Œč¶…ēŗ§č”Øć€å†™å…„ę•°ę®ć€ęŸ„čÆ¢ę•°ę®ć€åˆ›å»ŗčæžē»­ęŸ„čÆ¢ē­‰)怂 +TDengine ē›®å‰ę”ÆęŒę—¶é—“ęˆ³ć€ę•°å­—ć€å­—ē¬¦ć€åøƒå°”ē±»åž‹ļ¼ŒäøŽ Python åƹåŗ”ē±»åž‹č½¬ę¢å¦‚äø‹ļ¼š + +|TDengine DataType|Python DataType| +|:---------------:|:-------------:| +|TIMESTAMP|datetime| +|INT|int| +|BIGINT|int| +|FLOAT|float| +|DOUBLE|int| +|SMALLINT|int| +|TINYINT|int| +|BOOL|bool| +|BINARY|str| +|NCHAR|str| +|JSON|str| -## å®‰č£… +## å®‰č£…ę­„éŖ¤ ### å®‰č£…å‰å‡†å¤‡ @@ -373,7 +401,7 @@ TaosCursor ē±»ä½æē”Ø原ē”ŸčæžęŽ„čæ›č”Œå†™å…„ć€ęŸ„čÆ¢ę“ä½œć€‚åœØå®¢ęˆ·ē«Æ多ēŗæ
-#### Connection ē±»ēš„ä½æē”Ø +##### Connection ē±»ēš„ä½æē”Ø `Connection` ē±»ę—¢åŒ…含åƹ PEP249 Connection ęŽ„å£ēš„实ēŽ°(如ļ¼šcursorę–¹ę³•å’Œ close ę–¹ę³•)ļ¼Œä¹ŸåŒ…å«å¾ˆå¤šę‰©å±•åŠŸčƒ½ļ¼ˆå¦‚ļ¼š execute态 query态schemaless_insert 和 subscribe ę–¹ę³•ć€‚ @@ -537,7 +565,7 @@ RestClient ē±»ę˜ÆåƹäŗŽ REST API ēš„ē›“ęŽ„å°č£…ć€‚å®ƒåŖ包含äø€äøŖ sql() ę–¹ `Consumer` ęä¾›äŗ† Python čæžęŽ„å™Øč®¢é˜… TMQ ę•°ę®ēš„ API怂 -#### 创å»ŗ Consumer +##### 创å»ŗ Consumer 创å»ŗ Consumer čÆ­ę³•äøŗ `consumer = Consumer(configs)`ļ¼Œå‚ę•°å®šä¹‰čÆ·å‚č€ƒ [ę•°ę®č®¢é˜…ę–‡ę”£](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)怂 @@ -547,15 +575,15 @@ from taos.tmq import Consumer consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -#### č®¢é˜… topics +##### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) ``` -#### ę¶ˆč“¹ę•°ę® +##### ę¶ˆč“¹ę•°ę® Consumer API ēš„ `poll` ę–¹ę³•ē”ØäŗŽę¶ˆč“¹ę•°ę®ļ¼Œ`poll` ę–¹ę³•ęŽ„ę”¶äø€äøŖ float ē±»åž‹ēš„超ꗶꗶ闓ļ¼Œč¶…ꗶꗶ闓单位äøŗē§’ļ¼ˆsļ¼‰ļ¼Œ`poll` ę–¹ę³•åœØč¶…ę—¶ä¹‹å‰čæ”回äø€ę” Message ē±»åž‹ēš„ę•°ę®ęˆ–č¶…ę—¶čæ”回 `None`ć€‚ę¶ˆč“¹č€…åæ…锻通čæ‡ Message ēš„ `error()` ę–¹ę³•ę ”éŖŒčæ”å›žę•°ę®ēš„ error äæ”ęÆ怂 @@ -573,7 +601,7 @@ while True: print(block.fetchall()) ``` -#### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ +##### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic ēš„ę¶ˆč“¹čæ›åŗ¦ļ¼Œčæ”回ē»“ęžœē±»åž‹äøŗ TopicPartition 列č”Ø怂 @@ -581,7 +609,7 @@ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic assignments = consumer.assignment() ``` -#### 重ē½®ę¶ˆč“¹čæ›åŗ¦ +##### ęŒ‡å®šč®¢é˜… Offset Consumer API ēš„ `seek` ę–¹ę³•ē”ØäŗŽé‡ē½® Consumer ēš„ę¶ˆč“¹čæ›åŗ¦åˆ°ęŒ‡å®šä½ē½®ļ¼Œę–¹ę³•å‚ę•°ē±»åž‹äøŗ TopicPartition怂 @@ -590,7 +618,7 @@ tp = TopicPartition(topic='topic1', 
partition=0, offset=0) consumer.seek(tp) ``` -#### ē»“ęŸę¶ˆč“¹ +##### å…³é—­č®¢é˜… ę¶ˆč“¹ē»“ęŸåŽļ¼Œåŗ”å½“å–ę¶ˆč®¢é˜…ļ¼Œå¹¶å…³é—­ Consumer怂 @@ -599,13 +627,13 @@ consumer.unsubscribe() consumer.close() ``` -#### tmq č®¢é˜…ē¤ŗ例代ē  +##### å®Œę•“ē¤ŗ例 ```python {{#include docs/examples/python/tmq_example.py}} ``` -#### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗ例代ē  +##### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗ例代ē  ```python {{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}} @@ -619,7 +647,7 @@ consumer.close() taosws `Consumer` API ęä¾›äŗ†åŸŗäŗŽ Websocket č®¢é˜… TMQ ę•°ę®ēš„ API怂 -#### 创å»ŗ Consumer +##### 创å»ŗ Consumer 创å»ŗ Consumer čÆ­ę³•äøŗ `consumer = Consumer(conf=configs)`ļ¼Œä½æē”Øę—¶éœ€č¦ęŒ‡å®š `td.connect.websocket.scheme` å‚ę•°å€¼äøŗ "ws"ļ¼Œå‚ę•°å®šä¹‰čÆ·å‚č€ƒ [ę•°ę®č®¢é˜…ę–‡ę”£](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)怂 @@ -629,15 +657,15 @@ import taosws consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"}) ``` -#### č®¢é˜… topics +##### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) ``` -#### ę¶ˆč“¹ę•°ę® +##### ę¶ˆč“¹ę•°ę® Consumer API ēš„ `poll` ę–¹ę³•ē”ØäŗŽę¶ˆč“¹ę•°ę®ļ¼Œ`poll` ę–¹ę³•ęŽ„ę”¶äø€äøŖ float ē±»åž‹ēš„超ꗶꗶ闓ļ¼Œč¶…ꗶꗶ闓单位äøŗē§’ļ¼ˆsļ¼‰ļ¼Œ`poll` ę–¹ę³•åœØč¶…ę—¶ä¹‹å‰čæ”回äø€ę” Message ē±»åž‹ēš„ę•°ę®ęˆ–č¶…ę—¶čæ”回 `None`ć€‚ę¶ˆč“¹č€…åæ…锻通čæ‡ Message ēš„ `error()` ę–¹ę³•ę ”éŖŒčæ”å›žę•°ę®ēš„ error äæ”ęÆ怂 @@ -654,7 +682,7 @@ while True: print(row) ``` -#### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ +##### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic ēš„ę¶ˆč“¹čæ›åŗ¦ļ¼Œčæ”回ē»“ęžœē±»åž‹äøŗ TopicPartition 列č”Ø怂 @@ -662,7 +690,7 @@ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic 
assignments = consumer.assignment() ``` -#### 重ē½®ę¶ˆč“¹čæ›åŗ¦ +##### 重ē½®ę¶ˆč“¹čæ›åŗ¦ Consumer API ēš„ `seek` ę–¹ę³•ē”ØäŗŽé‡ē½® Consumer ēš„ę¶ˆč“¹čæ›åŗ¦åˆ°ęŒ‡å®šä½ē½®ć€‚ @@ -670,7 +698,7 @@ Consumer API ēš„ `seek` ę–¹ę³•ē”ØäŗŽé‡ē½® Consumer ēš„ę¶ˆč“¹čæ›åŗ¦åˆ°ęŒ‡å®šä½ consumer.seek(topic='topic1', partition=0, offset=0) ``` -#### ē»“ęŸę¶ˆč“¹ +##### ē»“ęŸę¶ˆč“¹ ę¶ˆč“¹ē»“ęŸåŽļ¼Œåŗ”å½“å–ę¶ˆč®¢é˜…ļ¼Œå¹¶å…³é—­ Consumer怂 @@ -679,7 +707,7 @@ consumer.unsubscribe() consumer.close() ``` -#### tmq č®¢é˜…ē¤ŗ例代ē  +##### tmq č®¢é˜…ē¤ŗ例代ē  ```python {{#include docs/examples/python/tmq_websocket_example.py}} @@ -687,7 +715,7 @@ consumer.close() čæžęŽ„å™Øęä¾›äŗ† `assignment` ęŽ„å£ļ¼Œē”ØäŗŽčŽ·å– topic assignment ēš„åŠŸčƒ½ļ¼ŒåÆä»„ęŸ„čÆ¢č®¢é˜…ēš„ topic ēš„ę¶ˆč“¹čæ›åŗ¦ļ¼Œå¹¶ęä¾› `seek` ęŽ„å£ļ¼Œē”ØäŗŽé‡ē½® topic ēš„ę¶ˆč“¹čæ›åŗ¦ć€‚ -#### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗ例代ē  +##### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗ例代ē  ```python {{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}} @@ -703,19 +731,19 @@ consumer.close() -ē®€å•å†™å…„ +##### ē®€å•å†™å…„ ```python {{#include docs/examples/python/schemaless_insert.py}} ``` -åø¦ęœ‰ ttl å‚ę•°ēš„写兄 +##### åø¦ęœ‰ ttl å‚ę•°ēš„写兄 ```python {{#include docs/examples/python/schemaless_insert_ttl.py}} ``` -åø¦ęœ‰ req_id å‚ę•°ēš„写兄 +##### åø¦ęœ‰ req_id å‚ę•°ēš„写兄 ```python {{#include docs/examples/python/schemaless_insert_req_id.py}} @@ -725,19 +753,19 @@ consumer.close() -ē®€å•å†™å…„ +##### ē®€å•å†™å…„ ```python {{#include docs/examples/python/schemaless_insert_raw.py}} ``` -åø¦ęœ‰ ttl å‚ę•°ēš„写兄 +##### åø¦ęœ‰ ttl å‚ę•°ēš„写兄 ```python {{#include docs/examples/python/schemaless_insert_raw_ttl.py}} ``` -åø¦ęœ‰ req_id å‚ę•°ēš„写兄 +##### åø¦ęœ‰ req_id å‚ę•°ēš„写兄 ```python {{#include docs/examples/python/schemaless_insert_raw_req_id.py}} @@ -753,7 +781,7 @@ TDengine ēš„ Python čæžęŽ„å™Øę”ÆęŒå‚ę•°ē»‘å®šé£Žę ¼ēš„ Prepare API ę–¹å¼å†™ -#### 创å»ŗ stmt +##### 创å»ŗ stmt Python 
čæžęŽ„å™Øēš„ `Connection` ęä¾›äŗ† `statement` ę–¹ę³•ē”ØäŗŽåˆ›å»ŗå‚ę•°ē»‘定åÆ¹č±” stmtļ¼ŒčÆ„ę–¹ę³•ęŽ„ę”¶ sql 字ē¬¦äø²ä½œäøŗå‚ę•°ļ¼Œsql 字ē¬¦äø²ē›®å‰ä»…ę”Æꌁē”Ø `?` ę„ä»£č”Øē»‘定ēš„å‚ę•°ć€‚ @@ -764,7 +792,7 @@ conn = taos.connect() stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") ``` -#### å‚ę•°ē»‘定 +##### å‚ę•°ē»‘定 调ē”Ø `new_multi_binds` å‡½ę•°åˆ›å»ŗ params 列č”Øļ¼Œē”ØäŗŽå‚ę•°ē»‘å®šć€‚ @@ -794,7 +822,7 @@ params[15].timestamp([None, None, 1626861392591]) stmt.bind_param_batch(params) ``` -#### ę‰§č”Œ sql +##### ę‰§č”Œ sql 调ē”Ø stmt ēš„ `execute` ę–¹ę³•ę‰§č”Œ sql @@ -802,7 +830,7 @@ stmt.bind_param_batch(params) stmt.execute() ``` -#### 关闭 stmt +##### 关闭 stmt ęœ€åŽéœ€č¦å…³é—­ stmt怂 @@ -810,7 +838,7 @@ stmt.execute() stmt.close() ``` -#### ē¤ŗ例代ē  +##### ē¤ŗ例代ē  ```python {{#include docs/examples/python/stmt_example.py}} @@ -819,7 +847,7 @@ stmt.close() -#### 创å»ŗ stmt +##### 创å»ŗ stmt Python WebSocket čæžęŽ„å™Øēš„ `Connection` ęä¾›äŗ† `statement` ę–¹ę³•ē”ØäŗŽåˆ›å»ŗå‚ę•°ē»‘定åÆ¹č±” stmtļ¼ŒčÆ„ę–¹ę³•ęŽ„ę”¶ sql 字ē¬¦äø²ä½œäøŗå‚ę•°ļ¼Œsql 字ē¬¦äø²ē›®å‰ä»…ę”Æꌁē”Ø `?` ę„ä»£č”Øē»‘定ēš„å‚ę•°ć€‚ @@ -830,7 +858,7 @@ conn = taosws.connect('taosws://localhost:6041/test') stmt = conn.statement() ``` -#### č§£ęž sql +##### č§£ęž sql 调ē”Ø stmt ēš„ `prepare` ę–¹ę³•ę„č§£ęž insert čÆ­å„ć€‚ @@ -838,7 +866,7 @@ stmt = conn.statement() stmt.prepare("insert into t1 values (?, ?, ?, ?)") ``` -#### å‚ę•°ē»‘定 +##### å‚ę•°ē»‘定 调ē”Ø stmt ēš„ `bind_param` ę–¹ę³•ē»‘å®šå‚ę•°ć€‚ @@ -857,7 +885,7 @@ stmt.bind_param([ stmt.add_batch() ``` -#### ę‰§č”Œ sql +##### ę‰§č”Œ sql 调ē”Ø stmt ēš„ `execute` ę–¹ę³•ę‰§č”Œ sql @@ -865,7 +893,7 @@ stmt.add_batch() stmt.execute() ``` -#### 关闭 stmt +##### 关闭 stmt ęœ€åŽéœ€č¦å…³é—­ stmt怂 @@ -873,7 +901,7 @@ stmt.execute() stmt.close() ``` -#### ē¤ŗ例代ē  +##### ē¤ŗ例代ē  ```python {{#include docs/examples/python/stmt_websocket_example.py}} diff --git a/docs/zh/12-taos-sql/10-function.md 
b/docs/zh/12-taos-sql/10-function.md index 416d41614d01b6c887d8ea462937b2b4fc509a55..fc0cfbe3305fbc4a70cf38ef41f9b7966a60feed 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -991,18 +991,14 @@ SAMPLE(expr, k) **åŠŸčƒ½čÆ“ę˜Ž**ļ¼š čŽ·å–ę•°ę®ēš„ k äøŖé‡‡ę ·å€¼ć€‚å‚ę•° k ēš„åˆę³•č¾“å…„čŒƒå›“ę˜Æ 1ā‰¤ k ā‰¤ 1000怂 -**čæ”回ē»“ęžœē±»åž‹**ļ¼š åŒåŽŸå§‹ę•°ę®ē±»åž‹ļ¼Œ čæ”回ē»“ęžœäø­åø¦ęœ‰čÆ„č”Œč®°å½•ēš„ę—¶é—“ęˆ³ć€‚ +**čæ”回ē»“ęžœē±»åž‹**ļ¼š åŒåŽŸå§‹ę•°ę®ē±»åž‹ć€‚ -**适ē”Øę•°ę®ē±»åž‹**ļ¼š åœØ超ēŗ§č”ØęŸ„čÆ¢äø­ä½æē”Øę—¶ļ¼Œäøčƒ½åŗ”ē”ØåœØꠇē­¾ä¹‹äøŠć€‚ +**适ē”Øę•°ę®ē±»åž‹**ļ¼š å…ØéƒØē±»åž‹å­—ꮵ怂 **åµŒå„—å­ęŸ„čÆ¢ę”Æꌁ**ļ¼š 适ē”ØäŗŽå†…å±‚ęŸ„čÆ¢å’Œå¤–å±‚ęŸ„čÆ¢ć€‚ **适ē”ØäŗŽ**ļ¼šč”Øå’Œč¶…ēŗ§č”Ø怂 -**ä½æē”ØčÆ“ę˜Ž**ļ¼š - -- äøčƒ½å‚äøŽč”Øč¾¾å¼č®”ē®—ļ¼›čÆ„å‡½ę•°åÆ仄åŗ”ē”ØåœØꙮ通č”Øå’Œč¶…ēŗ§č”ØäøŠļ¼› - ### TAIL @@ -1047,11 +1043,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”回čƄ列ēš„ę•°å€¼é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct ē›øä¼¼ļ¼Œä½†ę˜ÆåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³äæ”ęÆ怂åÆ仄针åÆ¹é™¤ę—¶é—“åˆ—ä»„å¤–ēš„å­—ę®µčæ›č”ŒęŸ„čÆ¢ļ¼ŒåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³ļ¼Œå…¶äø­ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ę˜Æē¬¬äø€ę¬”å‡ŗēŽ°ę—¶åˆ»ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ć€‚ +**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”回čÆ„åˆ—ę•°ę®é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct ē›øä¼¼ć€‚ **čæ”å›žę•°ę®ē±»åž‹**ļ¼šåŒåŗ”ē”Øēš„å­—ę®µć€‚ -**适ē”Øę•°ę®ē±»åž‹**ļ¼šé€‚合äŗŽé™¤ę—¶é—“ē±»åž‹ä»„外ēš„å­—ę®µć€‚ +**适ē”Øę•°ę®ē±»åž‹**ļ¼šå…ØéƒØē±»åž‹å­—ꮵ怂 **适ē”ØäŗŽ**: č”Øå’Œč¶…ēŗ§č”Ø怂 diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 12ad665e42fc8bf8b177efdbe43b5356d7a04d3b..f3397ae82dcc0465fb23d2f9770025e7b46f6a48 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ SHOW CONNECTIONS; SHOW CONSUMERS; ``` -ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę“»č·ƒēš„ę¶ˆč“¹č€…ēš„äæ”ęÆ怂 +ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę¶ˆč“¹č€…ēš„äæ”ęÆ怂 ## SHOW CREATE DATABASE diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 
ae47388566ef7c1104e50b5a35bee08e8889134a..557552bc1c1b56688a3706fb63834a58128036f6 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各ē‰ˆęœ¬å®‰č£…包čÆ·č®æ问[čæ™é‡Œ](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.6.0 + + + ## 3.0.5.1 diff --git a/include/common/tglobal.h b/include/common/tglobal.h index d53f78b41e25f127c7bb46e7d5218ecf0f2bc6df..bc4037c64234540f53b390cca8f82a7feb286a8e 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -164,6 +164,8 @@ extern char tsSmlTagName[]; // extern bool tsSmlDataFormat; // extern int32_t tsSmlBatchSize; +extern int32_t tmqMaxTopicNum; + // wal extern int64_t tsWalFsyncDataSizeLimit; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index e98453f57179bc5ac6771bce5a8dbdfc4b40f9a0..8ebf07bfccfff6edc638f2adf270db5ded8afdba 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -145,7 +145,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_TOPIC, "drop-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_SUBSCRIBE, "subscribe", SCMSubscribeReq, SCMSubscribeRsp) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp) - TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) +// TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index f4713f7a6ffd9265aee912a7b4b3c42589190c6a..3bef15f3a7c49b7a89112344b67182b3da9f3696 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -56,7 +56,8 @@ typedef struct { void* pStateBackend; struct SStorageAPI api; - int8_t 
fillHistory; + int8_t fillHistory; + STimeWindow winRange; } SReadHandle; // in queue mode, data streams are seperated by msg diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h index b71d50d43cc59988407576c1c1e0b9c2bce8fa3b..6b15833917cb9bf9fde78363f57740dd4e061647 100644 --- a/include/libs/function/tudf.h +++ b/include/libs/function/tudf.h @@ -111,6 +111,12 @@ int32_t udfStartUdfd(int32_t startDnodeId); */ int32_t udfStopUdfd(); +/** + * get udfd pid + * + */ + int32_t udfGetUdfdPid(int32_t* pUdfdPid); + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index ce24761df978422a677241e7ed8249ab3356deff..772a668f0fbb17c6ce233aa217b0f9dd14a09620 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -66,8 +66,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018) // #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) // #define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected" -#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // -#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // +#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // @@ -277,7 +277,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_FUNC_COMMENT TAOS_DEF_ERROR_CODE(0, 0x0378) #define TSDB_CODE_MND_INVALID_FUNC_RETRIEVE TAOS_DEF_ERROR_CODE(0, 0x0379) - + // mnode-db #define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) @@ -288,9 +288,9 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) #define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) // // #define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) // 2.x -#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) // +#define 
TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) // #define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) // internal -#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // +#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // #define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B) #define TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO TAOS_DEF_ERROR_CODE(0, 0x038C) // #define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) // 2.x @@ -516,6 +516,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x072E) #define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x072F) #define TSDB_CODE_QRY_QWORKER_QUIT TAOS_DEF_ERROR_CODE(0, 0x0730) +#define TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x0731) // grant #define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) @@ -768,6 +769,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001) #define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002) #define TSDB_CODE_TMQ_CONSUMER_ERROR TAOS_DEF_ERROR_CODE(0, 0x4003) +#define TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4004) +#define TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4005) // stream #define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100) @@ -778,7 +781,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x5101) // UTIL -#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000) +#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000) #ifdef __cplusplus } diff --git a/include/util/tcompare.h b/include/util/tcompare.h index f92e1c3970a828fdfe109ee51a8e1f52f1ae0389..2fa736f4df73b0e8661484151e9b22c929e62b9c 100644 --- a/include/util/tcompare.h +++ b/include/util/tcompare.h @@ -79,6 +79,7 @@ int32_t compareDoubleVal(const void *pLeft, const void 
*pRight); int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight); int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight); +int32_t compareLenBinaryVal(const void *pLeft, const void *pRight); int32_t comparestrRegexMatch(const void *pLeft, const void *pRight); int32_t comparestrRegexNMatch(const void *pLeft, const void *pRight); diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 081383f89b358325f55cc1a7641015efcb0a4eed..0622b01f2b6c03e26e1b5968f208baf96b0e786e 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -152,7 +152,7 @@ function wgetFile { file=$1 versionPath=$2 sourceP=$3 -nasServerIP="192.168.1.131" +nasServerIP="192.168.1.213" packagePath="/nas/TDengine/v${versionPath}/${verMode}" if [ -f ${file} ];then echoColor YD "${file} already exists ,it will delete it and download it again " diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 503120fe85e93bac6bafdc95f0ed14bfa8094700..13dc019feb29892fd1b48bf7fb8051f1da216652 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -749,6 +749,9 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, pReq.suid = pTableMeta->uid; pReq.source = TD_REQ_FROM_TAOX; pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? 
"sml_add_column" : "sml_modify_column_size"; + } else{ + uError("SML:0x%" PRIx64 " invalid action:%d", info->id, action); + goto end; } code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 975b304bf4574bb84e16b1287b3dfec73c8ce869..8ac9550aca4e6705a848b3fef4e95a972008c819 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -939,8 +939,6 @@ int stmtClose(TAOS_STMT* stmt) { stmtCleanSQLInfo(pStmt); taosMemoryFree(stmt); - STMT_DLOG_E("stmt freed"); - return TSDB_CODE_SUCCESS; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 8758cec2ec7eacabd13aba5b0ef8d61d15a2aef2..83550aa15d50b595aed684bc21694f904cc5843e 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -652,7 +652,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm int32_t j = 0; int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); for (j = 0; j < numOfVgroups; j++) { - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->vgId == vgId) { break; } @@ -666,7 +666,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm return; } - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->offsetInfo.currentOffset.type > 0 && !tOffsetEqual(&pVg->offsetInfo.currentOffset, &pVg->offsetInfo.committedOffset)) { code = doSendCommitMsg(tmq, pVg, pTopic->topicName, pParamSet, j, numOfVgroups, type); @@ -742,13 +742,15 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us static void generateTimedTask(int64_t refId, int32_t type) { tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = type; - 
taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - taosReleaseRef(tmqMgmt.rsetId, refId); - } + if(tmq == NULL) return; + + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); + if(pTaskType == NULL) return; + + *pTaskType = type; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + taosReleaseRef(tmqMgmt.rsetId, refId); } void tmqAssignAskEpTask(void* param, void* tmrId) { @@ -763,19 +765,19 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) { taosMemoryFree(param); } -void tmqAssignDelayedReportTask(void* param, void* tmrId) { - int64_t refId = *(int64_t*)param; - tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = TMQ_DELAYED_TASK__REPORT; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - } - - taosReleaseRef(tmqMgmt.rsetId, refId); - taosMemoryFree(param); -} +//void tmqAssignDelayedReportTask(void* param, void* tmrId) { +// int64_t refId = *(int64_t*)param; +// tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); +// if (tmq != NULL) { +// int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); +// *pTaskType = TMQ_DELAYED_TASK__REPORT; +// taosWriteQitem(tmq->delayedTask, pTaskType); +// tsem_post(&tmq->rspSem); +// } +// +// taosReleaseRef(tmqMgmt.rsetId, refId); +// taosMemoryFree(param); +//} int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { if (pMsg) { @@ -813,7 +815,7 @@ void tmqSendHbReq(void* param, void* tmrId) { offRows->offset = pVg->offsetInfo.currentOffset; char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &offRows->offset); - tscInfo("report offset: vgId:%d, offset:%s, rows:%"PRId64, offRows->vgId, buf, offRows->rows); + tscInfo("consumer:0x%" PRIx64 ",report offset: vgId:%d, offset:%s, rows:%"PRId64, tmq->consumerId, offRows->vgId, buf, offRows->rows); } } // tmq->needReportOffsetRows = false; @@ 
-1489,7 +1491,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic makeTopicVgroupKey(vgKey, pTopic->topicName, pVgEp->vgId); SVgroupSaveInfo* pInfo = taosHashGet(pVgOffsetHashMap, vgKey, strlen(vgKey)); - STqOffsetVal offsetNew = {.type = tmq->resetOffsetCfg}; + STqOffsetVal offsetNew = {0}; + offsetNew.type = tmq->resetOffsetCfg; SMqClientVg clientVg = { .pollCnt = 0, diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 12ec97080f9e3d8fa60bae0c90e1df9e0fa5a96c..5d1854ee2c3b1a50af448508c655388c4c059fcd 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -162,7 +162,7 @@ static const SSysDbTableSchema streamTaskSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "task_id", .bytes = 32, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "node_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; @@ -290,7 +290,7 @@ static const SSysDbTableSchema subscriptionSchema[] = { {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "offset", 
.bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; @@ -352,7 +352,7 @@ static const SSysDbTableSchema connectionsSchema[] = { static const SSysDbTableSchema consumerSchema[] = { - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 5f6ec92d50bd0d33dcee43abee82ce25d56f887e..74471eca9ae3db6fd1b8c3cbdd7312220dd02dd9 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -105,11 +105,13 @@ char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table // bool tsSmlDataFormat = false; // int32_t tsSmlBatchSize = 10000; +// tmq +int32_t tmqMaxTopicNum = 20; // query int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT -bool tsEnableQueryHb = false; +bool tsEnableQueryHb = true; bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; @@ -511,6 +513,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "tmqMaxTopicNum", tmqMaxTopicNum, 1, 10000, 1) != 0) return -1; + if 
(cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; @@ -882,6 +886,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; + tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 4e8797b1ecaf83f03c353e5a9f6a80d0680692f4..debb93e8ba75f4608f0faedfade250ebfb1e295f 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -6982,8 +6982,11 @@ int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int6 if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } tEndDecode(pDecoder); return 0; @@ -7541,8 +7544,11 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) { int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) { if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if 
(tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } return 0; } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index d8c43747f7b92822fad5455a143e35c5e918f15c..7a5581efbe210a64c2201830b7634dc7baca279c 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -969,7 +969,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) default: fractionLen = 0; - ASSERT(false); + return; } if (taosLocalTime(", &ptm, buf) == NULL) { diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index 96401511d2cd4832ad6d548a4b7286ba62227a7d..a3a31cfc5a5cfd0fdde3830ab015d2ca8cd72c98 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -25,14 +25,15 @@ extern "C" { enum { MQ_CONSUMER_STATUS_REBALANCE = 1, // MQ_CONSUMER_STATUS__MODIFY_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__READY, - MQ_CONSUMER_STATUS__LOST, + MQ_CONSUMER_STATUS_READY, + MQ_CONSUMER_STATUS_LOST, // MQ_CONSUMER_STATUS__LOST_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__LOST_REBD, -}; +// MQ_CONSUMER_STATUS__LOST_REBD, +};\ int32_t mndInitConsumer(SMnode *pMnode); void mndCleanupConsumer(SMnode *pMnode); +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId); SMqConsumerObj *mndAcquireConsumer(SMnode *pMnode, int64_t consumerId); void mndReleaseConsumer(SMnode *pMnode, SMqConsumerObj *pConsumer); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 03ad1ed58159d9bf892649847f1f25ba6f5577ee..1f4bc19e338a2372846f371b5bb5408e895ce127 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -137,12 +137,12 @@ typedef enum { } EDndReason; typedef enum { - CONSUMER_UPDATE__TOUCH = 1, // rebalance req do not need change consume topic - CONSUMER_UPDATE__ADD, - CONSUMER_UPDATE__REMOVE, - CONSUMER_UPDATE__LOST, - CONSUMER_UPDATE__RECOVER, 
- CONSUMER_UPDATE__REBALANCE, // subscribe req need change consume topic + CONSUMER_UPDATE_REB_MODIFY_NOTOPIC = 1, // topic do not need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_TOPIC, // topic need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_REMOVE, // topic need removed after rebalance +// CONSUMER_UPDATE_TIMER_LOST, + CONSUMER_UPDATE_RECOVER, + CONSUMER_UPDATE_SUB_MODIFY, // modify after subscribe req } ECsmUpdateType; typedef struct { @@ -549,7 +549,7 @@ typedef struct { // data for display int32_t pid; SEpSet ep; - int64_t upTime; + int64_t createTime; int64_t subscribeTime; int64_t rebalanceTime; @@ -560,7 +560,7 @@ typedef struct { } SMqConsumerObj; SMqConsumerObj* tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]); -void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer); +void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer, bool delete); int32_t tEncodeSMqConsumerObj(void** buf, const SMqConsumerObj* pConsumer); void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer, int8_t sver); diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index fad316ea12edce96bde4c21694b5402d97bf4ae0..ba4328b8fe821e0b8f858fb69d2deb687c36ac93 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -25,6 +25,7 @@ extern "C" { int32_t mndInitSubscribe(SMnode *pMnode); void mndCleanupSubscribe(SMnode *pMnode); +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName); SMqSubscribeObj *mndAcquireSubscribe(SMnode *pMnode, const char *CGroup, const char *topicName); SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key); void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 4dded61ce3fbbe4663a1047ee6aa1b9562eda767..47cc4a1ce7b4a0df57f54dfcd2d3e3af94acf399 100644 --- 
a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -26,6 +26,7 @@ #define MND_CONSUMER_VER_NUMBER 2 #define MND_CONSUMER_RESERVE_SIZE 64 +#define MND_MAX_GROUP_PER_TOPIC 100 #define MND_CONSUMER_LOST_HB_CNT 6 #define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 @@ -63,7 +64,7 @@ int32_t mndInitConsumer(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); +// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg); @@ -75,6 +76,22 @@ int32_t mndInitConsumer(SMnode *pMnode) { void mndCleanupConsumer(SMnode *pMnode) {} +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId){ + SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); + if (pClearMsg == NULL) { + mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. 
alloc size:%d", consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); + return; + } + + pClearMsg->consumerId = consumerId; + SRpcMsg rpcMsg = { + .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; + + mInfo("consumer:0x%" PRIx64 " drop from sdb", consumerId); + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + return; +} + bool mndRebTryStart() { int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); mDebug("tq timer, rebalance counter old val:%d", old); @@ -105,50 +122,48 @@ void mndRebCntDec() { } } -static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMqConsumerLostMsg *pLostMsg = pMsg->pCont; - SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); - if (pConsumer == NULL) { - return 0; - } - - mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, - mndConsumerStatusName(pConsumer->status)); - - if (pConsumer->status != MQ_CONSUMER_STATUS__READY) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } - - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; - - mndReleaseConsumer(pMnode, pConsumer); - - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); - if (pTrans == NULL) { - goto FAIL; - } - - if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - goto FAIL; - } - - if (mndTransPrepare(pMnode, pTrans) != 0) { - goto FAIL; - } - - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return 0; -FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return -1; -} +//static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { +// SMnode *pMnode = pMsg->info.node; +// SMqConsumerLostMsg *pLostMsg = pMsg->pCont; +// 
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); +// if (pConsumer == NULL) { +// return 0; +// } +// +// mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, +// mndConsumerStatusName(pConsumer->status)); +// +// if (pConsumer->status != MQ_CONSUMER_STATUS_READY) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } +// +// SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; +// +// mndReleaseConsumer(pMnode, pConsumer); +// +// STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); +// if (pTrans == NULL) { +// goto FAIL; +// } +// +// if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { +// goto FAIL; +// } +// +// if (mndTransPrepare(pMnode, pTrans) != 0) { +// goto FAIL; +// } +// +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return 0; +//FAIL: +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return -1; +//} static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; @@ -162,14 +177,14 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { mInfo("receive consumer recover msg, consumer:0x%" PRIx64 " status:%d(%s)", pRecoverMsg->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { + if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { mndReleaseConsumer(pMnode, pConsumer); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; return -1; } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__RECOVER; + pConsumerNew->updateType = CONSUMER_UPDATE_RECOVER; mndReleaseConsumer(pMnode, pConsumer); @@ -181,13 +196,13 @@ static int32_t 
mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -206,13 +221,13 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { mInfo("consumer:0x%" PRIx64 " needs to be cleared, status %s", pClearMsg->consumerId, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } +// if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; mndReleaseConsumer(pMnode, pConsumer); @@ -223,14 +238,14 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { if (mndSetConsumerDropLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -297,56 +312,29 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); int32_t status = atomic_load_32(&pConsumer->status); - mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", uptime:%" PRId64 ", hbstatus:%d", 
- pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->upTime, + mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", + pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, hbStatus); - if (status == MQ_CONSUMER_STATUS__READY) { - if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { - SMqConsumerLostMsg *pLostMsg = rpcMallocCont(sizeof(SMqConsumerLostMsg)); - if (pLostMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to transfer consumer status to lost due to out of memory. alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerLostMsg)); - continue; + if (status == MQ_CONSUMER_STATUS_READY) { + if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { + taosRLockLatch(&pConsumer->lock); + int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < topicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); } - - pLostMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_CONSUMER_LOST, .pCont = pLostMsg, .contLen = sizeof(SMqConsumerLostMsg)}; - - mDebug("consumer:0x%"PRIx64" hb not received beyond threshold %d, set to lost", pConsumer->consumerId, - MND_CONSUMER_LOST_HB_CNT); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); - } - } else if (status == MQ_CONSUMER_STATUS__LOST_REBD) { - // if the client is lost longer than one day, clear it. Otherwise, do nothing about the lost consumers. 
- if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { - SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); - if (pClearMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); - continue; - } - - pClearMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; - - mDebug("consumer:0x%" PRIx64 " lost beyond threshold %d, clear it", pConsumer->consumerId, - MND_CONSUMER_LOST_CLEAR_THRESHOLD); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + taosRUnLockLatch(&pConsumer->lock); } - } else if (status == MQ_CONSUMER_STATUS__LOST) { - taosRLockLatch(&pConsumer->lock); - int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < topicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); + } else if (status == MQ_CONSUMER_STATUS_LOST) { + if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); } - taosRUnLockLatch(&pConsumer->lock); } else { // MQ_CONSUMER_STATUS_REBALANCE taosRLockLatch(&pConsumer->lock); @@ -413,7 +401,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64 "", consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -475,7 +463,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { 
mError("consumer:0x%" PRIx64 " group:%s not consistent with data in sdb, saved cgroup:%s", consumerId, req.cgroup, pConsumer->cgroup); terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST; - return -1; + goto FAIL; } atomic_store_32(&pConsumer->hbStatus, 0); @@ -483,7 +471,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { // 1. check consumer status int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64, consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -497,10 +485,10 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg); } - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { mInfo("consumer:0x%" PRIx64 " not ready, status: %s", consumerId, mndConsumerStatusName(status)); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; - return -1; + goto FAIL; } int32_t serverEpoch = atomic_load_32(&pConsumer->epoch); @@ -582,7 +570,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { void *buf = rpcMallocCont(tlen); if (buf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + goto FAIL; } SMqRspHead* pHead = buf; @@ -669,6 +657,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { char *cgroup = subscribe.cgroup; SMqConsumerObj *pExistedConsumer = NULL; SMqConsumerObj *pConsumerNew = NULL; + STrans *pTrans = NULL; int32_t code = -1; SArray *pTopicList = subscribe.topicNames; @@ -676,9 +665,17 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem); int32_t newTopicNum = taosArrayGetSize(pTopicList); + for(int i = 0; i < newTopicNum; i++){ + int32_t gNum = mndGetGroupNumByTopic(pMnode, (const char*)taosArrayGetP(pTopicList, i)); + if(gNum >= MND_MAX_GROUP_PER_TOPIC){ + terrno = TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE; + code = terrno; + goto _over; + } + } 
// check topic existence - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); if (pTrans == NULL) { goto _over; } @@ -701,8 +698,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval; pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg; - // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; +// pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; // use insert logic taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); @@ -721,7 +717,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { " cgroup:%s, current status:%d(%s), subscribe topic num: %d", consumerId, subscribe.cgroup, status, mndConsumerStatusName(status), newTopicNum); - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; goto _over; } @@ -732,11 +728,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { } // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; + pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); - int32_t oldTopicNum = (pExistedConsumer->currentTopics) ? 
taosArrayGetSize(pExistedConsumer->currentTopics) : 0; + int32_t oldTopicNum = taosArrayGetSize(pExistedConsumer->currentTopics); int32_t i = 0, j = 0; while (i < oldTopicNum || j < newTopicNum) { @@ -791,10 +787,7 @@ _over: mndReleaseConsumer(pMnode, pExistedConsumer); } - if (pConsumerNew) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - } + tDeleteSMqConsumerObj(pConsumerNew, true); // TODO: replace with destroy subscribe msg taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree); @@ -894,17 +887,17 @@ CM_DECODE_OVER: } static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " cgroup:%s status:%d(%s) epoch:%d load from sdb, perform insert action", + mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch); - pConsumer->subscribeTime = pConsumer->upTime; + pConsumer->subscribeTime = taosGetTimestampMs(); return 0; } static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, + mInfo("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - tDeleteSMqConsumerObj(pConsumer); + tDeleteSMqConsumerObj(pConsumer, false); return 0; } @@ -913,10 +906,9 @@ static void updateConsumerStatus(SMqConsumerObj *pConsumer) { if (taosArrayGetSize(pConsumer->rebNewTopics) == 0 && taosArrayGetSize(pConsumer->rebRemovedTopics) == 0) { if (status == MQ_CONSUMER_STATUS_REBALANCE) { - pConsumer->status = MQ_CONSUMER_STATUS__READY; - } else if (status == MQ_CONSUMER_STATUS__LOST) { - ASSERT(taosArrayGetSize(pConsumer->currentTopics) == 0); - pConsumer->status = MQ_CONSUMER_STATUS__LOST_REBD; + pConsumer->status = MQ_CONSUMER_STATUS_READY; 
+ } else if (status == MQ_CONSUMER_STATUS_READY) { + pConsumer->status = MQ_CONSUMER_STATUS_LOST; } } } @@ -930,7 +922,7 @@ static void removeFromNewTopicList(SMqConsumerObj *pConsumer, const char *pTopic taosArrayRemove(pConsumer->rebNewTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, + mInfo("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebNewTopics)); break; } @@ -946,7 +938,7 @@ static void removeFromRemoveTopicList(SMqConsumerObj *pConsumer, const char *pTo taosArrayRemove(pConsumer->rebRemovedTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebRemovedTopics)); break; } @@ -961,7 +953,7 @@ static void removeFromCurrentTopicList(SMqConsumerObj *pConsumer, const char *pT taosArrayRemove(pConsumer->currentTopics, i); taosMemoryFree(topic); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->currentTopics)); break; } @@ -984,47 +976,46 @@ static bool existInCurrentTopicList(const SMqConsumerObj* pConsumer, const char* } static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer) { - mDebug("consumer:0x%" PRIx64 " perform update action, update type:%d, subscribe-time:%" PRId64 ", uptime:%" PRId64, - pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->upTime); + mInfo("consumer:0x%" PRIx64 " perform update action, update 
type:%d, subscribe-time:%" PRId64 ", createTime:%" PRId64, + pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime); taosWLockLatch(&pOldConsumer->lock); - if (pNewConsumer->updateType == CONSUMER_UPDATE__REBALANCE) { + if (pNewConsumer->updateType == CONSUMER_UPDATE_SUB_MODIFY) { TSWAP(pOldConsumer->rebNewTopics, pNewConsumer->rebNewTopics); TSWAP(pOldConsumer->rebRemovedTopics, pNewConsumer->rebRemovedTopics); TSWAP(pOldConsumer->assignedTopics, pNewConsumer->assignedTopics); - pOldConsumer->subscribeTime = pNewConsumer->upTime; + pOldConsumer->subscribeTime = taosGetTimestampMs(); pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__LOST) { - int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); - for (int32_t i = 0; i < sz; i++) { - char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); - taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); - } - - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - int32_t prevStatus = pOldConsumer->status; - pOldConsumer->status = MQ_CONSUMER_STATUS__LOST; - mDebug("consumer:0x%" PRIx64 " state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", - pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), - pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__RECOVER) { + mInfo("consumer:0x%" PRIx64 " sub update, modify existed consumer",pOldConsumer->consumerId); +// } else if (pNewConsumer->updateType == CONSUMER_UPDATE_TIMER_LOST) { +// int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); +// for (int32_t i = 0; i < sz; i++) { +// char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); +// taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); +// } +// +// int32_t prevStatus = pOldConsumer->status; +// pOldConsumer->status 
= MQ_CONSUMER_STATUS_LOST; +// mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", +// pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), +// pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_RECOVER) { int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { char *topic = taosStrdup(taosArrayGetP(pOldConsumer->assignedTopics, i)); taosArrayPush(pOldConsumer->rebNewTopics, &topic); } - pOldConsumer->rebalanceTime = pNewConsumer->upTime; pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__TOUCH) { + mInfo("consumer:0x%" PRIx64 " timer update, timer recover",pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_NOTOPIC) { atomic_add_fetch_32(&pOldConsumer->epoch, 1); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__ADD) { + pOldConsumer->rebalanceTime = taosGetTimestampMs(); + mInfo("consumer:0x%" PRIx64 " reb update, only rebalance time", pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_TOPIC) { char *pNewTopic = taosStrdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0)); // check if exist in current topic @@ -1033,6 +1024,7 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, // add to current topic bool existing = existInCurrentTopicList(pOldConsumer, pNewTopic); if (existing) { + mError("consumer:0x%" PRIx64 "new topic:%s should not in currentTopics", pOldConsumer->consumerId, pNewTopic); taosMemoryFree(pNewTopic); } else { // added into current topic list taosArrayPush(pOldConsumer->currentTopics, &pNewTopic); @@ -1044,17 +1036,17 @@ static int32_t mndConsumerActionUpdate(SSdb 
*pSdb, SMqConsumerObj *pOldConsumer, updateConsumerStatus(pOldConsumer); // the re-balance is triggered when the new consumer is launched. - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update add, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics), (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__REMOVE) { + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_REMOVE) { char *removedTopic = taosArrayGetP(pNewConsumer->rebRemovedTopics, 0); // remove from removed topic @@ -1067,10 +1059,10 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, int32_t status = pOldConsumer->status; updateConsumerStatus(pOldConsumer); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update remove, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, @@ -1133,8 +1125,12 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock 
* int32_t cols = 0; // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, pConsumer->consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->consumerId, false); + colDataSetVal(pColInfo, numOfRows, (const char *)consumerIdHex, false); // consumer group char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0}; @@ -1175,7 +1171,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * // up time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->upTime, false); + colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->createTime, false); // subscribe time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1190,7 +1186,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal); char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0}; - sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%d,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); + sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); varDataSetLen(parasStr, strlen(varDataVal(parasStr))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1216,10 +1212,9 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { static const char *mndConsumerStatusName(int status) { switch (status) { - case MQ_CONSUMER_STATUS__READY: + case MQ_CONSUMER_STATUS_READY: return "ready"; - case MQ_CONSUMER_STATUS__LOST: - case MQ_CONSUMER_STATUS__LOST_REBD: + case MQ_CONSUMER_STATUS_LOST: return "lost"; case MQ_CONSUMER_STATUS_REBALANCE: return "rebalancing"; diff --git 
a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 5e5d52b310ff39e4a75fb7427f783ee09c348890..a8a719edda4d204baf753d7ccb622ed85f50d9a0 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -223,7 +223,7 @@ void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) { return (void *)buf; } -SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]) { +SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) { SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj)); if (pConsumer == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -254,16 +254,20 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_L return NULL; } - pConsumer->upTime = taosGetTimestampMs(); + pConsumer->createTime = taosGetTimestampMs(); return pConsumer; } -void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer) { +void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) { + if(pConsumer == NULL) return; taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree); + if(delete){ + taosMemoryFree(pConsumer); + } } int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { @@ -278,7 +282,7 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { tlen += taosEncodeFixedI32(buf, pConsumer->pid); tlen += taosEncodeSEpSet(buf, &pConsumer->ep); - tlen += taosEncodeFixedI64(buf, pConsumer->upTime); + tlen += taosEncodeFixedI64(buf, pConsumer->createTime); tlen += taosEncodeFixedI64(buf, pConsumer->subscribeTime); tlen += taosEncodeFixedI64(buf, pConsumer->rebalanceTime); @@ -348,7 +352,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s buf = 
taosDecodeFixedI32(buf, &pConsumer->pid); buf = taosDecodeSEpSet(buf, &pConsumer->ep); - buf = taosDecodeFixedI64(buf, &pConsumer->upTime); + buf = taosDecodeFixedI64(buf, &pConsumer->createTime); buf = taosDecodeFixedI64(buf, &pConsumer->subscribeTime); buf = taosDecodeFixedI64(buf, &pConsumer->rebalanceTime); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 5482f369409a89484974444de27f111e680c2f10..3c2335a6ee0216e83bae4250a4eaed54fbebe916 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -233,7 +233,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } code = -1; - taosIp2String(pReq->info.conn.clientIp, ip); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONNECT) != 0) { mGError("user:%s, failed to login from %s since %s", pReq->info.conn.user, ip, terrstr()); @@ -271,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } } +_CONNECT: pConn = mndCreateConn(pMnode, pReq->info.conn.user, connReq.connType, pReq->info.conn.clientIp, pReq->info.conn.clientPort, connReq.pid, connReq.app, connReq.startTime); if (pConn == NULL) { @@ -842,7 +842,7 @@ static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBloc } varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, curRowIndex, subStatus, false); + colDataSetVal(pColInfo, curRowIndex, subStatus, (varDataLen(subStatus) == 0) ? 
true : false); char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; STR_TO_VARSTR(sql, pQuery->sql); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index be6cfae0be5e84d26802826362c2377d4ad5e159..9d8948650a23476461e8787e5ba17c8ecb1c99d1 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1303,9 +1303,29 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock // status char status[20 + VARSTR_HEADER_SIZE] = {0}; - char status2[20] = {0}; - strcpy(status, "normal"); - STR_WITH_MAXSIZE_TO_VARSTR(status, status2, sizeof(status)); + int8_t taskStatus = atomic_load_8(&pTask->status.taskStatus); + if (taskStatus == TASK_STATUS__NORMAL) { + memcpy(varDataVal(status), "normal", 6); + varDataSetLen(status, 6); + } else if (taskStatus == TASK_STATUS__DROPPING) { + memcpy(varDataVal(status), "dropping", 8); + varDataSetLen(status, 8); + } else if (taskStatus == TASK_STATUS__FAIL) { + memcpy(varDataVal(status), "fail", 4); + varDataSetLen(status, 4); + } else if (taskStatus == TASK_STATUS__STOP) { + memcpy(varDataVal(status), "stop", 4); + varDataSetLen(status, 4); + } else if (taskStatus == TASK_STATUS__SCAN_HISTORY) { + memcpy(varDataVal(status), "history", 7); + varDataSetLen(status, 7); + } else if (taskStatus == TASK_STATUS__HALT) { + memcpy(varDataVal(status), "halt", 4); + varDataSetLen(status, 4); + } else if (taskStatus == TASK_STATUS__PAUSE) { + memcpy(varDataVal(status), "pause", 5); + varDataSetLen(status, 5); + } pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)&status, false); @@ -1358,6 +1378,11 @@ int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray* tasks) { if (pTask->info.taskLevel != TASK_LEVEL__SINK && mndPauseStreamTask(pTrans, pTask) < 0) { return -1; } + + if (atomic_load_8(&pTask->status.taskStatus) != TASK_STATUS__PAUSE) { + 
atomic_store_8(&pTask->status.keepTaskStatus, pTask->status.taskStatus); + atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE); + } } } return 0; @@ -1412,6 +1437,10 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { } } + if (pStream->status == STREAM_STATUS__PAUSE) { + return 0; + } + if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); return -1; @@ -1492,6 +1521,10 @@ int32_t mndResumeAllStreamTasks(STrans *pTrans, SStreamObj *pStream, int8_t igUn if (pTask->info.taskLevel != TASK_LEVEL__SINK && mndResumeStreamTask(pTrans, pTask, igUntreated) < 0) { return -1; } + + if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__PAUSE) { + atomic_store_8(&pTask->status.taskStatus, pTask->status.keepTaskStatus); + } } } // pStream->pHTasksList is null @@ -1521,6 +1554,10 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) { } } + if (pStream->status != STREAM_STATUS__PAUSE) { + return 0; + } + if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) { sdbRelease(pMnode->pSdb, pStream); return -1; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 61691a30d5882959c8e6128abdbc040f637d43f4..7ecd994b5abf0036f63019a101a22c5d58ec6e66 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -160,10 +160,10 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub, const SMqRebOutputVg *pRebVg, SSubplan* pPlan) { -// if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { -// terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; -// return -1; -// } + if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { + terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; + return -1; + } void *buf; int32_t tlen; @@ -175,7 +175,7 
@@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubsc SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId); if (pVgObj == NULL) { taosMemoryFree(buf); - terrno = TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_MND_VGROUP_NOT_EXIST; return -1; } @@ -296,17 +296,17 @@ static void addUnassignedVgroups(SMqRebOutputObj *pOutput, SHashObj *pHash) { } } -static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ - for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ - SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); - SMqRebOutputVg outputVg = { - .oldConsumerId = pConsumerEp->consumerId, - .newConsumerId = pConsumerEp->consumerId, - .pVgEp = pVgEp, - }; - taosArrayPush(pOutput->rebVgs, &outputVg); - } -} +//static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ +// for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ +// SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); +// SMqRebOutputVg outputVg = { +// .oldConsumerId = pConsumerEp->consumerId, +// .newConsumerId = pConsumerEp->consumerId, +// .pVgEp = pVgEp, +// }; +// taosArrayPush(pOutput->rebVgs, &outputVg); +// } +//} static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, int32_t minVgCnt, int32_t imbConsumerNum) { @@ -357,7 +357,7 @@ static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHas } } } - putNoTransferToOutput(pOutput, pConsumerEp); +// putNoTransferToOutput(pOutput, pConsumerEp); } } @@ -468,40 +468,51 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR } } - if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed +// if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed SMqSubscribeObj *pSub = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key); // put all offset rows if (pSub) { taosRLockLatch(&pSub->lock); - bool init = 
false; if (pOutput->pSub->offsetRows == NULL) { pOutput->pSub->offsetRows = taosArrayInit(4, sizeof(OffsetRows)); - init = true; } pIter = NULL; while (1) { pIter = taosHashIterate(pSub->consumerHash, pIter); if (pIter == NULL) break; SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter; - if (init) { - taosArrayAddAll(pOutput->pSub->offsetRows, pConsumerEp->offsetRows); -// mDebug("pSub->offsetRows is init"); - } else { - for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { - OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); - for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { - OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); - if (d1->vgId == d2->vgId) { - d2->rows += d1->rows; - d2->offset = d1->offset; -// mDebug("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); - } + SMqConsumerEp *pConsumerEpNew = taosHashGet(pOutput->pSub->consumerHash, &pConsumerEp->consumerId, sizeof(int64_t)); + + for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { + OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); + bool jump = false; + for (int i = 0; pConsumerEpNew && i < taosArrayGetSize(pConsumerEpNew->vgs); i++){ + SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); + if(pVgEp->vgId == d1->vgId){ + jump = true; + mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + break; + } + } + if(jump) continue; + bool find = false; + for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { + OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); + if (d1->vgId == d2->vgId) { + d2->rows += d1->rows; + d2->offset = d1->offset; + find = true; + mInfo("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); + break; } } + if(!find){ + taosArrayPush(pOutput->pSub->offsetRows, d1); + } } } taosRUnLockLatch(&pSub->lock); 
mndReleaseSubscribe(pMnode, pSub); - } +// } } // 8. generate logs @@ -576,50 +587,44 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu return -1; } + char topic[TSDB_TOPIC_FNAME_LEN] = {0}; + char cgroup[TSDB_CGROUP_LEN] = {0}; + mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); + // 3. commit log: consumer to update status and epoch // 3.1 set touched consumer int32_t consumerNum = taosArrayGetSize(pOutput->modifyConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->modifyConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__TOUCH; - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_NOTOPIC; if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.2 set new consumer consumerNum = taosArrayGetSize(pOutput->newConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->newConsumers, i); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_TOPIC; - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__ADD; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - 
mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebNewTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebNewTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.3 set removed consumer @@ -627,24 +632,19 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->removedConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__REMOVE; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebRemovedTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_REMOVE; + + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebRemovedTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 4. 
TODO commit log: modification log @@ -771,8 +771,10 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { } static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMDropCgroupReq dropReq = {0}; + SMnode *pMnode = pMsg->info.node; + SMDropCgroupReq dropReq = {0}; + STrans *pTrans = NULL; + int32_t code = TSDB_CODE_ACTION_IN_PROGRESS; if (tDeserializeSMDropCgroupReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -791,38 +793,54 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { } } + taosWLockLatch(&pSub->lock); if (taosHashGetSize(pSub->consumerHash) != 0) { terrno = TSDB_CODE_MND_CGROUP_USED; mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - return -1; + code = -1; + goto end; + } + + void *pIter = NULL; + SMqConsumerObj *pConsumer; + while (1) { + pIter = sdbFetch(pMnode->pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) { + break; + } + + if (strcmp(dropReq.cgroup, pConsumer->cgroup) == 0) { + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } + sdbRelease(pMnode->pSdb, pConsumer); } - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); if (pTrans == NULL) { mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic); if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } if 
(mndTransPrepare(pMnode, pTrans) < 0) { - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } + +end: + taosWUnLockLatch(&pSub->lock); mndReleaseSubscribe(pMnode, pSub); + mndTransDrop(pTrans); - return TSDB_CODE_ACTION_IN_PROGRESS; + return code; } void mndCleanupSubscribe(SMnode *pMnode) {} @@ -989,6 +1007,32 @@ SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key) { return pSub; } +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName) { + int32_t num = 0; + SSdb *pSdb = pMnode->pSdb; + + void *pIter = NULL; + SMqSubscribeObj *pSub = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pSub); + if (pIter == NULL) break; + + + char topic[TSDB_TOPIC_FNAME_LEN]; + char cgroup[TSDB_CGROUP_LEN]; + mndSplitSubscribeKey(pSub->key, topic, cgroup, true); + if (strcmp(topic, topicName) != 0) { + sdbRelease(pSdb, pSub); + continue; + } + + num++; + sdbRelease(pSdb, pSub); + } + + return num; +} + void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub) { SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pSub); @@ -1114,9 +1158,13 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t cons colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false); // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, *numOfRows, (const char *)&consumerId, consumerId == -1); - + colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1); + mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic), consumerId, varDataVal(cgroup), pVgEp->vgId); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 
4bbe531bf8e1bb50598e0a801a0552817084a34e..485823edf3547309c2f57313415b575c961b4206 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -569,6 +569,11 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { SMqTopicObj *pTopic = NULL; SDbObj *pDb = NULL; SCMCreateTopicReq createTopicReq = {0}; + if (sdbGetSize(pMnode->pSdb, SDB_TOPIC) >= tmqMaxTopicNum){ + terrno = TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE; + mError("topic num out of range"); + return code; + } if (tDeserializeSCMCreateTopicReq(pReq->pCont, pReq->contLen, &createTopicReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -681,7 +686,11 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { break; } - if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue; + if (pConsumer->status == MQ_CONSUMER_STATUS_LOST){ + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + mndReleaseConsumer(pMnode, pConsumer); + continue; + } int32_t sz = taosArrayGetSize(pConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 54e89ee2690d10cce3ab6be24bb867b47f56ebe2..cb4b3231f61ebce6633dd3180cee13636828ac25 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -1980,6 +1980,11 @@ static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) { if (!tsTtlChangeOnWrite) return 0; + if (changeTimeMs <= 0) { + metaWarn("Skip to change ttl deletetion time on write, uid: %" PRId64, uid); + return TSDB_CODE_VERSION_NOT_COMPATIBLE; + } + STtlUpdCtimeCtx ctx = {.uid = uid, .changeTimeMs = changeTimeMs}; return ttlMgrUpdateChangeTime(pMeta->pTtlMgr, &ctx); diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index c283472c2464219502b593215f8117f74c5daabd..af4827a9c72495b8fe3d08e41f62db67309a5110 100644 --- 
a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -358,7 +358,8 @@ int ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { - metaError("ttlMgr flush failed to get ttl cache since %s", tstrerror(terrno)); + metaError("ttlMgr flush failed to get ttl cache since %s, uid: %" PRId64 ", type: %d", tstrerror(terrno), *pUid, + pEntry->type); goto _out; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 761def6dc65e7902d57b646818489b95fb25d776..250c94c2f9ba3fc92ab0975cc54cf6d37d3a09d6 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -826,7 +826,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { return -1; } - SReadHandle handle = {.vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState, .fillHistory = pTask->info.fillHistory}; + SReadHandle handle = {.vnode = pTq->pVnode, + .initTqReader = 1, + .pStateBackend = pTask->pState, + .fillHistory = pTask->info.fillHistory, + .winRange = pTask->dataRange.window}; initStorageAPI(&handle.api); pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId); @@ -849,7 +853,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { } int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->pUpstreamEpInfoList); - SReadHandle handle = {.vnode = NULL, .numOfVgroups = numOfVgroups, .pStateBackend = pTask->pState, .fillHistory = pTask->info.fillHistory}; + SReadHandle handle = {.vnode = NULL, + .numOfVgroups = numOfVgroups, + .pStateBackend = pTask->pState, + .fillHistory = pTask->info.fillHistory, + .winRange = pTask->dataRange.window}; initStorageAPI(&handle.api); pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 
e5622647118017b573d8ced380dfdfa7ea47b6dd..ef4ab9242facc14d01573d3f2cabd26c6a0ebac0 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -388,7 +388,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData); while (pReader->nextBlk < numOfBlocks) { - tqDebug("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, + tqTrace("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, numOfBlocks, pReader->msg.msgLen, pReader->msg.ver, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); @@ -403,7 +403,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)); if (ret != NULL) { - tqDebug("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); + tqTrace("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); SSDataBlock* pRes = NULL; int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL); @@ -412,11 +412,11 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { } } else { pReader->nextBlk += 1; - tqDebug("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); + tqTrace("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); } } - qDebug("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id); + qTrace("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id); tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE); pReader->msg.msgStr = NULL; @@ -604,7 +604,7 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol } int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* id) { - tqDebug("tq reader retrieve data block %p, 
index:%d", pReader->msg.msgStr, pReader->nextBlk); + tqTrace("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk++); SSDataBlock* pBlock = pReader->pResBlock; diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 964d8b105bbbdf026e01f4e015f9dd514f628e71..b22650d2498c17677607a99fce0dd4cba312d230 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -335,6 +335,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayInit(1, sizeof(STagVal)); if (!tagArray) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); goto _end; } STagVal tagVal = { @@ -350,6 +351,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayDestroy(tagArray); if (pTag == NULL) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _end; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c27aab5b63bfaca64c9c7bb1c5403fa4a60c30a0..40bca578278328188b5a009bbf568988956c5fcf 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -234,8 +234,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int } } - *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; - pCoder->pos += sizeof(int64_t); + if (!tDecodeIsEnd(pCoder)) { + *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; + pCoder->pos += sizeof(int64_t); + } tEndDecode(pCoder); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 522f648ccc2ae83fd8732f6f211a962a2d50fc35..0ba9aae1336069d24959d73b9c885bae10dce580 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ 
-285,6 +285,8 @@ typedef struct SStreamAggSupporter { int16_t stateKeyType; SDiskbasedBuf* pResultBuf; SStateStore stateStore; + STimeWindow winRange; + SStorageAPI* pSessionAPI; } SStreamAggSupporter; typedef struct SWindowSupporter { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 5f86f195b6a257bbdb973b00b0b78c3c62acd805..d67088ebe151b6402562d01eb017bbb42b9e6fe4 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1134,6 +1134,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT SOperatorInfo* pOperator = pTaskInfo->pRoot; const char* id = GET_TASKID(pTaskInfo); + if(subType == TOPIC_SUB_TYPE__COLUMN && pOffset->type == TMQ_OFFSET__LOG){ + pOperator = extractOperatorInTree(pOperator, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id); + if (pOperator == NULL) { + return -1; + } + SStreamScanInfo* pInfo = pOperator->info; + SStoreTqReader* pReaderAPI = &pTaskInfo->storageAPI.tqReaderFn; + SWalReader* pWalReader = pReaderAPI->tqReaderGetWalReader(pInfo->tqReader); + walReaderVerifyOffset(pWalReader, pOffset); + } // if pOffset equal to current offset, means continue consume if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.currentOffset)) { return 0; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index d56fa9de78ac5da0eb91c2540164262485bf42ec..bfe4ed05330f509d4ea660d826f71ca069982d28 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2456,6 +2456,10 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys if (pHandle->vnode) { SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo); + if (pTableScanOp == NULL) { + qError("createTableScanOperatorInfo error, errorcode: %d", pTaskInfo->code); + goto _error; + } STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info; if 
(pHandle->version > 0) { pTSInfo->base.cond.endVersion = pHandle->version; diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 13d24aa531c5d32b73a8bf4663518bb5b592c946..c4111ded9259ce9238862b53c874433422ef7f33 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2966,7 +2966,7 @@ void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uin } int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, int64_t gap, - SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore) { + SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore, SReadHandle* pHandle, SStorageAPI* pApi) { pSup->resultRowSize = keySize + getResultRowSize(pCtx, numOfOutput); pSup->pScanBlock = createSpecialDataBlock(STREAM_CLEAR); pSup->gap = gap; @@ -3008,6 +3008,16 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx, pCtx[i].saveHandle.pBuf = pSup->pResultBuf; } + if (pHandle) { + pSup->winRange = pHandle->winRange; + // temporary + if (pSup->winRange.ekey <= 0) { + pSup->winRange.ekey = INT64_MAX; + } + } + + pSup->pSessionAPI = pApi; + return TSDB_CODE_SUCCESS; } @@ -3035,6 +3045,13 @@ void getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endT bool isInvalidSessionWin(SResultWindowInfo* pWinInfo) { return pWinInfo->sessionWin.win.skey == 0; } +bool inWinRange(STimeWindow* range, STimeWindow* cur) { + if (cur->skey >= range->skey && cur->ekey <= range->ekey) { + return true; + } + return false; +} + void setSessionOutputBuf(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, SResultWindowInfo* pCurWin) { pCurWin->sessionWin.groupId = groupId; @@ -3043,6 +3060,12 @@ void setSessionOutputBuf(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endT int32_t size = pAggSup->resultRowSize; int32_t code = 
pAggSup->stateStore.streamStateSessionAddIfNotExist(pAggSup->pState, &pCurWin->sessionWin, pAggSup->gap, &pCurWin->pOutputBuf, &size); + if (code == TSDB_CODE_SUCCESS && !inWinRange(&pAggSup->winRange, &pCurWin->sessionWin.win)) { + code = TSDB_CODE_FAILED; + releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)pCurWin->pOutputBuf, &pAggSup->pSessionAPI->stateStore); + pCurWin->pOutputBuf = taosMemoryMalloc(size); + } + if (code == TSDB_CODE_SUCCESS) { pCurWin->isOutput = true; } else { @@ -3189,7 +3212,8 @@ static void compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo* pC while (1) { SResultWindowInfo winInfo = {0}; SStreamStateCur* pCur = getNextSessionWinInfo(pAggSup, pStUpdated, pCurWin, &winInfo); - if (!IS_VALID_SESSION_WIN(winInfo) || !isInWindow(pCurWin, winInfo.sessionWin.win.skey, pAggSup->gap)) { + if (!IS_VALID_SESSION_WIN(winInfo) || !isInWindow(pCurWin, winInfo.sessionWin.win.skey, pAggSup->gap) || + !inWinRange(&pAggSup->winRange, &winInfo.sessionWin.win)) { taosMemoryFree(winInfo.pOutputBuf); pAPI->stateStore.streamStateFreeCur(pCur); break; @@ -3413,8 +3437,12 @@ static void rebuildSessionWindow(SOperatorInfo* pOperator, SArray* pWinArray, SS SResultWindowInfo childWin = {0}; childWin.sessionWin = *pWinKey; int32_t code = getSessionWinBuf(pChAggSup, pCur, &childWin); - if (code == TSDB_CODE_SUCCESS && pWinKey->win.skey <= childWin.sessionWin.win.skey && - childWin.sessionWin.win.ekey <= pWinKey->win.ekey) { + + if (code == TSDB_CODE_SUCCESS && !inWinRange(&pAggSup->winRange, &childWin.sessionWin.win)) { + continue; + } + + if (code == TSDB_CODE_SUCCESS && inWinRange(&pWinKey->win, &childWin.sessionWin.win)) { if (num == 0) { setSessionOutputBuf(pAggSup, pWinKey->win.skey, pWinKey->win.ekey, pWinKey->groupId, &parentWin); code = initSessionOutputBuf(&parentWin, &pResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset); @@ -3678,9 +3706,16 @@ void streamSessionReleaseState(SOperatorInfo* pOperator) { } } +void 
resetWinRange(STimeWindow* winRange) { +  winRange->skey = INT64_MIN; +  winRange->ekey = INT64_MAX; +} + void streamSessionReloadState(SOperatorInfo* pOperator) {   SStreamSessionAggOperatorInfo* pInfo = pOperator->info;   SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; +  resetWinRange(&pAggSup->winRange); +   SResultWindowInfo winInfo = {0};   int32_t size = 0;   void* pBuf = NULL; @@ -3734,7 +3769,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh   }    code = initStreamAggSupporter(&pInfo->streamAggSup, pSup->pCtx, numOfCols, pSessionNode->gap, -                               pTaskInfo->streamInfo.pState, 0, 0, &pTaskInfo->storageAPI.stateStore); +                               pTaskInfo->streamInfo.pState, 0, 0, &pTaskInfo->storageAPI.stateStore, pHandle, &pTaskInfo->storageAPI);   if (code != TSDB_CODE_SUCCESS) {     goto _error;   } @@ -4024,6 +4059,12 @@ void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId,     pCurWin->pStateKey->pData = (char*)pCurWin->pStateKey + sizeof(SStateKeys);     pCurWin->pStateKey->isNull = false;  +  if (code == TSDB_CODE_SUCCESS && !inWinRange(&pAggSup->winRange, &pCurWin->winInfo.sessionWin.win)) { +    code = TSDB_CODE_FAILED; +    releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)pCurWin->winInfo.pOutputBuf, &pAggSup->pSessionAPI->stateStore); +    pCurWin->winInfo.pOutputBuf = taosMemoryMalloc(size); +  } +   if (code == TSDB_CODE_SUCCESS) {     pCurWin->winInfo.isOutput = true;   } else if (pKeyData) { @@ -4292,6 +4333,8 @@ static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCur void streamStateReloadState(SOperatorInfo* pOperator) {   SStreamSessionAggOperatorInfo* pInfo = pOperator->info;   SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; +  resetWinRange(&pAggSup->winRange); +   SSessionKey seKey = {.win.skey = INT64_MIN, .win.ekey = INT64_MIN, .groupId = 0};   int32_t size = 0;   void* pBuf = NULL; @@ -4361,7 +4404,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys   int32_t keySize = sizeof(SStateKeys) +
pColNode->node.resType.bytes; int16_t type = pColNode->node.resType.type; code = initStreamAggSupporter(&pInfo->streamAggSup, pSup->pCtx, numOfCols, 0, pTaskInfo->streamInfo.pState, keySize, - type, &pTaskInfo->storageAPI.stateStore); + type, &pTaskInfo->storageAPI.stateStore, pHandle, &pTaskInfo->storageAPI); if (code != TSDB_CODE_SUCCESS) { goto _error; } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index d447fe261d6f5a1b7464bfdc92c637b9b7fac370..21b36d69ec24fbf7c5bd6df80d0aa7088e5b052e 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2415,6 +2415,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { } static int32_t firstLastTransferInfoImpl(SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) { + if (!pInput->hasResult) { + return TSDB_CODE_FAILED; + } + if (pOutput->hasResult) { if (isFirst) { if (pInput->ts > pOutput->ts) { diff --git a/source/libs/function/src/thistogram.c b/source/libs/function/src/thistogram.c index e7d631f638da769fe0d9eabb03762bb983410a56..b56691f35d2933da298b74b13af0d2eee32ebb61 100644 --- a/source/libs/function/src/thistogram.c +++ b/source/libs/function/src/thistogram.c @@ -474,8 +474,8 @@ double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { } ASSERTS(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem, - "tHistogramUniform Error, total:%d, numOfElem:%d, elems[%d].num:%d", - total, numOfElem, j + 1, pHisto->elems[j + 1].num); + "tHistogramUniform Error, total:%ld, numOfElem:%ld, elems[%d].num:%ld", + total, (int64_t)numOfElem, j + 1, pHisto->elems[j + 1].num); double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 3ec802a7cebfc29f33b4c9e61b909908807ae501..8101b342a44e3132c969b3055839fbf7eb55c8b4 100644 --- a/source/libs/function/src/tpercentile.c +++ 
b/source/libs/function/src/tpercentile.c @@ -39,6 +39,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) if (p != NULL) { pIdList = *(SArray **)p; } else { + taosMemoryFree(buffer); return NULL; } @@ -48,6 +49,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); if (pg == NULL) { + taosMemoryFree(buffer); return NULL; } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 31a7dfdbc530fb4e2e5742f725e23335ae18eb9f..5b9f44c8126b520715bf32144b99f9af17638174 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -255,6 +255,18 @@ int32_t udfStopUdfd() { return 0; } +int32_t udfGetUdfdPid(int32_t* pUdfdPid) { + SUdfdData *pData = &udfdGlobal; + if (pData->spawnErr) { + return pData->spawnErr; + } + uv_pid_t pid = uv_process_get_pid(&pData->process); + if (pUdfdPid) { + *pUdfdPid = (int32_t)pid; + } + return TSDB_CODE_SUCCESS; +} + //============================================================================================== /* Copyright (c) 2013, Ben Noordhuis * The QUEUE is copied from queue.h under libuv diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 3b827a2f99a446b11236f53acda1fb4da0592e88..93259924d5913cf43eede16184f5a82fb87a6f51 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -965,40 +965,6 @@ int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) { return code; } -int32_t udfdConnectToMnode() { - SConnectReq connReq = {0}; - connReq.connType = CONN_TYPE__UDFD; - tstrncpy(connReq.app, "udfd", sizeof(connReq.app)); - tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user)); - char pass[TSDB_PASSWORD_LEN + 1] = {0}; - taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass); - tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd)); - connReq.pid = 
taosGetPId(); - connReq.startTime = taosGetTimestampMs(); - strcpy(connReq.sVer, version); - - int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq); - void *pReq = rpcMallocCont(contLen); - tSerializeSConnectReq(pReq, contLen, &connReq); - - SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); - msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT; - uv_sem_init(&msgInfo->resultSem, 0); - - SRpcMsg rpcMsg = {0}; - rpcMsg.msgType = TDMT_MND_CONNECT; - rpcMsg.pCont = pReq; - rpcMsg.contLen = contLen; - rpcMsg.info.ahandle = msgInfo; - rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); - - uv_sem_wait(&msgInfo->resultSem); - int32_t code = msgInfo->code; - uv_sem_destroy(&msgInfo->resultSem); - taosMemoryFree(msgInfo); - return code; -} - static bool udfdRpcRfp(int32_t code, tmsg_t msgType) { if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_RPC_BROKEN_LINK || code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED || code == TSDB_CODE_SYN_RESTORING || @@ -1378,23 +1344,6 @@ static int32_t udfdRun() { return 0; } -void udfdConnectMnodeThreadFunc(void *args) { - int32_t retryMnodeTimes = 0; - int32_t code = 0; - while (retryMnodeTimes++ <= TSDB_MAX_REPLICA) { - uv_sleep(100 * (1 << retryMnodeTimes)); - code = udfdConnectToMnode(); - if (code == 0) { - break; - } - fnError("udfd can not connect to mnode, code: %s. 
retry", tstrerror(code)); - } - - if (code != 0) { - fnError("udfd can not connect to mnode"); - } -} - int32_t udfdInitResidentFuncs() { if (strlen(tsUdfdResFuncs) == 0) { return TSDB_CODE_SUCCESS; @@ -1497,9 +1446,6 @@ int main(int argc, char *argv[]) { udfdInitResidentFuncs(); - uv_thread_t mnodeConnectThread; - uv_thread_create(&mnodeConnectThread, udfdConnectMnodeThreadFunc, NULL); - udfdRun(); removeListeningPipe(); diff --git a/source/libs/geometry/src/geosWrapper.c b/source/libs/geometry/src/geosWrapper.c index dd83083ec9600fab9459e14e8079093bfd4a4578..993178e2b02f3ba75a875df39edb861f5f1f240a 100644 --- a/source/libs/geometry/src/geosWrapper.c +++ b/source/libs/geometry/src/geosWrapper.c @@ -173,6 +173,7 @@ int32_t initCtxAsText() { if (geosCtx->WKTWriter) { GEOSWKTWriter_setRoundingPrecision_r(geosCtx->handle, geosCtx->WKTWriter, 6); + GEOSWKTWriter_setTrim_r(geosCtx->handle, geosCtx->WKTWriter, 0); } else { return code; } diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 2c12c840816266882ea7ccb5ab8a95b84be414ab..bfdcd2b030369033c9f350795107eab95fe8cedd 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -639,6 +639,10 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP ret = indexJsonSearch(arg->ivtIdx, mtm, output->result); indexMultiTermQueryDestroy(mtm); } else { + if (left->colValType == TSDB_DATA_TYPE_GEOMETRY || right->colValType == TSDB_DATA_TYPE_GEOMETRY) { + return TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR; + } + bool reverse = false, equal = false; FilterFunc filterFunc = sifGetFilterFunc(qtype, &reverse, &equal); diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index f9b4e54318b4c3c87895ad2b6a1ec6b03444d342..8d35674949801893500f63d1a6e7a9e94683f47a 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -331,6 +331,7 @@ static int32_t 
parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, int64_t iv; uint64_t uv; char* endptr = NULL; + int32_t code = TSDB_CODE_SUCCESS; if (isNullValue(pSchema->type, pToken)) { if (TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { @@ -467,8 +468,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, break; } - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_GEOMETRY: { + case TSDB_DATA_TYPE_BINARY: { // Too long values will raise the invalid sql error message if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); @@ -478,6 +478,30 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, break; } + case TSDB_DATA_TYPE_GEOMETRY: { + unsigned char* output = NULL; + size_t size = 0; + + code = parseGeometry(pToken, &output, &size); + if (code != TSDB_CODE_SUCCESS) { + code = buildSyntaxErrMsg(pMsgBuf, getThreadLocalGeosCtx()->errMsg, pToken->z); + } else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { + // Too long values will raise the invalid sql error message + code = generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); + } else { + val->pData = taosMemoryMalloc(size); + if (NULL == val->pData) { + code = TSDB_CODE_OUT_OF_MEMORY; + } else { + memcpy(val->pData, output, size); + val->nData = size; + } + } + + geosFreeBuffer(output); + break; + } + case TSDB_DATA_TYPE_NCHAR: { int32_t output = 0; void* p = taosMemoryCalloc(1, pSchema->bytes - VARSTR_HEADER_SIZE); @@ -508,7 +532,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, } } - return TSDB_CODE_SUCCESS; + return code; } // input pStmt->pSql: [(tag1_name, ...)] TAGS (tag1_value, ...) ... 
@@ -1382,7 +1406,7 @@ static int32_t parseValueTokenImpl(SInsertParseContext* pCxt, const char** pSql, code = buildSyntaxErrMsg(&pCxt->msg, getThreadLocalGeosCtx()->errMsg, pToken->z); } // Too long values will raise the invalid sql error message - else if (size > pSchema->bytes) { + else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); } else { diff --git a/source/libs/scalar/CMakeLists.txt b/source/libs/scalar/CMakeLists.txt index 30c68cb512b47b725caadb454fe3bff008520938..1fe0f9a18db695cf1fc4ad6b36420cdeca0dccf3 100644 --- a/source/libs/scalar/CMakeLists.txt +++ b/source/libs/scalar/CMakeLists.txt @@ -8,13 +8,14 @@ target_include_directories( ) target_link_libraries(scalar - PRIVATE os - PRIVATE util + PRIVATE os + PRIVATE util PRIVATE common PRIVATE nodes PRIVATE function PRIVATE qcom PRIVATE parser + PRIVATE geometry ) if(${BUILD_TEST}) diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h index 1ca8ac1d8c0e9c86e02cde3f317bf7de5af510a8..5fb7b0e90c30521c3af10f596c87db50448eb3aa 100644 --- a/source/libs/scalar/inc/filterInt.h +++ b/source/libs/scalar/inc/filterInt.h @@ -271,8 +271,9 @@ struct SFilterInfo { SFilterPCtx pctx; }; -#define FILTER_NO_MERGE_DATA_TYPE(t) \ - ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON) +#define FILTER_NO_MERGE_DATA_TYPE(t) \ + ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON || \ + (t) == TSDB_DATA_TYPE_GEOMETRY) #define FILTER_NO_MERGE_OPTR(o) ((o) == OP_TYPE_IS_NULL || (o) == OP_TYPE_IS_NOT_NULL || (o) == FILTER_DUMMY_EMPTY_OPTR) #define MR_EMPTY_RES(ctx) (ctx->rs == NULL) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index bbefcc6b3ae96157ea138a68aef0453a1caf7489..b3afbb53c1daa0314ab07e73a16a2bb67a5e24d3 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -133,7 
+133,7 @@ __compar_fn_t gDataCompare[] = { setChkInBytes2, setChkInBytes4, setChkInBytes8, comparestrRegexMatch, comparestrRegexNMatch, setChkNotInBytes1, setChkNotInBytes2, setChkNotInBytes4, setChkNotInBytes8, compareChkNotInString, comparestrPatternNMatch, comparewcsPatternNMatch, - comparewcsRegexMatch, comparewcsRegexNMatch, + comparewcsRegexMatch, comparewcsRegexNMatch, compareLenBinaryVal }; __compar_fn_t gInt8SignCompare[] = {compareInt8Val, compareInt8Int16, compareInt8Int32, @@ -257,8 +257,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_GEOMETRY: { + case TSDB_DATA_TYPE_BINARY: { if (optr == OP_TYPE_MATCH) { comparFn = 19; } else if (optr == OP_TYPE_NMATCH) { @@ -297,6 +296,21 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { break; } + case TSDB_DATA_TYPE_GEOMETRY: { + if (optr == OP_TYPE_EQUAL || optr == OP_TYPE_NOT_EQUAL || optr == OP_TYPE_IS_NULL || + optr == OP_TYPE_IS_NOT_NULL) { + comparFn = 30; + } else if (optr == OP_TYPE_IN) { + comparFn = 8; + } else if (optr == OP_TYPE_NOT_IN) { + comparFn = 25; + } else { + terrno = TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR; + return 0; + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: comparFn = 11; break; @@ -1042,12 +1056,12 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *fid) { if (node == NULL) { fltDebug("empty node"); - FLT_ERR_RET(TSDB_CODE_APP_ERROR); + goto _return; } if (nodeType(node) != QUERY_NODE_COLUMN && nodeType(node) != QUERY_NODE_VALUE && nodeType(node) != QUERY_NODE_NODE_LIST) { - FLT_ERR_RET(TSDB_CODE_APP_ERROR); + goto _return; } int32_t type; @@ -1063,6 +1077,7 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f filterAddField(info, v, NULL, type, fid, 0, true, NULL); +_return: return TSDB_CODE_SUCCESS; } @@ -1948,33 
+1963,15 @@ int32_t fltInitValFieldData(SFilterInfo *info) { } SDataType *dType = &var->node.resType; - size_t bytes = 0; - - if (type == TSDB_DATA_TYPE_BINARY) { - size_t len = (dType->type == TSDB_DATA_TYPE_BINARY || dType->type == TSDB_DATA_TYPE_NCHAR) ? dType->bytes - : MAX_NUM_STR_SIZE; - bytes = len + 1 + VARSTR_HEADER_SIZE; - - fi->data = taosMemoryCalloc(1, bytes); - } else if (type == TSDB_DATA_TYPE_NCHAR) { - size_t len = (dType->type == TSDB_DATA_TYPE_BINARY || dType->type == TSDB_DATA_TYPE_NCHAR) ? dType->bytes - : MAX_NUM_STR_SIZE; - bytes = (len + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; - - fi->data = taosMemoryCalloc(1, bytes); - } else { - fi->data = taosMemoryCalloc(1, sizeof(int64_t)); - } - if (dType->type == type) { + size_t bufBytes = TMAX(dType->bytes, sizeof(int64_t)); + fi->data = taosMemoryCalloc(1, bufBytes); assignVal(fi->data, nodesGetValueFromNode(var), dType->bytes, type); } else { SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))}; out.columnData->info.type = type; out.columnData->info.precision = precision; - if (IS_VAR_DATA_TYPE(type)) { - out.columnData->info.bytes = bytes; - } else { + if (!IS_VAR_DATA_TYPE(type)) { out.columnData->info.bytes = tDataTypes[type].bytes; } @@ -1985,7 +1982,13 @@ int32_t fltInitValFieldData(SFilterInfo *info) { return TSDB_CODE_TSC_INVALID_OPERATION; } - memcpy(fi->data, out.columnData->pData, out.columnData->info.bytes); + size_t bufBytes = IS_VAR_DATA_TYPE(type) ? varDataTLen(out.columnData->pData) + : TMAX(out.columnData->info.bytes, sizeof(int64_t)); + fi->data = taosMemoryCalloc(1, bufBytes); + + size_t valBytes = IS_VAR_DATA_TYPE(type) ? 
varDataTLen(out.columnData->pData) : out.columnData->info.bytes; + memcpy(fi->data, out.columnData->pData, valBytes); + colDataDestroy(out.columnData); taosMemoryFree(out.columnData); } @@ -2751,6 +2754,7 @@ int32_t filterPostProcessRange(SFilterInfo *info) { } int32_t filterGenerateComInfo(SFilterInfo *info) { + terrno = 0; info->cunits = taosMemoryMalloc(info->unitNum * sizeof(*info->cunits)); info->blkUnitRes = taosMemoryMalloc(sizeof(*info->blkUnitRes) * info->unitNum); info->blkUnits = taosMemoryMalloc(sizeof(*info->blkUnits) * (info->unitNum + 1) * info->groupNum); @@ -2758,7 +2762,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { for (uint32_t i = 0; i < info->unitNum; ++i) { SFilterUnit *unit = &info->units[i]; - info->cunits[i].func = filterGetCompFuncIdx(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); + info->cunits[i].func = filterGetCompFuncIdx(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); // set terrno if err info->cunits[i].rfunc = filterGetRangeCompFuncFromOptrs(unit->compare.optr, unit->compare.optr2); info->cunits[i].optr = FILTER_UNIT_OPTR(unit); info->cunits[i].colData = NULL; @@ -2779,7 +2783,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { info->cunits[i].dataType = FILTER_UNIT_DATA_TYPE(unit); } - return TSDB_CODE_SUCCESS; + return terrno; } int32_t filterUpdateComUnits(SFilterInfo *info) { @@ -3336,6 +3340,7 @@ int32_t filterSetExecFunc(SFilterInfo *info) { } int32_t filterPreprocess(SFilterInfo *info) { + int32_t code = TSDB_CODE_SUCCESS; SFilterGroupCtx **gRes = taosMemoryCalloc(info->groupNum, sizeof(SFilterGroupCtx *)); int32_t gResNum = 0; @@ -3361,7 +3366,7 @@ int32_t filterPreprocess(SFilterInfo *info) { filterRewrite(info, gRes, gResNum); - filterGenerateComInfo(info); + FLT_ERR_JRET(filterGenerateComInfo(info)); _return: @@ -3373,7 +3378,7 @@ _return: taosMemoryFreeClear(gRes); - return TSDB_CODE_SUCCESS; + return code; } int32_t fltSetColFieldDataImpl(SFilterInfo *info, void *param, filer_get_col_from_id fp, bool 
fromColId) { @@ -3741,10 +3746,10 @@ int32_t fltSclBuildRangeFromBlockSma(SFltSclColumnRange *colRange, SColumnDataAg taosArrayPush(points, &startPt); taosArrayPush(points, &endPt); } - SFltSclDatum min; + SFltSclDatum min = {0}; fltSclBuildDatumFromBlockSmaValue(&min, colRange->colNode->node.resType.type, pAgg->min); SFltSclPoint minPt = {.excl = false, .start = true, .val = min}; - SFltSclDatum max; + SFltSclDatum max = {0}; fltSclBuildDatumFromBlockSmaValue(&max, colRange->colNode->node.resType.type, pAgg->max); SFltSclPoint maxPt = {.excl = false, .start = false, .val = max}; taosArrayPush(points, &minPt); @@ -4290,30 +4295,27 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { return DEAL_RES_ERROR; } + SColumnNode *refNode = (SColumnNode *)node->pLeft; + SExprNode *exprNode = NULL; if (OP_TYPE_IN != node->opType) { - SColumnNode *refNode = (SColumnNode *)node->pLeft; SValueNode *valueNode = (SValueNode *)node->pRight; if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && TSDB_DATA_TYPE_UBIGINT == valueNode->node.resType.type && valueNode->datum.u <= INT64_MAX) { valueNode->node.resType.type = TSDB_DATA_TYPE_BIGINT; } - int32_t type = vectorGetConvertType(refNode->node.resType.type, valueNode->node.resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; - } + exprNode = &valueNode->node; } else { - SColumnNode *refNode = (SColumnNode *)node->pLeft; SNodeListNode *listNode = (SNodeListNode *)node->pRight; if (LIST_LENGTH(listNode->pNodeList) > 10) { stat->scalarMode = true; return DEAL_RES_CONTINUE; } - int32_t type = vectorGetConvertType(refNode->node.resType.type, listNode->node.resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; - } + exprNode = &listNode->node; + } + int32_t type = vectorGetConvertType(refNode->node.resType.type, exprNode->resType.type); + if (0 != type && type != 
refNode->node.resType.type) { + stat->scalarMode = true; + return DEAL_RES_CONTINUE; } } @@ -4664,7 +4666,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC code = scalarCalculate(info->sclCtx.node, pList, &output); taosArrayDestroy(pList); - FLT_ERR_RET(code); + FLT_ERR_RET(code); // TODO: current errcode returns as true *p = output.columnData; diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index b41eba293bdc64d646c8c39c1112eb19bb27ff49..35256d0c96c5e7e2a51df3f575c3c8bf57341fab 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -26,6 +26,7 @@ #include "tdataformat.h" #include "ttime.h" #include "ttypes.h" +#include "geosWrapper.h" #define LEFT_COL ((pLeftCol->info.type == TSDB_DATA_TYPE_JSON ? (void *)pLeftCol : pLeftCol->pData)) #define RIGHT_COL ((pRightCol->info.type == TSDB_DATA_TYPE_JSON ? (void *)pRightCol : pRightCol->pData)) @@ -378,6 +379,31 @@ static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIn taosMemoryFree(t); } +// todo remove this malloc +static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) { + //[ToDo] support to parse WKB as well as WKT + unsigned char *t = NULL; + size_t len = 0; + + if (initCtxGeomFromText()) { + sclError("failed to init geometry ctx"); + return; + } + if (doGeomFromText(buf, &t, &len)) { + sclDebug("failed to convert text to geometry"); + return; + } + + char *output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE); + memcpy(output + VARSTR_HEADER_SIZE, t, len); + varDataSetLen(output, len); + + colDataSetVal(pOut->columnData, rowIndex, output, false); + + taosMemoryFree(output); + geosFreeBuffer(t); +} + // TODO opt performance, tmp is not needed. 
int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { bool vton = false; @@ -401,6 +427,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { vton = true; } else if (TSDB_DATA_TYPE_TIMESTAMP == pCtx->outType) { func = varToTimestamp; + } else if (TSDB_DATA_TYPE_GEOMETRY == pCtx->outType) { + func = varToGeometry; } else { sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType); return TSDB_CODE_APP_ERROR; @@ -881,7 +909,7 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, } int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { - /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON GEOM VARB DECI BLOB MEDB*/ + /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ /*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, 0, /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0, @@ -890,7 +918,7 @@ int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, 0, /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, 0, /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, 0, - /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, + /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 20, /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, 0, /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, 0, diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 7840fe2017a6f384e70f1553105c7293a4abb2d7..aecf3d5d916681687986ec1771d7f02e79304a8b 100644 --- a/source/libs/scheduler/inc/schInt.h +++ 
b/source/libs/scheduler/inc/schInt.h @@ -57,7 +57,7 @@ typedef enum { #define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ #define SCHEDULE_DEFAULT_MAX_NODE_NUM 20 -#define SCH_DEFAULT_TASK_TIMEOUT_USEC 60000000 +#define SCH_DEFAULT_TASK_TIMEOUT_USEC 5000000 #define SCH_MAX_TASK_TIMEOUT_USEC 300000000 #define SCH_DEFAULT_MAX_RETRY_NUM 6 #define SCH_MIN_AYSNC_EXEC_NUM 3 @@ -239,7 +239,7 @@ typedef struct SSchTask { int32_t lastMsgType; // last sent msg type int64_t timeoutUsec; // task timeout useconds before reschedule SQueryNodeAddr succeedAddr; // task executed success node address - int8_t candidateIdx; // current try condidation index + int32_t candidateIdx; // current try condidation index SArray *candidateAddrs; // condidate node addresses, element is SQueryNodeAddr SHashObj *execNodes; // all tried node for current task, element is SSchNodeInfo SSchTaskProfile profile; // task execution profile diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 78e28bce4958f91a6802ea93798886d02c34af23..d4ded2dd8b571705c7449ae0c9a9690101513d63 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -745,7 +745,6 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { return TSDB_CODE_SUCCESS; } - pTask->candidateIdx = 0; pTask->candidateAddrs = taosArrayInit(SCHEDULE_DEFAULT_MAX_NODE_NUM, sizeof(SQueryNodeAddr)); if (NULL == pTask->candidateAddrs) { SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCHEDULE_DEFAULT_MAX_NODE_NUM); @@ -770,6 +769,8 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask)); + pTask->candidateIdx = taosRand() % taosArrayGetSize(pTask->candidateAddrs); + /* for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { strncpy(epSet->fqdn[epSet->numOfEps], job->dataSrcEps.fqdn[i], sizeof(job->dataSrcEps.fqdn[i])); diff --git 
a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 0799671bced6cb452a27927d7843900276ba62d1..dd857141c1288da621b8ed2b58af8373d28fbe31 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -376,7 +376,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, ASSERT(pPos->pRowBuff && pFileState->rowSize > 0); if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); streamStateClearBatch(batch); } @@ -390,7 +390,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, taosMemoryFree(buf); if (streamStateGetBatchSize(batch) > 0) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); } streamStateClearBatch(batch); @@ -407,7 +407,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, int32_t len = 0; sprintf(keyBuf, "%s:%" PRId64 "", taskKey, ((SStreamState*)pFileState->pFileStore)->checkPointId); streamFileStateEncode(&pFileState->flushMark, &valBuf, &len); - code = streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); + streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); taosMemoryFree(valBuf); } { @@ -511,7 +511,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState) { break; } memcpy(pNewPos->pRowBuff, pVal, pVLen); - code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->rowSize, &pNewPos, POINTER_BYTES); + code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { destroyRowBuffPos(pNewPos); break; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 
a12f8051ba982ed627ed0767b76d344678748ca9..1e70ce4a1c4bacac2dc5fdb046fe8adead163770 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -47,9 +47,7 @@ static FORCE_INLINE int walBuildTmpMetaName(SWal* pWal, char* buf) { } static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { - int32_t sz = taosArrayGetSize(pWal->fileInfoSet); - terrno = TSDB_CODE_SUCCESS; - + int32_t sz = taosArrayGetSize(pWal->fileInfoSet); SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); char fnameStr[WAL_FILE_LEN]; walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); @@ -74,13 +72,12 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { int64_t capacity = 0; int64_t readSize = 0; char* buf = NULL; - bool firstTrial = pFileInfo->fileSize < fileSize; int64_t offset = TMIN(pFileInfo->fileSize, fileSize); - int64_t offsetForward = offset - stepSize + walCkHeadSz - 1; - int64_t offsetBackward = offset; int64_t retVer = -1; int64_t lastEntryBeginOffset = 0; int64_t lastEntryEndOffset = 0; + int64_t recordLen = 0; + bool forwardStage = false; // check recover size if (2 * tsWalFsyncDataSizeLimit + offset < end) { @@ -91,14 +88,8 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { // search for the valid last WAL entry, e.g. block by block while (1) { - offset = (firstTrial) ? TMIN(fileSize, offsetForward + stepSize - walCkHeadSz + 1) - : TMAX(0, offsetBackward - stepSize + walCkHeadSz - 1); + offset = (lastEntryEndOffset > 0) ? 
offset : TMAX(0, offset - stepSize + walCkHeadSz - 1); end = TMIN(offset + stepSize, fileSize); - if (firstTrial) { - offsetForward = offset; - } else { - offsetBackward = offset; - } readSize = end - offset; capacity = readSize + sizeof(magic); @@ -129,7 +120,16 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { int64_t pos = 0; SWalCkHead* logContent = NULL; - while ((candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(magic))) != NULL) { + while (true) { + forwardStage = (lastEntryEndOffset > 0 || offset == 0); + terrno = TSDB_CODE_SUCCESS; + if (forwardStage) { + candidate = (readSize - (haystack - buf)) > 0 ? haystack : NULL; + } else { + candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(magic)); + } + + if (candidate == NULL) break; pos = candidate - buf; // validate head @@ -137,13 +137,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { if (len < walCkHeadSz) { break; } + logContent = (SWalCkHead*)(buf + pos); if (walValidHeadCksum(logContent) != 0) { terrno = TSDB_CODE_WAL_CHKSUM_MISMATCH; wWarn("vgId:%d, failed to validate checksum of wal entry header. 
offset:%" PRId64 ", file:%s", pWal->cfg.vgId, offset + pos, fnameStr); haystack = buf + pos + 1; - if (firstTrial) { + if (forwardStage) { break; } else { continue; @@ -151,9 +152,9 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { } // validate body - int64_t size = walCkHeadSz + logContent->head.bodyLen; - if (len < size) { - int64_t extraSize = size - len; + recordLen = walCkHeadSz + logContent->head.bodyLen; + if (len < recordLen) { + int64_t extraSize = recordLen - len; if (capacity < readSize + extraSize + sizeof(magic)) { capacity += extraSize; void* ptr = taosMemoryRealloc(buf, capacity); @@ -184,7 +185,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { wWarn("vgId:%d, failed to validate checksum of wal entry body. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, offset + pos, fnameStr); haystack = buf + pos + 1; - if (firstTrial) { + if (forwardStage) { break; } else { continue; @@ -194,21 +195,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { // found one retVer = logContent->head.version; lastEntryBeginOffset = offset + pos; - lastEntryEndOffset = offset + pos + sizeof(SWalCkHead) + logContent->head.bodyLen; + lastEntryEndOffset = offset + pos + recordLen; // try next - haystack = buf + pos + 1; + haystack = buf + pos + recordLen; } - if (end == fileSize) firstTrial = false; - if (firstTrial) { - if (terrno == TSDB_CODE_SUCCESS) { - continue; - } else { - firstTrial = false; - } - } - if (retVer >= 0 || offset == 0) break; + offset = (lastEntryEndOffset > 0) ? 
lastEntryEndOffset : offset; + if (forwardStage && (terrno != TSDB_CODE_SUCCESS || end == fileSize)) break; } if (retVer < 0) { diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index e32ff3da95ea96a2ad949b2a9434d3e80038eac9..843f9c56dced580f188e2efb01c2f0e20415fc2d 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -225,6 +225,23 @@ int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight) { return compareLenPrefixedWStr(pRight, pLeft); } +int32_t compareLenBinaryVal(const void *pLeft, const void *pRight) { + int32_t len1 = varDataLen(pLeft); + int32_t len2 = varDataLen(pRight); + + int32_t minLen = TMIN(len1, len2); + int32_t ret = memcmp(varDataVal(pLeft), varDataVal(pRight), minLen); + if (ret == 0) { + if (len1 == len2) { + return 0; + } else { + return len1 > len2 ? 1 : -1; + } + } else { + return ret > 0 ? 1 : -1; + } +} + // string > number > bool > null // ref: https://dev.mysql.com/doc/refman/8.0/en/json.html#json-comparison int32_t compareJsonVal(const void *pLeft, const void *pRight) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 0a53ece746f31fb295f818a411ee4f778256f423..d2b9edf753f4bbe4e1ed50e0a7bbaaa70b710cd3 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -405,6 +405,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR, "Json not support in t TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_GROUP_ERROR, "Json not support in group/partition by") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JOB_NOT_EXIST, "Job not exist") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_QWORKER_QUIT, "Vnode/Qnode is quitting") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR, "Geometry not support in this operator") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") @@ -629,7 +630,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is inval TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, 
"Consumer mismatch") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed") -TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE, "Topic num out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE, "Group num out of range 100") // stream TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist") diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 90a7f3fe423004828f219a78cc5b9cd475753092..21fed2e1f5ba73cda0a2375fb66878f2a336db0d 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -33,6 +33,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb3.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py @@ -128,6 +129,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-19201.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3404.py +,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3581.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py @@ -779,7 +781,7 @@ ,,y,script,./test.sh -f tsim/user/basic.sim ,,y,script,./test.sh -f tsim/user/password.sim ,,y,script,./test.sh -f tsim/user/privilege_db.sim -,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim +#,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim ,,y,script,./test.sh -f tsim/user/privilege_topic.sim ,,y,script,./test.sh -f tsim/user/privilege_table.sim ,,y,script,./test.sh 
-f tsim/db/alter_option.sim diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index e539f115317abcd2b64279ec476f8f3464a559d5..fbf9d50c2568bb571349ae1b5874fed7c217f0e1 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -8,6 +8,9 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c udf -v 1 system sh/exec.sh -n dnode1 -s start sql connect +sql alter user root pass 'taosdata2' +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode1 -s start print ======== step1 udf system sh/compile_udf.sh diff --git a/tests/system-test/7-tmq/checkOffsetRowParams.py b/tests/system-test/7-tmq/checkOffsetRowParams.py index 8a24148064c8125513f07bab0745865a167126cd..f7e4c61c9c115b2e3048748d0fe3965e5e6dc1d2 100644 --- a/tests/system-test/7-tmq/checkOffsetRowParams.py +++ b/tests/system-test/7-tmq/checkOffsetRowParams.py @@ -245,7 +245,7 @@ class TDTestCase: tdSql.query("show consumers") tdSql.checkRows(1) - tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000,reset:earliest") + tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000ms,reset:earliest") time.sleep(2) tdLog.info("start insert data") diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py new file mode 100644 index 0000000000000000000000000000000000000000..f48eaa84d4eb7ad7b97115015de077eb05da3479 --- /dev/null +++ b/tests/system-test/7-tmq/tmqParamsTest.py @@ -0,0 +1,178 @@ + +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.wal_retention_period1 = 3600 + self.wal_retention_period2 
= 1 + self.commit_value_list = ["true", "false"] + self.offset_value_list = ["", "earliest", "latest", "none"] + self.tbname_value_list = ["true", "false"] + self.snapshot_value_list = ["true", "false"] + + # self.commit_value_list = ["true"] + # self.offset_value_list = ["none"] + # self.tbname_value_list = ["true"] + # self.snapshot_value_list = ["true"] + + def tmqParamsTest(self): + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'vgroups': 4, + 'stbName': 'stb', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'auto_commit_interval': "100"} + + + start_group_id = 1 + for snapshot_value in self.snapshot_value_list: + for commit_value in self.commit_value_list: + for offset_value in self.offset_value_list: + for tbname_value in self.tbname_value_list: + topic_name = 'topic1' + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + + tdLog.info("create topics from stb with filter") + queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topic_name, queryString) + 
tdSql.query(f'select * from information_schema.ins_databases') + db_wal_retention_period_list = list(map(lambda x:x[-8] if x[0] == paraDict['dbName'] else None, tdSql.queryResult)) + for i in range(len(db_wal_retention_period_list)): + if db_wal_retention_period_list[0] is None or db_wal_retention_period_list[-1] is None: + db_wal_retention_period_list.remove(None) + if snapshot_value =="true": + if db_wal_retention_period_list[0] != self.wal_retention_period2: + tdSql.execute(f"alter database {paraDict['dbName']} wal_retention_period {self.wal_retention_period2}") + time.sleep(self.wal_retention_period2+1) + tdSql.execute(f'flush database {paraDict["dbName"]}') + else: + if db_wal_retention_period_list[0] != self.wal_retention_period1: + tdSql.execute(f"alter database {paraDict['dbName']} wal_retention_period {self.wal_retention_period1}") + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expected_res = tdSql.queryRows + group_id = "csm_" + str(start_group_id) + consumer_dict = { + "group.id": group_id, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.commit.interval.ms": paraDict["auto_commit_interval"], + "enable.auto.commit": commit_value, + "auto.offset.reset": offset_value, + "experimental.snapshot.enable": snapshot_value, + "msg.with.table.name": tbname_value + } + consumer_commit = 1 if consumer_dict["enable.auto.commit"] == "true" else 0 + consumer_tbname = 1 if consumer_dict["msg.with.table.name"] == "true" else 0 + consumer_ret = "earliest" if offset_value == "" else offset_value + expected_parameters=f'tbname:{consumer_tbname},commit:{consumer_commit},interval:{paraDict["auto_commit_interval"]}ms,reset:{consumer_ret}' + if len(offset_value) == 0: + del consumer_dict["auto.offset.reset"] + consumer = Consumer(consumer_dict) + consumer.subscribe([topic_name]) + tdLog.info(f"enable.auto.commit: {commit_value}, auto.offset.reset: {offset_value}, experimental.snapshot.enable: 
{snapshot_value}, msg.with.table.name: {tbname_value}") + stop_flag = 0 + try: + while True: + res = consumer.poll(1) + tdSql.query('show consumers;') + consumer_info = tdSql.queryResult[0][-1] + if offset_value == "latest": + if not res and stop_flag == 1: + break + else: + if not res: + break + # err = res.error() + # if err is not None: + # raise err + # val = res.value() + # for block in val: + # print(block.fetchall()) + if offset_value == "latest" and stop_flag == 0: + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],int(round(time.time()*1000))) + stop_flag = 1 + finally: + consumer.unsubscribe() + consumer.close() + tdSql.checkEqual(consumer_info, expected_parameters) + start_group_id += 1 + tdSql.query('show subscriptions;') + subscription_info = tdSql.queryResult + if snapshot_value == "true": + if offset_value != "earliest" and offset_value != "": + if offset_value == "latest": + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0")), subscription_info)) + tdSql.checkEqual(sum(offset_value_list) > 0, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + elif offset_value == "none": + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, ['none']*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [0]*len(subscription_info)) + else: + if offset_value != "none": + offset_value_str = ",".join(list(map(lambda x: x[-2], subscription_info))) + tdSql.checkEqual("tsdb" in offset_value_str, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + else: + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, 
[None]*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [None]*len(subscription_info)) + else: + if offset_value != "none": + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0")), subscription_info)) + tdSql.checkEqual(sum(offset_value_list) > 0, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + else: + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, ['none']*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [0]*len(subscription_info)) + tdSql.execute(f"drop topic if exists {topic_name}") + tdSql.execute(f'drop database if exists {paraDict["dbName"]}') + + def run(self): + self.tmqParamsTest() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/99-TDcase/TS-3581.py b/tests/system-test/99-TDcase/TS-3581.py new file mode 100644 index 0000000000000000000000000000000000000000..18488af0a6a4a8c2d0650156d468f589c93325a7 --- /dev/null +++ b/tests/system-test/99-TDcase/TS-3581.py @@ -0,0 +1,79 @@ +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + 
projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def create_tables(self): + tdSql.execute(f'''CREATE STABLE `dwd_log_master` (`ts` TIMESTAMP, `dim_ip` NCHAR(64)) TAGS (`group_id` BIGINT, `st_hour` NCHAR(2), `org_id` NCHAR(32), + `dev_manufacturer_name` NCHAR(64), `dev_manufacturer_id` INT, `dev_category_name` NCHAR(64), `dev_category_id` INT, `dev_feature_name` NCHAR(64), + `dev_feature_id` INT, `dev_ip` NCHAR(64), `black_list` TINYINT, `white_list` TINYINT)''') + tdSql.execute(f'''CREATE TABLE `dwd_log_master_475021043` USING `dwd_log_master` (`group_id`, `st_hour`, `org_id`, `dev_manufacturer_name`, `dev_manufacturer_id`, + `dev_category_name`, `dev_category_id`, `dev_feature_name`, `dev_feature_id`, `dev_ip`, `black_list`, `white_list`) TAGS + (475021043, "14", NULL, NULL, NULL, NULL, NULL, NULL, NULL, "172.18.22.230", NULL, NULL)''') + + def insert_data(self): + tdLog.debug("start to insert data ............") + + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:30.000','192.168.192.102')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:31.000','172.18.23.249')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:32.000','192.168.200.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:33.000','172.18.22.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:34.000','192.168.210.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:35.000','192.168.192.100')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES 
('2023-06-26 14:38:36.000','192.168.192.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:37.000','172.18.23.231')") + + tdLog.debug("insert data ............ [OK]") + + def run(self): + tdSql.prepare() + self.create_tables() + self.insert_data() + tdLog.printNoPrefix("======== test TS-3581") + + for i in range(100): + tdSql.query(f"select first(ts), last(ts), count(*) from dwd_log_master;") + tdSql.checkRows(1) + print(tdSql.queryResult) + tdSql.checkData(0, 0, '2023-06-26 14:38:30.000') + return + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index d8920cb4c3d79aacb441bf9a1f512f4a5a4f62b6..af7f13c69c6088017f33cfabe3b81a17c9ab2587 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -17,6 +17,9 @@ #include #include +// save current database name +char curDBName[128] = ""; // TDB_MAX_DBNAME_LEN is 24, put large + int shell_conn_ws_server(bool first) { char cuttedDsn[SHELL_WS_DSN_BUFF] = {0}; int dsnLen = strlen(shell.args.dsn); @@ -59,6 +62,14 @@ int shell_conn_ws_server(bool first) { fprintf(stdout, "successfully connected to cloud service\n"); } fflush(stdout); + + // switch to current database if have + if(curDBName[0] !=0) { + char command[256]; + sprintf(command, "use %s;", curDBName); + shellRunSingleCommandWebsocketImp(command); + } + return 0; } @@ -290,7 +301,46 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { - fprintf(stdout, "Database changed.\r\n\r\n"); + + // copy dbname to curDBName + char *p = command; + bool firstStart = false; + bool firstEnd = false; + int i = 0; + while (*p != 0) { + if (*p != ' ') { + // not blank + if (!firstStart) { + firstStart = true; + 
} else if (firstEnd) { + if(*p == ';' && *p != '\\') { + break; + } + // database name + curDBName[i++] = *p; + if(i + 4 > sizeof(curDBName)) { + // DBName is too long, reset zero and break + i = 0; + break; + } + } + } else { + // blank + if(firstStart == true && firstEnd == false){ + firstEnd = true; + } + if(firstStart && firstEnd && i > 0){ + // blank after database name + break; + } + } + // move next + p++; + } + // append end + curDBName[i] = 0; + + fprintf(stdout, "Database changed to %s.\r\n\r\n", curDBName); fflush(stdout); ws_free_result(res); return;