diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b873e47b7405bd73b8cb9ef5f90e58c72409977b
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,25 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.3.0
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+
+  - repo: https://github.com/psf/black
+    rev: stable
+    hooks:
+      - id: black
+
+  - repo: https://github.com/pocc/pre-commit-hooks
+    rev: master
+    hooks:
+      - id: cppcheck
+        args: ["--error-exitcode=0"]
+
+  - repo: https://github.com/crate-ci/typos
+    rev: v1.15.7
+    hooks:
+      - id: typos
+
diff --git a/cmake/cmake.version b/cmake/cmake.version
index edc51f206cfeb0ee6eb3d4c92bda64b5af3d6814..fe35fbe7bd7822b447828f34627c1a9421393a62 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.0.6.0.alpha")
+  SET(TD_VER_NUMBER "3.1.0.0.alpha")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
index 3731882fb23677588e72ba5e9d39049af2dfd97d..4d1b67e451ecf50697156c2a838f83b31262b0b9 100644
--- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
 INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
 ```
 
-`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. 
For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert Multiple Rows @@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25); ``` -`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert into Multiple Tables @@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31); ``` -`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). 
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index 578f38e73d02efa0da04531986c037176d68482b..f5e0378a00eaeae5b2ff00f80bd8bc3e858a6e88 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -244,6 +244,8 @@ The following SQL statement creates a topic in TDengine: CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- There is an upper limit to the number of topics created, controlled by the parameter tmqMaxTopicNum, with a default of 20 + Multiple subscription types are supported. #### Subscribe to a Column @@ -265,14 +267,15 @@ You can subscribe to a topic through a SELECT statement. Statements that specify Syntax: ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows: - The table schema can be modified. - Unstructured data is returned. The format of the data returned changes based on the supertable schema. -- A different table schema may exist for every data block to be processed. +- The 'with meta' parameter is optional. When selected, statements such as creating super tables and sub tables will be returned, mainly used for Taosx to perform super table migration +- The 'where_condition' parameter is optional and will be used to filter and subscribe to sub tables that meet the criteria. Where conditions cannot have ordinary columns, only tags or tbnames. Functions can be used in where conditions to filter tags, but cannot be aggregate functions because sub table tag values cannot be aggregated. It can also be a constant expression, such as 2>1 (subscribing to all child tables), Or false (subscribe to 0 sub tables) - The data returned does not include tags. 
### Subscribe to a Database @@ -280,10 +283,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement Syntax: ```sql -CREATE TOPIC topic_name [WITH META] AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` -This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka. +This SQL statement creates a subscription to all tables in the database. + +- The 'with meta' parameter is optional. When selected, it will return statements for creating all super tables and sub tables in the database, mainly used for Taosx database migration ## Create a Consumer @@ -295,7 +300,7 @@ You configure the following parameters when creating a consumer: | `td.connect.user` | string | User Name | | | `td.connect.pass` | string | Password | | | `td.connect.port` | string | Port of the server side | | -| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | +| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. | | `client.id` | string | Client ID | Maximum length: 192. 
| | `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true | diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 825d3c6f8b9faf1d9147efed03697648d5c99ae0..5137e35c0a83ec972fb45b6aa37ee10d434bbfad 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f - For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions. - To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function. -There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. +There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be `_start`, `_finish`, `_init`, and `_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. 
### Implementing a Scalar Function in C
 The implementation of a scalar function is described as follows:
@@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows:
 def process(input: datablock) -> tuple[output_type]:
 ```
 
-Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype
+Description: this function processes datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype
 
 #### Aggregate UDF Interface
 
@@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]:
 # return tuple object consisted of object of type outputtype
 ```
 
-Noteļ¼šprocess() must be implemeted, init() and destroy() must be defined too but they can do nothing.
+Note: process() must be implemented, init() and destroy() must be defined too but they can do nothing.
 
 #### Aggregate Template
 
@@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type:
 #return obj of type outputtype
 ```
 
-Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
+Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. 
### Data Mapping between TDengine SQL and Python UDF
@@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart
 
 #### Sample 3: UDF with n arguments
 
-A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
+A UDF which accepts n integers, like (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
 
 ```python
 def init():
@@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s)
 
 #### Sample 4: Utilize 3rd party package
 
-A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty.
+A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firstly.
 
 ```shell
 pip3 install moment
@@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s)
 
 #### Sample 5: Aggregate Function
 
-An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). 
The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`.
+An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate function takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
 
 ```python
 import io
@@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging.
 2. log() is the function for logging, it converts the input object to string and output with an end of line
 3. destroy() closes the log file \
 4. start() returns the initial buffer for storing the intermediate result
-5. reduce() processes each daa block and aggregates the result
+5. reduce() processes each data block and aggregates the result
 6. finish() converts the final buffer() to final result\
 
 Create the UDF.
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index b517bcb3ccdd90b52d778914ba77db3dba71d393..afc1581c226b0c0fda57261d5f0c435da37874ee 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
 
 ELAPSED(ts_primary_key [, time_unit])
 ```
 
-**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. 
If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. +**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. **Return value type**: Double if the input value is not NULL; @@ -999,18 +999,14 @@ SAMPLE(expr, k) **Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. -**Return value type**: Same as the column being operated plus the associated timestamp +**Return value type**: Same as the column being operated -**Applicable data types**: Any data type except for tags of STable +**Applicable data types**: Any data type **Applicable nested query**: Inner query and Outer query **Applicable table types**: standard tables and supertables -**More explanations**: - -- This function cannot be used in expression calculation. - ### TAIL @@ -1055,11 +1051,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. +**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. 
**Return value type**:Same as the data type of the column being operated upon -**Applicable column types**: Any data types except for timestamp +**Applicable column types**: Any data types **Applicable table types**: table, STable diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index b082f7b888dc2e09e4a6ae5c38e7ece591d667e3..7f0b8c7769298b460ec7102d5e3fc0b8f2637ca7 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct A PARTITION BY clause is processed as follows: - The PARTITION BY clause must occur after the WHERE clause -- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. +- The PARTITION BY clause partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. - The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index eb70a7664b857a7971f24ea3ec7bc7f707f9fe38..bd4a60b20e87e21f8948aa64ed0f9bb86da6b6c6 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ Shows information about connections to the system. SHOW CONSUMERS; ``` -Shows information about all active consumers in the system. 
+Shows information about all consumers in the system. ## SHOW CREATE DATABASE diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index ebd2891a9ee444b4c1649385bb94b35b698cc52d..e8c407b125ab4d44a6b9512352fea9abb196ddcb 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java. | taos-jdbcdriver version | major changes | TDengine version | | :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: | -| 3.2.1 | subscription add seek function | 3.0.5.0 or later | +| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later | +| 3.2.2 | subscription add seek function | 3.0.5.0 or later | | 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later | | 3.2.0 | This version has been deprecated | - | | 3.1.0 | JDBC REST connection supports subscription over WebSocket | - | @@ -284,9 +285,9 @@ The configuration parameters in the URL are as follows: - batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. - charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. 
-- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms. -- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false. -- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true. +- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms. +- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false. +- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true. - useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. - httpPoolSize: size of REST concurrent requests. The default value is 20. @@ -352,9 +353,9 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. - TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone. -- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection. -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. 
It only takes effect when using JDBC REST connection and batchfetch is true. +- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection. +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx index 06d643c6c83e677a0cdcade91296ae2339f80fda..b3d4857d75e22f18c0dbcb4f2798c268f6fbcd3a 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -31,63 +31,78 @@ REST connections are supported on all platforms that can run Go. Please refer to [version support list](https://github.com/taosdata/driver-go#remind) -## Supported features +## Handling exceptions -### Native connections - -A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). 
The supported functional features are: - -* Normal queries -* Continuous queries -* Subscriptions -* Schemaless interface -* Parameter binding interface - -### REST connection - -A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported: +If it is a TDengine error, you can get the error code and error information in the following ways. +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -* Normal queries -* Continuous queries +## TDengine DataType vs. Go DataType + +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | + +**Note**: Only TAG supports JSON types ## Installation Steps ### Pre-installation preparation * Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) -- If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps +* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps Configure the environment variables and check the command. * ```go env``` * ```gcc -v``` -### Use go get to install - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### Manage with go mod +### Install the connectors 1. 
Initialize the project with the `go mod` command. - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. Introduce taosSql - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. Update the dependency packages with `go mod tidy`. - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. @@ -98,8 +113,6 @@ Configure the environment variables and check the command. ## Establishing a connection -### Data source name (DSN) - Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally): ``` text @@ -111,9 +124,7 @@ DSN in full form. ```text username:password@protocol(address)/dbname?param=value ``` -### Connecting via connector - - + _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver. 
@@ -209,340 +220,902 @@ func main() { -## Usage examples +### Specify the URL and Properties to get the connection -### Write data +The Go connector does not support this feature -#### SQL Write +### Priority of configuration parameters - +The Go connector does not support this feature -#### InfluxDB line protocol write - - +## Usage examples -#### OpenTSDB Telnet line protocol write +### Create database and tables - +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` -#### OpenTSDB JSON line protocol write +### Insert data - + -### Query data +### Querying data -### More sample programs - -* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) +### execute SQL with reqId +This reqId can be used to request link tracing. -## Usage limitations - -Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. 
+```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +### Writing data via parameter binding -The complete example is as follows. + + ```go package main import ( - "database/sql" - "fmt" "time" - _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := af.Open("", "root", "taosdata", "", 0) if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return + panic(err) } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") if err != nil { - fmt.Println("failed to insert, err:", err) - return + panic(err) } - rows, err := taos.Query("select * from tb1") + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 
binary(20)," + + "c13 nchar(20)" + + ")") if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } - - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) + stmt := db.InsertStmt() + err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). + AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). 
+ AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 } ``` -## Frequently Asked Questions - -1. bind interface in database/sql crashes - - REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. - -2. error `[0x217] Database not specified or available` after executing other statements with `use db` statement - - The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. - -3. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` - - Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. - -4. `readBufferSize` parameter has no significant effect after being increased - - Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. - -5. `disableCompression` parameter is set to `false` when the query efficiency is reduced - - When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. - -6. `go get` command can't get the package, or timeout to get the package - - Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. 
- -## Common APIs - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - Use This API to open a DB, returning an object of type \*DB. - -:::info -This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal. -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` built-in method to execute non-query related SQL. - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` Built-in method to execute query statements. - -### Advanced functions (af) API - -The `af` package encapsulates TDengine advanced functions such as connection management, subscriptions, schemaless, parameter binding, etc. - -#### Connection management - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - This API creates a connection to taosd via cgo. - -* `func (conn *Connector) Close() error` - - Closes the connection. - -#### Subscribe - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - -Creates consumer group. - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes a topic. - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes to topics. - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - -Polling information. - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose - -Commit information. - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). 
- -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose - -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Unsubscribe() error` - -Unsubscribe. - -* `func (c *Consumer) Close() error` - -Close consumer. - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` + + - Write to InfluxDB line protocol. +```go +package main -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` +import ( + "database/sql" + "fmt" + "time" - Write OpenTDSB telnet protocol data. + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" +) -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_json tags(?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } - Writes OpenTSDB JSON protocol data. + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } -#### parameter binding + } +} -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} - Parameter bound single row insert. +``` -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` + + - Initialize the parameters. -* `func (stmt *InsertStmt) Prepare(sql string) error` +### Schemaless Writing - Parameter binding preprocessing SQL statement. 
+ + -* `func (stmt *InsertStmt) SetTableName(name string) error` +```go +import ( + "fmt" - Bind the table name parameter. + "github.com/taosdata/driver-go/v3/af" +) -* `func (stmt *InsertStmt) SetSubTableName(name string) error` +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` - Parameter binding to set the sub table name. + + -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` +```go +import ( + "database/sql" + "log" + "time" - Parameter bind multiple rows of data. 
+ "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) -* `func (stmt *InsertStmt) AddBatch() error` +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" - Add to a parameter-bound batch. + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } +} +``` -* `func (stmt *InsertStmt) Execute() error` + + - Execute a parameter binding. -* `func (stmt *InsertStmt) GetAffectedRows() int` +### Schemaless with reqId - Gets the number of affected rows inserted by the parameter binding. 
+```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` -* `func (stmt *InsertStmt) Close() error` +You can get the unique id by `common.GetReqID()`. - Closes the parameter binding. +### Data Subscription -### Subscribe via WebSocket +The TDengine Go Connector supports subscription functionality with the following application API. -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` +#### Create a Topic -Creates consumer group. +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose +#### Create a Consumer -Subscribes a topic. +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose +#### Subscribe to consume data -Subscribes to topics. 
+```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` +#### Assignment subscription Offset -Polling information. +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose +#### Close subscriptions -Commit information. +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` +#### Full Sample Code -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). + + -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose +```go +package main -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). +import ( + "fmt" + "os" -* `func (c *Consumer) Unsubscribe() error` + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) -Unsubscribe. 
+func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } -* `func (c *Consumer) Close() error` + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } -Close consumer. 
+ err = consumer.Close() + if err != nil { + panic(err) + } +} +``` -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) + + -### parameter binding via WebSocket +```go +package main -* `func NewConnector(config *Config) (*Connector, error)` +import ( + "database/sql" + "fmt" - Create a connection. + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) -* `func (c *Connector) Init() (*Stmt, error)` +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + 
fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } - Initialize the parameters. + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } -* `func (c *Connector) Close() error` + err = consumer.Close() + if err != nil { + panic(err) + } +} - Close the connection. +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` -* `func (s *Stmt) Prepare(sql string) error` + + - Parameter binding preprocessing SQL statement. +### More sample programs -* `func (s *Stmt) SetTableName(name string) error` +* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) - Bind the table name parameter. -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` +## Frequently Asked Questions - Set tags. +1. bind interface in database/sql crashes -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` + REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. - Parameter bind multiple rows of data. +2. 
error `[0x217] Database not specified or available` after executing other statements with `use db` statement -* `func (s *Stmt) AddBatch() error` + The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. - Add to a parameter-bound batch. +3. use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` -* `func (s *Stmt) Exec() error` + Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. - Execute a parameter binding. +4. `readBufferSize` parameter has no significant effect after being increased -* `func (s *Stmt) GetAffectedRows() int` + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. - Gets the number of affected rows inserted by the parameter binding. +5. `disableCompression` parameter is set to `false` when the query efficiency is reduced -* `func (s *Stmt) Close() error` + When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. - Closes the parameter binding. +6. `go get` command can't get the package, or timeout to get the package -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) + Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. 
## API Reference diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 344bd3590ec3e970055f9d2bf3381a20de77534e..986b5cd104e0aef2dadefb60efd6f574576e7a4d 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -31,21 +31,57 @@ Websocket connections are supported on all platforms that can run Go. | connector-rust version | TDengine version | major features | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | +| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | | v0.8.0 | 3.0.4.0 | Support schemaless insert. | | v0.7.6 | 3.0.3.0 | Support req_id in query. | | v0.6.0 | 3.0.0.0 | Base features. | The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. -## Installation +## Handling exceptions + +After the error is reported, the specific information of the error can be obtained: + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType vs. 
Rust DataType + +TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Rust is as follows: + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +Note: Only TAG supports JSON types + +## Installation Steps ### Pre-installation preparation * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver) -### Add taos dependency +### Install the connectors Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: @@ -146,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` After the connection is established, you can perform operations on your database. 
@@ -228,41 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se ## Usage examples -### Write data +### Create database and tables + +```rust +use taos::*; -#### SQL Write +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. + +### Insert data -#### STMT Write +### Query data + + + +### execute SQL with req_id + +This req_id can be used to request link tracing. + +```rust +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; +``` + +### Writing data via parameter binding + +TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. + +Parameter binding details see [API Reference](#stmt-api) -#### Schemaless Write +### Schemaless Writing + +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). 
-### Query data +### Schemaless with req_id - +This req_id can be used to request link tracing. -## API Reference +```rust +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; + +client.put(&sml_data)? +``` + +### Data Subscription + +TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). + +#### Create a Topic + +```rust +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; +``` + +#### Create a Consumer + +You create a TMQ connector by using a DSN. + +```rust +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; +``` + +Create a consumer: + +```rust +let mut consumer = tmq.build()?; +``` + +#### Subscribe to consume data + +A single consumer can subscribe to one or more topics. + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? 
{ + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +Get assignmentsļ¼š + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` -### Connector Constructor +#### Assignment subscription Offset -You create a connector constructor by using a DSN. +Seek offsetļ¼š + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 ```rust -let cfg = TaosBuilder::default().build()?; +consumer.offset_seek(topic, vgroup_id, offset).await; ``` -You use the builder object to create multiple connections. +#### Close subscriptions ```rust -let conn: Taos = cfg.build(); +consumer.unsubscribe().await; ``` -### Connection pooling +The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. + +- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. +- `client.id`: Subscriber client ID. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. +- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. +- `auto.commit.interval.ms`: Interval for automatic commits. + +#### Full Sample Code + +For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). + +### Use with connection pool In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2]. 
@@ -292,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos]. let taos = pool.get()?; ``` -### Connectors +### More sample programs + +The source code of the sample application is under `TDengine/examples/rust` : + +[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## Frequently Asked Questions + +For additional troubleshooting, see [FAQ](../../../train-faq/faq). + +## API Reference The [Taos][struct.Taos] object provides an API to perform operations on multiple databases. @@ -378,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. - `.use_database(database: &str)`: Executes the `USE` statement. -In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage. +In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage. -### Bind Interface +

+
+#### Bind Interface
+
+

Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement. @@ -391,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; The bind object provides a set of interfaces for implementing parameter binding. -#### `.set_tbname(name)` +`.set_tbname(name)` To bind table names. @@ -400,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` Bind sub-table table names and tag values when the SQL statement uses a super table. @@ -410,7 +611,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("taos".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` Bind value types. Use the [ColumnView] structure to create and bind the required types. @@ -434,7 +635,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` Execute SQL. [Stmt] objects can be reused, re-binded, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`. @@ -449,92 +650,6 @@ stmt.execute()?; For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs). -### Subscriptions - -TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). - -You create a TMQ connector by using a DSN. - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -Create a consumer: - -```rust -let mut consumer = tmq.build()?; -``` - -A single consumer can subscribe to one or more topics. - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. 
- -```rust -{ - let mut stream = consumer.stream(); - - while let Some((offset, message)) = stream.try_next().await? { - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -Get assignmentsļ¼š - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -Seek offsetļ¼š - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -Unsubscribe: - -```rust -consumer.unsubscribe().await; -``` - -The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. - -- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. -- `client.id`: Subscriber client ID. -- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. -- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. -- `auto.commit.interval.ms`: Interval for automatic commits. - -For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos). diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 461bdfbf162e696b430c1edb9b09ada70e086fb9..43713117f95318987857a74a0c704531a998da45 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -24,6 +24,16 @@ The source code for the Python connector is hosted on [GitHub](https://github.co We recommend using the latest version of `taospy`, regardless of the version of TDengine. +|Python Connector Version|major changes| +|:-------------------:|:----:| +|2.7.9|support for getting assignment and seek function on subscription| +|2.7.8|add `execute_many` method| + +|Python Websocket Connector Version|major changes| +|:----------------------------:|:-----:| +|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| +|0.2.4|support `unsubscribe` on subscription| + ## Handling Exceptions There are 4 types of exception in python connector. diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index f4d9ba8e428792cbd525f15632eff5e14a3ba83a..a5c1553402a75f902197c5e466d12aaf663eedb8 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.6.0 + + + ## 3.0.5.1 diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index a87a1f64f80223a8b19b21bd277973952cf8dfc8..54a8af2287b517ee7a8792fc427731cbe9e0500f 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -243,6 +243,7 @@ TDengine ä½æē”Ø SQL 创å»ŗäø€äøŖ topicļ¼š ```sql CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- topic创å»ŗäøŖꕰ꜉äøŠé™ļ¼Œé€ščæ‡å‚ę•° tmqMaxTopicNum ęŽ§åˆ¶ļ¼Œé»˜č®¤ 20 äøŖ TMQ ę”ÆęŒå¤šē§č®¢é˜…ē±»åž‹ļ¼š @@ -265,14 +266,15 @@ CREATE TOPIC topic_name as subquery čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` äøŽ `SELECT * from stbName` č®¢é˜…ēš„åŒŗ别ę˜Æļ¼š - äøä¼šé™åˆ¶ē”Øꈷēš„č”Øē»“ęž„å˜ę›“怂 - čæ”回ēš„ę˜Æ非ē»“ęž„åŒ–ēš„ę•°ę®ļ¼ščæ”å›žę•°ę®ēš„ē»“ęž„ä¼šéšä¹‹č¶…ēŗ§č”Øēš„č”Øē»“ęž„å˜åŒ–č€Œå˜åŒ–ć€‚ -- ē”ØꈷåƹäŗŽč¦å¤„ē†ēš„ęƏäø€äøŖę•°ę®å—éƒ½åÆčƒ½ęœ‰äøåŒēš„č”Øē»“Ꞅ怂 +- with meta å‚ę•°åÆ选ļ¼Œé€‰ę‹©ę—¶å°†čæ”回创å»ŗ超ēŗ§č”Øļ¼Œå­č”Øē­‰čƭ叄ļ¼Œäø»č¦ē”ØäŗŽtaosxåšč¶…ēŗ§č”Øčæē§» +- where_condition å‚ę•°åÆ选ļ¼Œé€‰ę‹©ę—¶å°†ē”Øę„čæ‡ę»¤ē¬¦åˆę”件ēš„子č”Øļ¼Œč®¢é˜…čæ™äŗ›å­č”Ø怂where ę”ä»¶é‡Œäøčƒ½ęœ‰ę™®é€šåˆ—ļ¼ŒåŖčƒ½ę˜Ætagꈖtbnameļ¼Œwhereę”ä»¶é‡ŒåÆ仄ē”Øå‡½ę•°ļ¼Œē”Øę„čæ‡ę»¤tagļ¼Œä½†ę˜Æäøčƒ½ę˜Æčšåˆå‡½ę•°ļ¼Œå› äøŗ子č”Øtagå€¼ę— ę³•åščšåˆć€‚ä¹ŸåÆ仄ę˜Æåøø量č”Øč¾¾å¼ļ¼ŒęƔ如 2 > 1ļ¼ˆč®¢é˜…å…ØéƒØ子č”Øļ¼‰ļ¼Œęˆ–者 falseļ¼ˆč®¢é˜…0äøŖ子č”Øļ¼‰ 
- čæ”å›žę•°ę®äøåŒ…å«ę ‡ē­¾ć€‚ ### ę•°ę®åŗ“č®¢é˜… @@ -280,11 +282,13 @@ CREATE TOPIC topic_name AS STABLE stb_name čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` 通čæ‡čÆ„čƭ叄åÆ创å»ŗäø€äøŖåŒ…å«ę•°ę®åŗ“ę‰€ęœ‰č”Øę•°ę®ēš„č®¢é˜… +- with meta å‚ę•°åÆ选ļ¼Œé€‰ę‹©ę—¶å°†čæ”回创å»ŗę•°ę®åŗ“é‡Œę‰€ęœ‰č¶…ēŗ§č”Øļ¼Œå­č”Øēš„čƭ叄ļ¼Œäø»č¦ē”ØäŗŽtaosxåšę•°ę®åŗ“čæē§» + ## 创å»ŗę¶ˆč“¹č€… *consumer* ę¶ˆč“¹č€…éœ€č¦é€ščæ‡äø€ē³»åˆ—配ē½®é€‰é”¹åˆ›å»ŗļ¼ŒåŸŗē”€é…ē½®é”¹å¦‚äø‹č”Øꉀē¤ŗļ¼š @@ -295,7 +299,7 @@ CREATE TOPIC topic_name AS DATABASE db_name; | `td.connect.user` | string | ē”Øęˆ·å | | | `td.connect.pass` | string | åƆē  | | | `td.connect.port` | integer | ęœåŠ”ē«Æēš„ē«Æ口号 | | -| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ | **åæ…唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂 | +| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ |
**åæ…唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂
ęƏäøŖtopicęœ€å¤šåÆå»ŗē«‹100äøŖ consumer group | | `client.id` | string | å®¢ęˆ·ē«Æ ID | ęœ€å¤§é•æåŗ¦ļ¼š192怂 | | `auto.offset.reset` | enum | ę¶ˆč“¹ē»„č®¢é˜…ēš„初始位ē½® |
`earliest`: default;ä»Žå¤“å¼€å§‹č®¢é˜…;
`latest`: ä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…;
`none`: ę²”ęœ‰ęäŗ¤ēš„ offset ę— ę³•č®¢é˜… | | `enable.auto.commit` | boolean | ę˜Æ否åÆē”Øę¶ˆč“¹ä½ē‚¹č‡ŖåŠØꏐäŗ¤ļ¼Œtrue: č‡ŖåŠØꏐäŗ¤ļ¼Œå®¢ęˆ·ē«Æåŗ”ē”Øꗠ需commitļ¼›falseļ¼šå®¢ęˆ·ē«Æåŗ”ē”Øéœ€č¦č‡Ŗč”Œcommit | é»˜č®¤å€¼äøŗ true | diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index ae11273a39779bd5cc83968f48767cace7ff346a..ff464376873767f1d6bee28b254d1f58640abffb 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -17,7 +17,7 @@ TDengine ę”Æꌁ通čæ‡ C/Python čÆ­č؀čæ›č”Œ UDF å®šä¹‰ć€‚ęŽ„äø‹ę„ē»“合ē¤ŗ例 - čšåˆå‡½ę•°éœ€č¦å®žēŽ°čšåˆęŽ„å£å‡½ę•° aggfn_start ļ¼Œ aggfn ļ¼Œ aggfn_finish怂 - å¦‚ęžœéœ€č¦åˆå§‹åŒ–ļ¼Œå®žēŽ° udf_initļ¼›å¦‚ęžœéœ€č¦ęø…ē†å·„作ļ¼Œå®žēŽ°udf_destroy怂 -ęŽ„å£å‡½ę•°ēš„名ē§°ę˜Æ UDF 名ē§°ļ¼Œęˆ–者ę˜Æ UDF 名ē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ_start, _finish, _init, _destroy)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfnļ¼Œaggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ +ęŽ„å£å‡½ę•°ēš„名ē§°ę˜Æ UDF 名ē§°ļ¼Œęˆ–者ę˜Æ UDF 名ē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ`_start`, `_finish`, `_init`, `_destroy`)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfnļ¼Œaggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ ### ē”Ø C čÆ­č؀实ēŽ°ę ‡é‡å‡½ę•° ę ‡é‡å‡½ę•°å®žēŽ°ęØ”ęæ如äø‹ diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 27b732b8835c2290c6cc1e55c35cb6e69f3b957d..c7da2bd4f545155e62a8bb83ff7554021aa16864 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -36,14 +36,15 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Java ēš„å¹³å°ć€‚ | taos-jdbcdriver ē‰ˆęœ¬ | äø»č¦å˜åŒ– | TDengine ē‰ˆęœ¬ | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | +| 3.2.3 | äæ®å¤ ResultSet åœØäø€äŗ›ęƒ…å†µę•°ę®č§£ęžå¤±č“„ | - | | 3.2.2 | ę–°å¢žåŠŸčƒ½ļ¼šę•°ę®č®¢é˜…ę”Æꌁ seek åŠŸčƒ½ć€‚ | 3.0.5.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.1 | ę–°å¢žåŠŸčƒ½ļ¼šWebSocket čæžęŽ„ę”Æꌁ schemaless äøŽ prepareStatement å†™å…„ć€‚å˜ę›“ļ¼šconsumer poll 
čæ”回ē»“ęžœé›†äøŗ ConsumerRecordļ¼ŒåÆ通čæ‡ value() čŽ·å–ęŒ‡å®šē»“ęžœé›†ę•°ę®ć€‚ | 3.0.3.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.0 | 存åœØčæžęŽ„问题ļ¼ŒäøęŽØ荐ä½æē”Ø | - | | 3.1.0 | WebSocket čæžęŽ„ę”ÆęŒč®¢é˜…åŠŸčƒ½ | - | | 3.0.1 - 3.0.4 | äæ®å¤äø€äŗ›ęƒ…况äø‹ē»“ęžœé›†ę•°ę®č§£ęžé”™čÆÆēš„é—®é¢˜ć€‚3.0.1 åœØ JDK 11 ēŽÆ境ē¼–čƑļ¼ŒJDK 8 ēŽÆ境äø‹å»ŗč®®ä½æē”Ø其他ē‰ˆęœ¬ | - | | 3.0.0 | ę”Æꌁ TDengine 3.0 | 3.0.0.0 åŠę›“é«˜ē‰ˆęœ¬ | -| 2.0.42 | äæ®åœØ WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”回值 | - | -| 2.0.41 | äæ®ę­£ REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåƆē č½¬ē ę–¹å¼ | - | +| 2.0.42 | äæ®å¤ WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”回值 | - | +| 2.0.41 | äæ®å¤ REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåƆē č½¬ē ę–¹å¼ | - | | 2.0.39 - 2.0.40 | 增加 REST čæžęŽ„/čÆ·ę±‚ č¶…ę—¶č®¾ē½® | - | | 2.0.38 | JDBC REST čæžęŽ„å¢žåŠ ę‰¹é‡ę‹‰å–åŠŸčƒ½ | - | | 2.0.37 | 增加åƹ json tag ę”Æꌁ | - | @@ -287,9 +288,9 @@ url äø­ēš„配ē½®å‚ę•°å¦‚äø‹ļ¼š - batchfetch: trueļ¼šåœØę‰§č”ŒęŸ„čÆ¢ę—¶ę‰¹é‡ę‹‰å–ē»“ęžœé›†ļ¼›falseļ¼šé€č”Œę‹‰å–ē»“ęžœé›†ć€‚é»˜č®¤å€¼äøŗļ¼šfalseć€‚é€č”Œę‹‰å–ē»“ęžœé›†ä½æē”Ø HTTP ę–¹å¼čæ›č”Œę•°ę®ä¼ č¾“怂JDBC REST čæžęŽ„ę”ÆęŒę‰¹é‡ę‹‰å–ę•°ę®åŠŸčƒ½ć€‚taos-jdbcdriver äøŽ TDengine 之闓通čæ‡ WebSocket čæžęŽ„čæ›č”Œę•°ę®ä¼ č¾“怂ē›øč¾ƒäŗŽ HTTPļ¼ŒWebSocket åÆ仄ä½æ JDBC REST čæžęŽ„ę”ÆęŒå¤§ę•°ę®é‡ęŸ„čÆ¢ļ¼Œå¹¶ęå‡ęŸ„čÆ¢ę€§čƒ½ć€‚ - charset: 当开åÆę‰¹é‡ę‹‰å–ę•°ę®ę—¶ļ¼ŒęŒ‡å®šč§£ęžå­—ē¬¦äø²ę•°ę®ēš„å­—ē¬¦é›†ć€‚ - batchErrorIgnoreļ¼štrueļ¼šåœØę‰§č”Œ Statement ēš„ executeBatch ę—¶ļ¼Œå¦‚ęžœäø­é—“꜉äø€ę” SQL ę‰§č”Œå¤±č“„ļ¼Œē»§ē»­ę‰§č”Œäø‹é¢ēš„ SQL äŗ†ć€‚falseļ¼šäøå†ę‰§č”Œå¤±č“„ SQL 后ēš„任何čÆ­å„ć€‚é»˜č®¤å€¼äøŗļ¼šfalse怂 -- httpConnectTimeout: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 5000怂 -- httpSocketTimeout: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000怂仅åœØ batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ -- messageWaitTimeout: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 3000怂 仅åœØ batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ +- httpConnectTimeout: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂 +- httpSocketTimeout: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000怂仅åœØ 
batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ +- messageWaitTimeout: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂 仅åœØ batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ - useSSL: čæžęŽ„äø­ę˜Æ否ä½æē”Ø SSL怂 - httpPoolSize: REST 并发čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 @@ -355,9 +356,9 @@ properties äø­ēš„配ē½®å‚ę•°å¦‚äø‹ļ¼š - TSDBDriver.PROPERTY_KEY_CHARSETļ¼šå®¢ęˆ·ē«Æä½æē”Øēš„å­—ē¬¦é›†ļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå­—ē¬¦é›†ć€‚ - TSDBDriver.PROPERTY_KEY_LOCALEļ¼šä»…åœØä½æē”Ø JDBC 原ē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«ÆčÆ­č؀ēŽÆ境ļ¼Œé»˜č®¤å€¼ē³»ē»Ÿå½“前 locale怂 - TSDBDriver.PROPERTY_KEY_TIME_ZONEļ¼šä»…åœØä½æē”Ø JDBC 原ē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«Æä½æē”Øēš„ę—¶åŒŗļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå½“å‰ę—¶åŒŗ怂 -- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 5000怂仅åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000怂仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 3000怂 仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂仅åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超ꗶꗶ闓ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000怂仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ false ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 궈ęÆ超ꗶꗶ闓, 单位 msļ¼Œ é»˜č®¤å€¼äøŗ 60000怂 仅åœØ REST čæžęŽ„äø” batchfetch č®¾ē½®äøŗ true ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.PROPERTY_KEY_USE_SSL: čæžęŽ„äø­ę˜Æ否ä½æē”Ø SSL怂仅åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.HTTP_POOL_SIZE: REST 并发čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 ę­¤å¤–åƹ JDBC 原ē”ŸčæžęŽ„ļ¼Œé€ščæ‡ęŒ‡å®š URL 和 Properties čæ˜åÆä»„ęŒ‡å®šå…¶ä»–å‚ę•°ļ¼ŒęÆ”å¦‚ę—„åæ—ēŗ§åˆ«ć€SQL é•æåŗ¦ē­‰ć€‚ꛓ多čƦē»†é…ē½®čÆ·å‚č€ƒ[å®¢ęˆ·ē«Æ配ē½®](/reference/config/#ä»…å®¢ęˆ·ē«Æ适ē”Ø)怂 diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx index d431be35cb0b709cdc6c5cadd2c8043702acbe11..90ef4d83cafa604fbe531a9f9ea0ece5b11b2df4 100644 --- a/docs/zh/08-connector/20-go.mdx 
+++ b/docs/zh/08-connector/20-go.mdx @@ -32,24 +32,44 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ čÆ·å‚č€ƒ[ē‰ˆęœ¬ę”ÆęŒåˆ—č”Ø](https://github.com/taosdata/driver-go#remind) -## ę”Æꌁēš„åŠŸčƒ½ē‰¹ę€§ +## 处ē†å¼‚åøø -### 原ē”ŸčæžęŽ„ +å¦‚ęžœę˜Æ TDengine 错čÆÆåÆ仄通čæ‡ä»„äø‹ę–¹å¼čŽ·å–é”™čÆÆē å’Œé”™čÆÆäæ”ęÆ怂 -ā€œåŽŸē”ŸčæžęŽ„ā€ęŒ‡čæžęŽ„å™Ø通čæ‡ TDengine å®¢ęˆ·ē«Æ驱åŠØļ¼ˆtaoscļ¼‰ē›“ꎄäøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„怂ę”Æꌁēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š - -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ -* č®¢é˜… -* schemaless ęŽ„å£ -* å‚ę•°ē»‘å®šęŽ„口 - -### REST čæžęŽ„ - -"REST čæžęŽ„"ꌇčæžęŽ„å™Ø通čæ‡ taosAdapter ē»„ä»¶ęä¾›ēš„ REST API äøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„怂ę”Æꌁēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ +## TDengine DataType 和 Go DataType + +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | + +**ę³Øꄏ**ļ¼šJSON ē±»åž‹ä»…åœØ tag äø­ę”Æꌁ怂 ## å®‰č£…ę­„éŖ¤ @@ -63,32 +83,28 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ * ```go env``` * ```gcc -v``` -### ä½æē”Ø go get å®‰č£… - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### ä½æē”Ø go mod ē®”ē† +### å®‰č£…čæžęŽ„å™Ø 1. ä½æē”Ø `go mod` 命令初始化锹ē›®ļ¼š - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. 
引兄 taosSql ļ¼š - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. ä½æē”Ø `go mod tidy` ę›“ę–°ä¾čµ–åŒ…ļ¼š - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. ä½æē”Ø `go run taos-demo` čæč”Œē؋åŗęˆ–ä½æē”Ø `go build` 命令ē¼–čƑå‡ŗäŗŒčæ›åˆ¶ę–‡ä»¶ć€‚ @@ -99,8 +115,6 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ ## å»ŗē«‹čæžęŽ„ -### ę•°ę®ęŗåē§°ļ¼ˆDSNļ¼‰ - ę•°ę®ęŗåē§°å…·ęœ‰é€šē”Øę ¼å¼ļ¼Œä¾‹å¦‚ [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php)ļ¼Œä½†ę²”꜉ē±»åž‹å‰ē¼€ļ¼ˆę–¹ę‹¬å·č”Øē¤ŗåÆ选ļ¼‰ļ¼š ``` text @@ -113,9 +127,7 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ username:password@protocol(address)/dbname?param=value ``` -### ä½æē”ØčæžęŽ„å™Øčæ›č”ŒčæžęŽ„ - - + _taosSql_ 通čæ‡ cgo 实ēŽ°äŗ† Go ēš„ `database/sql/driver` ęŽ„å£ć€‚åŖéœ€č¦å¼•å…„é©±åŠØå°±åÆ仄ä½æē”Ø [`database/sql`](https://golang.org/pkg/database/sql/) ēš„ęŽ„å£ć€‚ @@ -213,332 +225,900 @@ func main() { -## ä½æē”Øē¤ŗ例 +### ęŒ‡å®š URL 和 Properties čŽ·å–čæžęŽ„ -### å†™å…„ę•°ę® +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ -#### SQL 写兄 - - +### 配ē½®å‚ę•°ēš„优先ēŗ§ -#### InfluxDB č”Œåč®®å†™å…„ +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ - +## ä½æē”Øē¤ŗ例 -#### OpenTSDB Telnet č”Œåč®®å†™å…„ +### 创å»ŗę•°ę®åŗ“å’Œč”Ø - +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` -#### OpenTSDB JSON č”Œåč®®å†™å…„ +### ę’å…„ę•°ę® - + ### ęŸ„čÆ¢ę•°ę® -### ę›“å¤šē¤ŗ例ē؋åŗ - -* 
[ē¤ŗ例ē؋åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) -* [č§†é¢‘ę•™ē؋](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 +### ę‰§č”Œåø¦ęœ‰ reqId ēš„ SQL -## ä½æē”Ø限制 +ę­¤ reqId åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖ怂 -ē”±äŗŽ REST ęŽ„å£ę— ēŠ¶ę€ę‰€ä»„ `use db` čÆ­ę³•äøä¼šē”Ÿę•ˆļ¼Œéœ€č¦å°† db 名ē§°ę”¾åˆ° SQL čƭ叄äø­ļ¼Œå¦‚ļ¼š`create table if not exists tb1 (ts timestamp, a int)`ę”¹äøŗ`create table if not exists test.tb1 (ts timestamp, a int)`å¦åˆ™å°†ęŠ„é”™`[0x217] Database not specified or available`怂 +```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -也åÆ仄将 db 名ē§°ę”¾åˆ° DSN äø­ļ¼Œå°† `root:taosdata@http(localhost:6041)/` ę”¹äøŗ `root:taosdata@http(localhost:6041)/test`ć€‚å½“ęŒ‡å®šēš„ db äøå­˜åœØę—¶ę‰§č”Œ `create database` čƭ叄äøä¼šęŠ„错ļ¼Œč€Œę‰§č”Œé’ˆåƹčÆ„ db ēš„å…¶ä»–ęŸ„čÆ¢ęˆ–å†™å…„ę“ä½œä¼šęŠ„é”™ć€‚ +### 通čæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® -å®Œę•“ē¤ŗ例如äø‹ļ¼š + + ```go package main import ( - "database/sql" - "fmt" "time" - _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := af.Open("", "root", "taosdata", "", 0) if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return + panic(err) } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") if err != nil { - 
fmt.Println("failed to insert, err:", err) - return + panic(err) } - rows, err := taos.Query("select * from tb1") + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } - - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) + stmt := db.InsertStmt() + err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). + AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). 
+ AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). + AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 } ``` -## åøøč§é—®é¢˜ - -1. database/sql äø­ stmtļ¼ˆå‚ę•°ē»‘定ļ¼‰ē›øå…³ęŽ„å£å“©ęŗƒ - - REST äøę”ÆęŒå‚ę•°ē»‘定ē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 - -2. ä½æē”Ø `use db` čÆ­å„åŽę‰§č”Œå…¶ä»–čÆ­å„ęŠ„é”™ `[0x217] Database not specified or available` - - åœØ REST ęŽ„å£äø­ SQL čƭ叄ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - -3. ä½æē”Ø taosSql äøęŠ„错ä½æē”Ø taosRestful ꊄ错 `[0x217] Database not specified or available` - - 因äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - -4. `readBufferSize` å‚ę•°č°ƒå¤§åŽę— ę˜Žę˜¾ę•ˆęžœ - - `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čƢ速åŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ - -5. `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ę•ˆēŽ‡é™ä½Ž - - 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` 压ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ - -6. 
`go get` å‘½ä»¤ę— ę³•čŽ·å–åŒ…ļ¼Œęˆ–č€…čŽ·å–åŒ…č¶…ę—¶ - - č®¾ē½® Go 代ē† `go env -w GOPROXY=https://goproxy.cn,direct`怂 - -## åøøē”Ø API - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - čÆ„ API ē”Øę„ę‰“å¼€ DBļ¼Œčæ”回äø€äøŖē±»åž‹äøŗ \*DB ēš„åÆ¹č±”ć€‚ - -:::info -čÆ„ API ęˆåŠŸåˆ›å»ŗēš„ę—¶å€™ļ¼Œå¹¶ę²”ęœ‰åšęƒé™ē­‰ę£€ęŸ„ļ¼ŒåŖ꜉åœØēœŸę­£ę‰§č”Œ Query ꈖ者 Exec ēš„ę—¶å€™ę‰čƒ½ēœŸę­£ēš„去创å»ŗčæžęŽ„ļ¼Œå¹¶åŒę—¶ę£€ęŸ„ user/password/host/port ę˜Æäøę˜Æåˆę³•ć€‚ -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` 内ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒéžęŸ„čÆ¢ē›ø关 SQL怂 - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` 内ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒęŸ„čÆ¢čÆ­å„ć€‚ - -### 高ēŗ§åŠŸčƒ½ļ¼ˆafļ¼‰API - -`af` åŒ…å°č£…äŗ†čæžęŽ„ē®”ē†ć€č®¢é˜…态schemalessć€å‚ę•°ē»‘定ē­‰ TDengine 高ēŗ§åŠŸčƒ½ć€‚ - -#### čæžęŽ„ē®”ē† - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - čÆ„ API 通čæ‡ cgo 创å»ŗäøŽ taosd ēš„čæžęŽ„怂 - -* `func (conn *Connector) Close() error` - - 关闭äøŽ taosd ēš„čæžęŽ„怂 - -#### č®¢é˜… - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - - 创å»ŗę¶ˆč“¹č€…ć€‚ - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - č®¢é˜…å•äøŖäø»é¢˜ć€‚ - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - č®¢é˜…äø»é¢˜ć€‚ - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - - č½®čÆ¢ę¶ˆęÆ怂 - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - ꏐäŗ¤ę¶ˆęÆ怂 - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) - -* `func (c *Consumer) 
Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø - - ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) - -* `func (c *Consumer) Close() error` - - 关闭čæžęŽ„怂 - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - 写兄 InfluxDB č”Œåč®®ć€‚ + + -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` +```go +package main - 写兄 OpenTDSB telnet åč®®ę•°ę®ć€‚ +import ( + "database/sql" + "fmt" + "time" -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" +) - 写兄 OpenTSDB JSON åč®®ę•°ę®ć€‚ +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_json tags(?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } -#### å‚ę•°ē»‘定 + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). + AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` + } +} - å‚ę•°ē»‘å®šå•č”Œę’å…„怂 +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` +``` - åˆå§‹åŒ–å‚ę•°ć€‚ + + -* `func (stmt *InsertStmt) Prepare(sql string) error` +### ꗠęؔ式写兄 - å‚ę•°ē»‘定预处ē† SQL čÆ­å„ć€‚ + + -* `func (stmt *InsertStmt) SetTableName(name string) error` +```go +import ( + "fmt" - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ + 
"github.com/taosdata/driver-go/v3/af" +) -* `func (stmt *InsertStmt) SetSubTableName(name string) error` +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` - å‚ę•°ē»‘å®šč®¾ē½®å­č”Øåć€‚ + + -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` +```go +import ( + "database/sql" + "log" + "time" - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ + "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) -* `func (stmt *InsertStmt) AddBatch() error` +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + 
schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } +} +``` -* `func (stmt *InsertStmt) Execute() error` + + - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ +### ę‰§č”Œåø¦ęœ‰ reqId ēš„ę— ęؔ式写兄 -* `func (stmt *InsertStmt) GetAffectedRows() int` +```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ +åÆ仄通čæ‡ `common.GetReqID()` čŽ·å–å”Æäø€ id怂 -* `func (stmt *InsertStmt) Close() error` +### ę•°ę®č®¢é˜… - ē»“ęŸå‚ę•°ē»‘å®šć€‚ +TDengine Go čæžęŽ„å™Øę”ÆęŒč®¢é˜…åŠŸčƒ½ļ¼Œåŗ”ē”Ø API 如äø‹ļ¼š -### 通čæ‡ WebSocket č®¢é˜… +#### 创å»ŗ Topic -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` - 创å»ŗę¶ˆč“¹č€…ć€‚ +#### 创å»ŗ Consumer -* `func (c *Consumer) Subscribe(topic 
string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` - č®¢é˜…å•äøŖäø»é¢˜ć€‚ +#### č®¢é˜…ę¶ˆč“¹ę•°ę® -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` - č®¢é˜…äø»é¢˜ć€‚ +#### ęŒ‡å®šč®¢é˜… Offset -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` - č½®čÆ¢ę¶ˆęÆ怂 +#### å…³é—­č®¢é˜… -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` - ꏐäŗ¤ę¶ˆęÆ怂 +#### å®Œę•“ē¤ŗ例 -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` + + - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) +```go +package main -* `func 
(c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øꄏļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“前ęœŖä½æē”Ø +import ( + "fmt" + "os" - ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0ļ¼Œ driver-go >= v3.5.0) + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) -* `func (c *Consumer) Close() error` +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = 
consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } - 关闭čæžęŽ„怂 + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) + err = consumer.Close() + if err != nil { + panic(err) + } +} +``` -### 通čæ‡ WebSocket čæ›č”Œå‚ę•°ē»‘定 + + -* `func NewConnector(config *Config) (*Connector, error)` +```go +package main - 创å»ŗčæžęŽ„怂 +import ( + "database/sql" + "fmt" -* `func (c *Connector) Init() (*Stmt, error)` + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) - åˆå§‹åŒ–å‚ę•°ć€‚ +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 
nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } -* `func (c *Connector) Close() error` + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } - 关闭čæžęŽ„怂 + err = consumer.Close() + if err != nil { + panic(err) + } +} -* `func (s *Stmt) Prepare(sql string) error` +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` - å‚ę•°ē»‘定预处ē† SQL čÆ­å„ć€‚ + + -* `func (s *Stmt) SetTableName(name string) error` +### ę›“å¤šē¤ŗ例ē؋åŗ - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ +* [ē¤ŗ例ē؋åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) +* [č§†é¢‘ę•™ē؋](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` +## åøøč§é—®é¢˜ - å‚ę•°ē»‘å®šč®¾ē½®ę ‡ē­¾ć€‚ +1. 
database/sql äø­ stmtļ¼ˆå‚ę•°ē»‘定ļ¼‰ē›øå…³ęŽ„å£å“©ęŗƒ -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` + REST äøę”ÆęŒå‚ę•°ē»‘定ē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ +2. ä½æē”Ø `use db` čÆ­å„åŽę‰§č”Œå…¶ä»–čÆ­å„ęŠ„é”™ `[0x217] Database not specified or available` -* `func (s *Stmt) AddBatch() error` + åœØ REST ęŽ„å£äø­ SQL čƭ叄ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ +3. ä½æē”Ø taosSql äøęŠ„错ä½æē”Ø taosRestful ꊄ错 `[0x217] Database not specified or available` -* `func (s *Stmt) Exec() error` + 因äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čƭ叄äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Ø限制ē« čŠ‚怂 - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ +4. `readBufferSize` å‚ę•°č°ƒå¤§åŽę— ę˜Žę˜¾ę•ˆęžœ -* `func (s *Stmt) GetAffectedRows() int` + `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čƢ速åŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ +5. `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ę•ˆēŽ‡é™ä½Ž -* `func (s *Stmt) Close() error` + 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` 压ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ - ē»“ęŸå‚ę•°ē»‘å®šć€‚ +6. 
`go get` å‘½ä»¤ę— ę³•čŽ·å–åŒ…ļ¼Œęˆ–č€…čŽ·å–åŒ…č¶…ę—¶ -å®Œę•“å‚ę•°ē»‘定ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) + č®¾ē½® Go 代ē† `go env -w GOPROXY=https://goproxy.cn,direct`怂 ## API å‚č€ƒ diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx index c23228c8cf737c25bf3bce0269a7c08cb14a874d..79a6badfead70c27fc344b1e506aa8ea5afb624d 100644 --- a/docs/zh/08-connector/26-rust.mdx +++ b/docs/zh/08-connector/26-rust.mdx @@ -30,21 +30,57 @@ Websocket čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Rust ēš„å¹³å°ć€‚ | Rust čæžęŽ„å™Øē‰ˆęœ¬ | TDengine ē‰ˆęœ¬ | äø»č¦åŠŸčƒ½ | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | 궈ęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | +| v0.8.12 | 3.0.5.0 or later | 궈ęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | | v0.8.0 | 3.0.4.0 | ę”ÆꌁꗠęØ”å¼å†™å…„ć€‚ | | v0.7.6 | 3.0.3.0 | ę”ÆꌁåœØčÆ·ę±‚äø­ä½æē”Ø req_id怂 | | v0.6.0 | 3.0.0.0 | åŸŗē”€åŠŸčƒ½ć€‚ | Rust čæžęŽ„å™Ø仍ē„¶åœØåæ«é€Ÿå¼€å‘äø­ļ¼Œ1.0 ä¹‹å‰ę— ę³•äæčÆå…¶å‘åŽå…¼å®¹ć€‚å»ŗč®®ä½æē”Ø 3.0 ē‰ˆęœ¬ä»„äøŠēš„ TDengineļ¼Œä»„éæ免已ēŸ„é—®é¢˜ć€‚ -## å®‰č£… +## 处ē†é”™čÆÆ + +åœØęŠ„é”™åŽļ¼ŒåÆä»„čŽ·å–åˆ°é”™čÆÆēš„具体äæ”ęÆļ¼š + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType 和 Rust DataType + +TDengine ē›®å‰ę”ÆęŒę—¶é—“ęˆ³ć€ę•°å­—ć€å­—ē¬¦ć€åøƒå°”ē±»åž‹ļ¼ŒäøŽ Rust åƹåŗ”ē±»åž‹č½¬ę¢å¦‚äø‹ļ¼š + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +**ę³Øꄏ**ļ¼šJSON ē±»åž‹ä»…åœØ tag äø­ę”Æꌁ怂 + +## å®‰č£…ę­„éŖ¤ ### å®‰č£…å‰å‡†å¤‡ * å®‰č£… Rust 开发巄具链 * 
å¦‚ęžœä½æē”Ø原ē”ŸčæžęŽ„ļ¼ŒčÆ·å®‰č£… TDengine å®¢ęˆ·ē«Æ驱åŠØļ¼Œå…·ä½“ę­„éŖ¤čÆ·å‚č€ƒ[å®‰č£…å®¢ęˆ·ē«Æ驱åŠØ](../#å®‰č£…å®¢ęˆ·ē«Æ驱åŠØ) -### ę·»åŠ  taos ä¾čµ– +### å®‰č£…čæžęŽ„å™Ø ę ¹ę®é€‰ę‹©ēš„čæžęŽ„ę–¹å¼ļ¼ŒęŒ‰ē…§å¦‚äø‹čÆ“ę˜ŽåœØ [Rust](https://rust-lang.org) 锹ē›®äø­ę·»åŠ  [taos][taos] ä¾čµ–ļ¼š @@ -151,7 +187,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` å»ŗē«‹čæžęŽ„后ļ¼Œę‚ØåÆ仄čæ›č”Œē›øå…³ę•°ę®åŗ“ę“ä½œļ¼š @@ -233,41 +270,191 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { ## ä½æē”Øē¤ŗ例 -### å†™å…„ę•°ę® +### 创å»ŗę•°ę®åŗ“å’Œč”Ø + +```rust +use taos::*; -#### SQL 写兄 +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> **ę³Øꄏ**ļ¼šå¦‚ęžœäøä½æē”Ø `use db` ęŒ‡å®šę•°ę®åŗ“ļ¼Œåˆ™åŽē»­åƹč”Øēš„ę“ä½œéƒ½éœ€č¦å¢žåŠ ę•°ę®åŗ“名ē§°ä½œäøŗ前ē¼€ļ¼Œå¦‚ db.tb怂 + +### ę’å…„ę•°ę® -#### STMT 写兄 +### ęŸ„čÆ¢ę•°ę® + + + +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ SQL + +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖ怂 + +```rust +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; +``` + +### 通čæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® + +TDengine ēš„ Rust 
čæžęŽ„å™Ø实ēŽ°äŗ†å‚ę•°ē»‘å®šę–¹å¼åÆ¹ę•°ę®å†™å…„ļ¼ˆINSERTļ¼‰åœŗę™Æēš„ę”Æꌁ怂采ē”Øčæ™ē§ę–¹å¼å†™å…„ę•°ę®ę—¶ļ¼Œčƒ½éæ免 SQL čÆ­ę³•č§£ęžēš„资ęŗę¶ˆč€—ļ¼Œä»Žč€ŒåœØå¾ˆå¤šęƒ…å†µäø‹ę˜¾č‘—ęå‡å†™å…„ę€§čƒ½ć€‚ + +å‚ę•°ē»‘å®šęŽ„口čÆ¦č§[APIå‚č€ƒ](#stmt-api) -#### Schemaless 写兄 +### ꗠęؔ式写兄 + +TDengine ę”ÆꌁꗠęØ”å¼å†™å…„åŠŸčƒ½ć€‚ę— ęؔ式写兄兼容 InfluxDB ēš„ č”Œåč®®ļ¼ˆLine Protocolļ¼‰ć€OpenTSDB ēš„ telnet č”Œåč®®å’Œ OpenTSDB ēš„ JSON ę ¼å¼åč®®ć€‚čÆ¦ęƒ…čÆ·å‚č§[ꗠęؔ式写兄](../../reference/schemaless/)怂 -### ęŸ„čÆ¢ę•°ę® +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ę— ęؔ式写兄 - +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖ怂 -## API å‚č€ƒ +```rust +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; -### čæžęŽ„Ꞅ造å™Ø +client.put(&sml_data)? +``` + +### ę•°ę®č®¢é˜… + +TDengine 通čæ‡ę¶ˆęÆ队列 [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ -通čæ‡ DSN ę„ęž„å»ŗäø€äøŖčæžęŽ„å™ØꞄ造å™Ø怂 +#### 创å»ŗ Topic ```rust -let cfg = TaosBuilder::default().build()?; +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; ``` -ä½æē”Ø `builder` åÆ¹č±”åˆ›å»ŗ多äøŖčæžęŽ„ļ¼š +#### 创å»ŗ Consumer + +从 DSN 开始ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Ø怂 ```rust -let conn: Taos = cfg.build(); +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; ``` -### čæžęŽ„ę±  +创å»ŗę¶ˆč“¹č€…ļ¼š + +```rust +let mut consumer = tmq.build()?; +``` + +#### č®¢é˜…ę¶ˆč“¹ę•°ę® + +ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +TMQ 궈ęÆ队列ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆ仄ä½æē”Øē›øåŗ” API åƹęƏäøŖ궈ęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? 
{ + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` + +#### ęŒ‡å®šč®¢é˜… Offset + +ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 + +```rust +consumer.offset_seek(topic, vgroup_id, offset).await; +``` + +#### å…³é—­č®¢é˜… + +```rust +consumer.unsubscribe().await; +``` + +åƹäŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆ仄čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øꄏēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„怂 + +- `group.id`: 同äø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ +- `client.id`: åÆ选ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ +- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øꄏļ¼Œę­¤é€‰é”¹åœØ同äø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”怂 +- `enable.auto.commit`: å½“č®¾ē½®äøŗ `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØꠇ记ęؔ式ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ꀧäøę•ę„Ÿę—¶ļ¼ŒåÆ仄åÆē”Øę­¤ę–¹å¼ć€‚ +- `auto.commit.interval.ms`: č‡ŖåŠØꠇ记ēš„ꗶ闓闓隔怂 + +#### å®Œę•“ē¤ŗ例 + +å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
+ +### äøŽčæžęŽ„ę± ä½æē”Ø åœØå¤ę‚åŗ”ē”Øäø­ļ¼Œå»ŗč®®åÆē”ØčæžęŽ„걠怂[taos] ēš„čæžęŽ„걠默认ļ¼ˆå¼‚ę­„ęؔ式ļ¼‰ä½æē”Ø [deadpool] 实ēŽ°ć€‚ @@ -295,7 +482,17 @@ let pool: Pool = Pool::builder(Manager::from_dsn(self.dsn.clone()). let taos = pool.get()?; ``` -### čæžęŽ„ +### ę›“å¤šē¤ŗ例ē؋åŗ + +ē¤ŗ例ē؋åŗęŗē ä½äŗŽ `TDengine/examples/rust` äø‹: + +čÆ·å‚č€ƒļ¼š[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## åøøč§é—®é¢˜ + +čÆ·å‚č€ƒ [FAQ](../../../train-faq/faq) + +## API å‚č€ƒ [Taos][struct.Taos] åÆ¹č±”ęä¾›äŗ†å¤šäøŖę•°ę®åŗ“ę“ä½œēš„ APIļ¼š @@ -381,9 +578,13 @@ let taos = pool.get()?; - `.create_database(database: &str)`: ę‰§č”Œ `CREATE DATABASE` čÆ­å„ć€‚ - `.use_database(database: &str)`: ę‰§č”Œ `USE` čÆ­å„ć€‚ -é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æ [å‚ę•°ē»‘定](#å‚ę•°ē»‘å®šęŽ„口) 和 [č”Œåč®®ęŽ„å£](#č”Œåč®®ęŽ„å£) ēš„å…„口ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ +é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æå‚ę•°ē»‘å®šå’Œč”Œåč®®ęŽ„口ēš„å…„口ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ -### å‚ę•°ē»‘å®šęŽ„口 +

+ +å‚ę•°ē»‘å®šęŽ„口 + +

äøŽ C ęŽ„å£ē±»ä¼¼ļ¼ŒRust ęä¾›å‚ę•°ē»‘å®šęŽ„å£ć€‚é¦–å…ˆļ¼Œé€ščæ‡ [Taos][struct.Taos] åÆ¹č±”åˆ›å»ŗäø€äøŖ SQL čƭ叄ēš„å‚ę•°ē»‘定åÆ¹č±” [Stmt]ļ¼š @@ -394,7 +595,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; å‚ę•°ē»‘定åÆ¹č±”ęä¾›äŗ†äø€ē»„ꎄ口ē”ØäŗŽå®žēŽ°å‚ę•°ē»‘定ļ¼š -#### `.set_tbname(name)` +`.set_tbname(name)` ē”ØäŗŽē»‘定č”Øåć€‚ @@ -403,7 +604,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` 当 SQL čƭ叄ä½æē”Ø超ēŗ§č”Øę—¶ļ¼Œē”ØäŗŽē»‘定子č”Øč”Øåå’Œę ‡ē­¾å€¼ļ¼š @@ -413,7 +614,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("ę¶›ę€".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` ē”ØäŗŽē»‘定值ē±»åž‹ć€‚ä½æē”Ø [ColumnView] ē»“ęž„ä½“ęž„å»ŗéœ€č¦ēš„ē±»åž‹å¹¶ē»‘定ļ¼š @@ -437,7 +638,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` ę‰§č”Œ SQL怂[Stmt] åÆ¹č±”åÆ仄复ē”Øļ¼ŒåœØę‰§č”ŒåŽåÆä»„é‡ę–°ē»‘å®šå¹¶ę‰§č”Œć€‚ę‰§č”Œå‰čÆ·ē”®äæę‰€ęœ‰ę•°ę®å·²é€ščæ‡ `.add_batch` åŠ å…„åˆ°ę‰§č”Œé˜Ÿåˆ—äø­ć€‚ @@ -452,92 +653,6 @@ stmt.execute()?; äø€äøŖåÆčæč”Œēš„ē¤ŗ例čÆ·č§ [GitHub äøŠēš„ē¤ŗ例](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs)怂 -### č®¢é˜… - -TDengine 通čæ‡ę¶ˆęÆ队列 [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ - -从 DSN 开始ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Ø怂 - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -创å»ŗę¶ˆč“¹č€…ļ¼š - -```rust -let mut consumer = tmq.build()?; -``` - -ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -TMQ 궈ęÆ队列ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆ仄ä½æē”Øē›øåŗ” API åƹęƏäøŖ궈ęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ - -```rust -{ - let mut stream = consumer.stream(); - - while let Some((offset, message)) = stream.try_next().await? 
{ - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -ꌉē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8ļ¼Œ TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -åœę­¢č®¢é˜…ļ¼š - -```rust -consumer.unsubscribe().await; -``` - -åƹäŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆ仄čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øꄏēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„怂 - -- `group.id`: 同äø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ -- `client.id`: åÆ选ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ -- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øꄏļ¼Œę­¤é€‰é”¹åœØ同äø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”怂 -- `enable.auto.commit`: å½“č®¾ē½®äøŗ `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØꠇ记ęؔ式ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ꀧäøę•ę„Ÿę—¶ļ¼ŒåÆ仄åÆē”Øę­¤ę–¹å¼ć€‚ -- `auto.commit.interval.ms`: č‡ŖåŠØꠇ记ēš„ꗶ闓闓隔怂 - -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub ē¤ŗä¾‹ę–‡ä»¶](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
其他ē›ø关ē»“ęž„ä½“ API ä½æē”ØčÆ“ę˜ŽčÆ·ē§»ę­„ Rust ę–‡ę”£ę‰˜ē®”ē½‘锵ļ¼šć€‚ diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 8752dc214565c7834cdc6903f5247cd4c64194a2..c3ec2243548d4b3f41854849595cb9d37479deda 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -25,6 +25,16 @@ Python čæžęŽ„å™Øēš„ęŗē ę‰˜ē®”åœØ [GitHub](https://github.com/taosdata/taos-con ꗠč®ŗä½æē”Ø什么ē‰ˆęœ¬ēš„ TDengine 都å»ŗč®®ä½æē”Ø꜀ꖰē‰ˆęœ¬ēš„ `taospy`怂 +|Python Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:-------------------:|:----:| +|2.7.9|ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦| +|2.7.8|ę–°å¢ž `execute_many`| + +|Python Websocket Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:----------------------------:|:-----:| +|0.2.5|1. ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦
2. ę”Æꌁ schemaless
3. ę”Æꌁ STMT| +|0.2.4|ę•°ę®č®¢é˜…ę–°å¢žå–ę¶ˆč®¢é˜…ę–¹ę³•| + ## 处ē†å¼‚åøø Python čæžęŽ„å™ØåÆčƒ½ä¼šäŗ§ē”Ÿ 4 ē§å¼‚åøøļ¼š @@ -549,7 +559,7 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) #### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) @@ -631,7 +641,7 @@ consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws #### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topicsļ¼Œconsumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 416d41614d01b6c887d8ea462937b2b4fc509a55..fc0cfbe3305fbc4a70cf38ef41f9b7966a60feed 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -991,18 +991,14 @@ SAMPLE(expr, k) **åŠŸčƒ½čÆ“ę˜Ž**ļ¼š čŽ·å–ę•°ę®ēš„ k äøŖé‡‡ę ·å€¼ć€‚å‚ę•° k ēš„åˆę³•č¾“å…„čŒƒå›“ę˜Æ 1ā‰¤ k ā‰¤ 1000怂 -**čæ”回ē»“ęžœē±»åž‹**ļ¼š åŒåŽŸå§‹ę•°ę®ē±»åž‹ļ¼Œ čæ”回ē»“ęžœäø­åø¦ęœ‰čÆ„č”Œč®°å½•ēš„ę—¶é—“ęˆ³ć€‚ +**čæ”回ē»“ęžœē±»åž‹**ļ¼š åŒåŽŸå§‹ę•°ę®ē±»åž‹ć€‚ -**适ē”Øę•°ę®ē±»åž‹**ļ¼š åœØ超ēŗ§č”ØęŸ„čÆ¢äø­ä½æē”Øę—¶ļ¼Œäøčƒ½åŗ”ē”ØåœØꠇē­¾ä¹‹äøŠć€‚ +**适ē”Øę•°ę®ē±»åž‹**ļ¼š å…ØéƒØē±»åž‹å­—ꮵ怂 **åµŒå„—å­ęŸ„čÆ¢ę”Æꌁ**ļ¼š 适ē”ØäŗŽå†…å±‚ęŸ„čÆ¢å’Œå¤–å±‚ęŸ„čÆ¢ć€‚ **适ē”ØäŗŽ**ļ¼šč”Øå’Œč¶…ēŗ§č”Ø怂 -**ä½æē”ØčÆ“ę˜Ž**ļ¼š - -- äøčƒ½å‚äøŽč”Øč¾¾å¼č®”ē®—ļ¼›čÆ„å‡½ę•°åÆ仄åŗ”ē”ØåœØꙮ通č”Øå’Œč¶…ēŗ§č”ØäøŠļ¼› - ### TAIL @@ -1047,11 +1043,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”回čƄ列ēš„ę•°å€¼é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct ē›øä¼¼ļ¼Œä½†ę˜ÆåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³äæ”ęÆ怂åÆ仄针åÆ¹é™¤ę—¶é—“åˆ—ä»„å¤–ēš„å­—ę®µčæ›č”ŒęŸ„čÆ¢ļ¼ŒåÆä»„åŒ¹é…ę 
‡ē­¾å’Œę—¶é—“ęˆ³ļ¼Œå…¶äø­ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ę˜Æē¬¬äø€ę¬”å‡ŗēŽ°ę—¶åˆ»ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ć€‚ +**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”回čÆ„åˆ—ę•°ę®é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct ē›øä¼¼ć€‚ **čæ”å›žę•°ę®ē±»åž‹**ļ¼šåŒåŗ”ē”Øēš„å­—ę®µć€‚ -**适ē”Øę•°ę®ē±»åž‹**ļ¼šé€‚合äŗŽé™¤ę—¶é—“ē±»åž‹ä»„外ēš„å­—ę®µć€‚ +**适ē”Øę•°ę®ē±»åž‹**ļ¼šå…ØéƒØē±»åž‹å­—ꮵ怂 **适ē”ØäŗŽ**: č”Øå’Œč¶…ēŗ§č”Ø怂 diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 12ad665e42fc8bf8b177efdbe43b5356d7a04d3b..f3397ae82dcc0465fb23d2f9770025e7b46f6a48 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ SHOW CONNECTIONS; SHOW CONSUMERS; ``` -ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę“»č·ƒēš„ę¶ˆč“¹č€…ēš„äæ”ęÆ怂 +ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę¶ˆč“¹č€…ēš„äæ”ęÆ怂 ## SHOW CREATE DATABASE diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index ae47388566ef7c1104e50b5a35bee08e8889134a..557552bc1c1b56688a3706fb63834a58128036f6 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各ē‰ˆęœ¬å®‰č£…包čÆ·č®æ问[čæ™é‡Œ](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.6.0 + + + ## 3.0.5.1 diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 6cb7d8852310082e0f1431c265c01752f5d527b7..cd8e0642cf2dcae26ae288e421083f8accae54d4 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -187,6 +187,7 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex); int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows); +int32_t colDataGetRowLength(const SColumnInfoData* pColumnInfoData, int32_t rowIdx); void colDataTrim(SColumnInfoData* pColumnInfoData); size_t blockDataGetNumOfCols(const SSDataBlock* pBlock); diff --git a/include/common/tglobal.h 
b/include/common/tglobal.h index 8aa17e46d1b832c170d808d4124ce57dcb25b4c4..bc4037c64234540f53b390cca8f82a7feb286a8e 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -164,6 +164,8 @@ extern char tsSmlTagName[]; // extern bool tsSmlDataFormat; // extern int32_t tsSmlBatchSize; +extern int32_t tmqMaxTopicNum; + // wal extern int64_t tsWalFsyncDataSizeLimit; @@ -184,6 +186,7 @@ extern int64_t tsStreamBufferSize; extern int64_t tsCheckpointInterval; extern bool tsFilterScalarMode; extern int32_t tsMaxStreamBackendCache; +extern int32_t tsPQSortMemThreshold; // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 2cf8eacdac98406fe54dce79b090334913ddcae8..3fc94f440862e843e5c6d503d8488f654d0b6c1c 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -145,7 +145,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_TOPIC, "drop-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_SUBSCRIBE, "subscribe", SCMSubscribeReq, SCMSubscribeRsp) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp) - TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) +// TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL) diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h index b71d50d43cc59988407576c1c1e0b9c2bce8fa3b..6b15833917cb9bf9fde78363f57740dd4e061647 100644 --- a/include/libs/function/tudf.h +++ b/include/libs/function/tudf.h @@ -111,6 +111,12 @@ int32_t udfStartUdfd(int32_t startDnodeId); */ int32_t udfStopUdfd(); +/** + * get udfd pid + * + */ + int32_t udfGetUdfdPid(int32_t* pUdfdPid); + #ifdef __cplusplus 
} #endif diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 453c5d49142934b79758f3faf5e3c12c80c16a55..c1481da80cae306eceebce55c80e44ddadabca88 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -246,6 +246,7 @@ typedef struct SSortLogicNode { SLogicNode node; SNodeList* pSortKeys; bool groupSort; + int64_t maxRows; } SSortLogicNode; typedef struct SPartitionLogicNode { @@ -523,6 +524,7 @@ typedef struct SSortPhysiNode { SNodeList* pExprs; // these are expression list of order_by_clause and parameter expression of aggregate function SNodeList* pSortKeys; // element is SOrderByExprNode, and SOrderByExprNode::pExpr is SColumnNode SNodeList* pTargets; + int64_t maxRows; } SSortPhysiNode; typedef SSortPhysiNode SGroupSortPhysiNode; diff --git a/include/util/taoserror.h b/include/util/taoserror.h index ce24761df978422a677241e7ed8249ab3356deff..772a668f0fbb17c6ce233aa217b0f9dd14a09620 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -66,8 +66,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018) // #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) // #define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected" -#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // -#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // +#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // @@ -277,7 +277,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_FUNC_COMMENT TAOS_DEF_ERROR_CODE(0, 0x0378) #define TSDB_CODE_MND_INVALID_FUNC_RETRIEVE TAOS_DEF_ERROR_CODE(0, 0x0379) - + // mnode-db #define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) @@ -288,9 +288,9 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 
0x0385) #define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) // // #define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) // 2.x -#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) // +#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) // #define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) // internal -#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // +#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // #define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B) #define TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO TAOS_DEF_ERROR_CODE(0, 0x038C) // #define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) // 2.x @@ -516,6 +516,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x072E) #define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x072F) #define TSDB_CODE_QRY_QWORKER_QUIT TAOS_DEF_ERROR_CODE(0, 0x0730) +#define TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x0731) // grant #define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) @@ -768,6 +769,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001) #define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002) #define TSDB_CODE_TMQ_CONSUMER_ERROR TAOS_DEF_ERROR_CODE(0, 0x4003) +#define TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4004) +#define TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4005) // stream #define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100) @@ -778,7 +781,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x5101) // UTIL -#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000) +#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000) #ifdef __cplusplus } diff --git a/include/util/tcompare.h 
b/include/util/tcompare.h index f92e1c3970a828fdfe109ee51a8e1f52f1ae0389..2fa736f4df73b0e8661484151e9b22c929e62b9c 100644 --- a/include/util/tcompare.h +++ b/include/util/tcompare.h @@ -79,6 +79,7 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight); int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight); int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight); +int32_t compareLenBinaryVal(const void *pLeft, const void *pRight); int32_t comparestrRegexMatch(const void *pLeft, const void *pRight); int32_t comparestrRegexNMatch(const void *pLeft, const void *pRight); diff --git a/include/util/theap.h b/include/util/theap.h index fb5ff8301a9b08b2cb53c353a363042c2de9cc34..8ddeeb28a43c107a0986d76beecebe0f0aa894d0 100644 --- a/include/util/theap.h +++ b/include/util/theap.h @@ -17,6 +17,7 @@ #define _TD_UTIL_HEAP_H_ #include "os.h" +#include "tarray.h" #ifdef __cplusplus extern "C" { @@ -58,6 +59,48 @@ void heapDequeue(Heap* heap); size_t heapSize(Heap* heap); +typedef bool (*pq_comp_fn)(void* l, void* r, void* param); + +typedef struct PriorityQueueNode { + void* data; +} PriorityQueueNode; + +typedef struct PriorityQueue PriorityQueue; + +PriorityQueue* createPriorityQueue(pq_comp_fn fn, FDelete deleteFn, void* param); + +void taosPQSetFn(PriorityQueue* pq, pq_comp_fn fn); + +void destroyPriorityQueue(PriorityQueue* pq); + +PriorityQueueNode* taosPQTop(PriorityQueue* pq); + +size_t taosPQSize(PriorityQueue* pq); + +void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node); + +void taosPQPop(PriorityQueue* pq); + +typedef struct BoundedQueue BoundedQueue; + +BoundedQueue* createBoundedQueue(uint32_t maxSize, pq_comp_fn fn, FDelete deleteFn, void* param); + +void taosBQSetFn(BoundedQueue* q, pq_comp_fn fn); + +void destroyBoundedQueue(BoundedQueue* q); + +void taosBQPush(BoundedQueue* q, PriorityQueueNode* n); + +PriorityQueueNode* taosBQTop(BoundedQueue* q); + +size_t taosBQSize(BoundedQueue* q); + +size_t 
taosBQMaxSize(BoundedQueue* q); + +void taosBQBuildHeap(BoundedQueue* q); + +void taosBQPop(BoundedQueue* q); + #ifdef __cplusplus } #endif diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 081383f89b358325f55cc1a7641015efcb0a4eed..0622b01f2b6c03e26e1b5968f208baf96b0e786e 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -152,7 +152,7 @@ function wgetFile { file=$1 versionPath=$2 sourceP=$3 -nasServerIP="192.168.1.131" +nasServerIP="192.168.1.213" packagePath="/nas/TDengine/v${versionPath}/${verMode}" if [ -f ${file} ];then echoColor YD "${file} already exists ,it will delete it and download it again " diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 503120fe85e93bac6bafdc95f0ed14bfa8094700..13dc019feb29892fd1b48bf7fb8051f1da216652 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -749,6 +749,9 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, pReq.suid = pTableMeta->uid; pReq.source = TD_REQ_FROM_TAOX; pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? 
"sml_add_column" : "sml_modify_column_size"; + } else{ + uError("SML:0x%" PRIx64 " invalid action:%d", info->id, action); + goto end; } code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 975b304bf4574bb84e16b1287b3dfec73c8ce869..8ac9550aca4e6705a848b3fef4e95a972008c819 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -939,8 +939,6 @@ int stmtClose(TAOS_STMT* stmt) { stmtCleanSQLInfo(pStmt); taosMemoryFree(stmt); - STMT_DLOG_E("stmt freed"); - return TSDB_CODE_SUCCESS; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 8758cec2ec7eacabd13aba5b0ef8d61d15a2aef2..83550aa15d50b595aed684bc21694f904cc5843e 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -652,7 +652,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm int32_t j = 0; int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); for (j = 0; j < numOfVgroups; j++) { - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->vgId == vgId) { break; } @@ -666,7 +666,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm return; } - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->offsetInfo.currentOffset.type > 0 && !tOffsetEqual(&pVg->offsetInfo.currentOffset, &pVg->offsetInfo.committedOffset)) { code = doSendCommitMsg(tmq, pVg, pTopic->topicName, pParamSet, j, numOfVgroups, type); @@ -742,13 +742,15 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us static void generateTimedTask(int64_t refId, int32_t type) { tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = type; - 
taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - taosReleaseRef(tmqMgmt.rsetId, refId); - } + if(tmq == NULL) return; + + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); + if(pTaskType == NULL) return; + + *pTaskType = type; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + taosReleaseRef(tmqMgmt.rsetId, refId); } void tmqAssignAskEpTask(void* param, void* tmrId) { @@ -763,19 +765,19 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) { taosMemoryFree(param); } -void tmqAssignDelayedReportTask(void* param, void* tmrId) { - int64_t refId = *(int64_t*)param; - tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = TMQ_DELAYED_TASK__REPORT; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - } - - taosReleaseRef(tmqMgmt.rsetId, refId); - taosMemoryFree(param); -} +//void tmqAssignDelayedReportTask(void* param, void* tmrId) { +// int64_t refId = *(int64_t*)param; +// tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); +// if (tmq != NULL) { +// int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); +// *pTaskType = TMQ_DELAYED_TASK__REPORT; +// taosWriteQitem(tmq->delayedTask, pTaskType); +// tsem_post(&tmq->rspSem); +// } +// +// taosReleaseRef(tmqMgmt.rsetId, refId); +// taosMemoryFree(param); +//} int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { if (pMsg) { @@ -813,7 +815,7 @@ void tmqSendHbReq(void* param, void* tmrId) { offRows->offset = pVg->offsetInfo.currentOffset; char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &offRows->offset); - tscInfo("report offset: vgId:%d, offset:%s, rows:%"PRId64, offRows->vgId, buf, offRows->rows); + tscInfo("consumer:0x%" PRIx64 ",report offset: vgId:%d, offset:%s, rows:%"PRId64, tmq->consumerId, offRows->vgId, buf, offRows->rows); } } // tmq->needReportOffsetRows = false; @@ 
-1489,7 +1491,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic makeTopicVgroupKey(vgKey, pTopic->topicName, pVgEp->vgId); SVgroupSaveInfo* pInfo = taosHashGet(pVgOffsetHashMap, vgKey, strlen(vgKey)); - STqOffsetVal offsetNew = {.type = tmq->resetOffsetCfg}; + STqOffsetVal offsetNew = {0}; + offsetNew.type = tmq->resetOffsetCfg; SMqClientVg clientVg = { .pollCnt = 0, diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 7cd33955c19689eb64fb3d4efe2517c23916cf33..c2024a9a779661eb1876525be29497664f9c6eaa 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -160,9 +160,9 @@ static const SSysDbTableSchema streamSchema[] = { static const SSysDbTableSchema streamTaskSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "task_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "task_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "node_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; @@ -290,7 +290,7 @@ static const SSysDbTableSchema subscriptionSchema[] = { {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, 
.sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "offset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; @@ -352,7 +352,7 @@ static const SSysDbTableSchema connectionsSchema[] = { static const SSysDbTableSchema consumerSchema[] = { - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5d1288d831c5c68ef52140d062111347684213e1..96889882b65f75ccb7f7de5095d03286c1a2609d 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -47,6 +47,17 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo } } + +int32_t colDataGetRowLength(const SColumnInfoData* pColumnInfoData, int32_t rowIdx) { + if (colDataIsNull_s(pColumnInfoData, rowIdx)) return 0; + + if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) return pColumnInfoData->info.bytes; + if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) + return getJsonValueLen(colDataGetData(pColumnInfoData, rowIdx)); + else + return varDataTLen(colDataGetData(pColumnInfoData, rowIdx)); +} + int32_t colDataGetFullLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) { if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { return pColumnInfoData->varmeta.length + sizeof(int32_t) * numOfRows; diff --git a/source/common/src/tglobal.c 
b/source/common/src/tglobal.c index 948174b565f133ee160f1926c9c674f53539e0ae..bacafee34992e2b9d86028bde67d55373c167caa 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -62,6 +62,7 @@ int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; int32_t tsMaxStreamBackendCache = 128; // M +int32_t tsPQSortMemThreshold = 16; // M // sync raft int32_t tsElectInterval = 25 * 1000; @@ -104,6 +105,8 @@ char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table // bool tsSmlDataFormat = false; // int32_t tsSmlBatchSize = 10000; +// tmq +int32_t tmqMaxTopicNum = 20; // query int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; @@ -510,6 +513,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "tmqMaxTopicNum", tmqMaxTopicNum, 1, 10000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; @@ -533,6 +538,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, 0) != 0) return -1; GRANT_CFG_ADD; return 0; @@ -880,6 +886,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; + tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + 
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; @@ -914,6 +922,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval; tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; + tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; GRANT_CFG_GET; return 0; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 4e8797b1ecaf83f03c353e5a9f6a80d0680692f4..debb93e8ba75f4608f0faedfade250ebfb1e295f 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -6982,8 +6982,11 @@ int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int6 if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } tEndDecode(pDecoder); return 0; @@ -7541,8 +7544,11 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) { int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) { if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } return 0; } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index d8c43747f7b92822fad5455a143e35c5e918f15c..7a5581efbe210a64c2201830b7634dc7baca279c 100644 --- 
a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -969,7 +969,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) default: fractionLen = 0; - ASSERT(false); + return; } if (taosLocalTime(", &ptm, buf) == NULL) { diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index 96401511d2cd4832ad6d548a4b7286ba62227a7d..a3a31cfc5a5cfd0fdde3830ab015d2ca8cd72c98 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -25,14 +25,15 @@ extern "C" { enum { MQ_CONSUMER_STATUS_REBALANCE = 1, // MQ_CONSUMER_STATUS__MODIFY_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__READY, - MQ_CONSUMER_STATUS__LOST, + MQ_CONSUMER_STATUS_READY, + MQ_CONSUMER_STATUS_LOST, // MQ_CONSUMER_STATUS__LOST_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__LOST_REBD, -}; +// MQ_CONSUMER_STATUS__LOST_REBD, +};\ int32_t mndInitConsumer(SMnode *pMnode); void mndCleanupConsumer(SMnode *pMnode); +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId); SMqConsumerObj *mndAcquireConsumer(SMnode *pMnode, int64_t consumerId); void mndReleaseConsumer(SMnode *pMnode, SMqConsumerObj *pConsumer); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 8b75795d41cd1924a16cc3c7121bedef7737d69c..696549fa05633cde2e0f500272e259f3613fe603 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -137,12 +137,12 @@ typedef enum { } EDndReason; typedef enum { - CONSUMER_UPDATE__TOUCH = 1, // rebalance req do not need change consume topic - CONSUMER_UPDATE__ADD, - CONSUMER_UPDATE__REMOVE, - CONSUMER_UPDATE__LOST, - CONSUMER_UPDATE__RECOVER, - CONSUMER_UPDATE__REBALANCE, // subscribe req need change consume topic + CONSUMER_UPDATE_REB_MODIFY_NOTOPIC = 1, // topic do not need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_TOPIC, // topic need modified after rebalance 
+ CONSUMER_UPDATE_REB_MODIFY_REMOVE, // topic need removed after rebalance +// CONSUMER_UPDATE_TIMER_LOST, + CONSUMER_UPDATE_RECOVER, + CONSUMER_UPDATE_SUB_MODIFY, // modify after subscribe req } ECsmUpdateType; typedef struct { @@ -549,7 +549,7 @@ typedef struct { // data for display int32_t pid; SEpSet ep; - int64_t upTime; + int64_t createTime; int64_t subscribeTime; int64_t rebalanceTime; @@ -560,7 +560,7 @@ typedef struct { } SMqConsumerObj; SMqConsumerObj* tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]); -void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer); +void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer, bool delete); int32_t tEncodeSMqConsumerObj(void** buf, const SMqConsumerObj* pConsumer); void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer, int8_t sver); diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index fad316ea12edce96bde4c21694b5402d97bf4ae0..ba4328b8fe821e0b8f858fb69d2deb687c36ac93 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -25,6 +25,7 @@ extern "C" { int32_t mndInitSubscribe(SMnode *pMnode); void mndCleanupSubscribe(SMnode *pMnode); +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName); SMqSubscribeObj *mndAcquireSubscribe(SMnode *pMnode, const char *CGroup, const char *topicName); SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key); void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 4dded61ce3fbbe4663a1047ee6aa1b9562eda767..47cc4a1ce7b4a0df57f54dfcd2d3e3af94acf399 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -26,6 +26,7 @@ #define MND_CONSUMER_VER_NUMBER 2 #define MND_CONSUMER_RESERVE_SIZE 64 +#define MND_MAX_GROUP_PER_TOPIC 100 #define MND_CONSUMER_LOST_HB_CNT 6 
#define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 @@ -63,7 +64,7 @@ int32_t mndInitConsumer(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); +// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg); @@ -75,6 +76,22 @@ int32_t mndInitConsumer(SMnode *pMnode) { void mndCleanupConsumer(SMnode *pMnode) {} +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId){ + SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); + if (pClearMsg == NULL) { + mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. alloc size:%d", consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); + return; + } + + pClearMsg->consumerId = consumerId; + SRpcMsg rpcMsg = { + .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; + + mInfo("consumer:0x%" PRIx64 " drop from sdb", consumerId); + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + return; +} + bool mndRebTryStart() { int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); mDebug("tq timer, rebalance counter old val:%d", old); @@ -105,50 +122,48 @@ void mndRebCntDec() { } } -static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMqConsumerLostMsg *pLostMsg = pMsg->pCont; - SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); - if (pConsumer == NULL) { - return 0; - } - - mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, - 
mndConsumerStatusName(pConsumer->status)); - - if (pConsumer->status != MQ_CONSUMER_STATUS__READY) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } - - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; - - mndReleaseConsumer(pMnode, pConsumer); - - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); - if (pTrans == NULL) { - goto FAIL; - } - - if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - goto FAIL; - } - - if (mndTransPrepare(pMnode, pTrans) != 0) { - goto FAIL; - } - - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return 0; -FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return -1; -} +//static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { +// SMnode *pMnode = pMsg->info.node; +// SMqConsumerLostMsg *pLostMsg = pMsg->pCont; +// SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); +// if (pConsumer == NULL) { +// return 0; +// } +// +// mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, +// mndConsumerStatusName(pConsumer->status)); +// +// if (pConsumer->status != MQ_CONSUMER_STATUS_READY) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } +// +// SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; +// +// mndReleaseConsumer(pMnode, pConsumer); +// +// STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); +// if (pTrans == NULL) { +// goto FAIL; +// } +// +// if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { +// goto FAIL; +// } +// +// if (mndTransPrepare(pMnode, pTrans) != 0) { +// goto FAIL; +// } +// +// 
tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return 0; +//FAIL: +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return -1; +//} static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; @@ -162,14 +177,14 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { mInfo("receive consumer recover msg, consumer:0x%" PRIx64 " status:%d(%s)", pRecoverMsg->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { + if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { mndReleaseConsumer(pMnode, pConsumer); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; return -1; } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__RECOVER; + pConsumerNew->updateType = CONSUMER_UPDATE_RECOVER; mndReleaseConsumer(pMnode, pConsumer); @@ -181,13 +196,13 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -206,13 +221,13 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { mInfo("consumer:0x%" PRIx64 " needs to be cleared, status %s", pClearMsg->consumerId, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } +// if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } SMqConsumerObj *pConsumerNew = 
tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; mndReleaseConsumer(pMnode, pConsumer); @@ -223,14 +238,14 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { if (mndSetConsumerDropLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -297,56 +312,29 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); int32_t status = atomic_load_32(&pConsumer->status); - mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", uptime:%" PRId64 ", hbstatus:%d", - pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->upTime, + mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", + pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, hbStatus); - if (status == MQ_CONSUMER_STATUS__READY) { - if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { - SMqConsumerLostMsg *pLostMsg = rpcMallocCont(sizeof(SMqConsumerLostMsg)); - if (pLostMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to transfer consumer status to lost due to out of memory. 
alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerLostMsg)); - continue; + if (status == MQ_CONSUMER_STATUS_READY) { + if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { + taosRLockLatch(&pConsumer->lock); + int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < topicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); } - - pLostMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_CONSUMER_LOST, .pCont = pLostMsg, .contLen = sizeof(SMqConsumerLostMsg)}; - - mDebug("consumer:0x%"PRIx64" hb not received beyond threshold %d, set to lost", pConsumer->consumerId, - MND_CONSUMER_LOST_HB_CNT); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); - } - } else if (status == MQ_CONSUMER_STATUS__LOST_REBD) { - // if the client is lost longer than one day, clear it. Otherwise, do nothing about the lost consumers. - if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { - SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); - if (pClearMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. 
alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); - continue; - } - - pClearMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; - - mDebug("consumer:0x%" PRIx64 " lost beyond threshold %d, clear it", pConsumer->consumerId, - MND_CONSUMER_LOST_CLEAR_THRESHOLD); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + taosRUnLockLatch(&pConsumer->lock); } - } else if (status == MQ_CONSUMER_STATUS__LOST) { - taosRLockLatch(&pConsumer->lock); - int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < topicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); + } else if (status == MQ_CONSUMER_STATUS_LOST) { + if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); } - taosRUnLockLatch(&pConsumer->lock); } else { // MQ_CONSUMER_STATUS_REBALANCE taosRLockLatch(&pConsumer->lock); @@ -413,7 +401,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64 "", consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -475,7 +463,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { mError("consumer:0x%" PRIx64 " group:%s not consistent with data in sdb, saved cgroup:%s", consumerId, req.cgroup, pConsumer->cgroup); terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST; - return -1; + goto FAIL; } atomic_store_32(&pConsumer->hbStatus, 0); @@ -483,7 
+471,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { // 1. check consumer status int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64, consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -497,10 +485,10 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg); } - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { mInfo("consumer:0x%" PRIx64 " not ready, status: %s", consumerId, mndConsumerStatusName(status)); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; - return -1; + goto FAIL; } int32_t serverEpoch = atomic_load_32(&pConsumer->epoch); @@ -582,7 +570,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { void *buf = rpcMallocCont(tlen); if (buf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + goto FAIL; } SMqRspHead* pHead = buf; @@ -669,6 +657,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { char *cgroup = subscribe.cgroup; SMqConsumerObj *pExistedConsumer = NULL; SMqConsumerObj *pConsumerNew = NULL; + STrans *pTrans = NULL; int32_t code = -1; SArray *pTopicList = subscribe.topicNames; @@ -676,9 +665,17 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem); int32_t newTopicNum = taosArrayGetSize(pTopicList); + for(int i = 0; i < newTopicNum; i++){ + int32_t gNum = mndGetGroupNumByTopic(pMnode, (const char*)taosArrayGetP(pTopicList, i)); + if(gNum >= MND_MAX_GROUP_PER_TOPIC){ + terrno = TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE; + code = terrno; + goto _over; + } + } // check topic existence - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); if (pTrans == NULL) { goto _over; } 
@@ -701,8 +698,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval; pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg; - // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; +// pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; // use insert logic taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); @@ -721,7 +717,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { " cgroup:%s, current status:%d(%s), subscribe topic num: %d", consumerId, subscribe.cgroup, status, mndConsumerStatusName(status), newTopicNum); - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; goto _over; } @@ -732,11 +728,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { } // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; + pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); - int32_t oldTopicNum = (pExistedConsumer->currentTopics) ? 
taosArrayGetSize(pExistedConsumer->currentTopics) : 0; + int32_t oldTopicNum = taosArrayGetSize(pExistedConsumer->currentTopics); int32_t i = 0, j = 0; while (i < oldTopicNum || j < newTopicNum) { @@ -791,10 +787,7 @@ _over: mndReleaseConsumer(pMnode, pExistedConsumer); } - if (pConsumerNew) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - } + tDeleteSMqConsumerObj(pConsumerNew, true); // TODO: replace with destroy subscribe msg taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree); @@ -894,17 +887,17 @@ CM_DECODE_OVER: } static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " cgroup:%s status:%d(%s) epoch:%d load from sdb, perform insert action", + mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch); - pConsumer->subscribeTime = pConsumer->upTime; + pConsumer->subscribeTime = taosGetTimestampMs(); return 0; } static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, + mInfo("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - tDeleteSMqConsumerObj(pConsumer); + tDeleteSMqConsumerObj(pConsumer, false); return 0; } @@ -913,10 +906,9 @@ static void updateConsumerStatus(SMqConsumerObj *pConsumer) { if (taosArrayGetSize(pConsumer->rebNewTopics) == 0 && taosArrayGetSize(pConsumer->rebRemovedTopics) == 0) { if (status == MQ_CONSUMER_STATUS_REBALANCE) { - pConsumer->status = MQ_CONSUMER_STATUS__READY; - } else if (status == MQ_CONSUMER_STATUS__LOST) { - ASSERT(taosArrayGetSize(pConsumer->currentTopics) == 0); - pConsumer->status = MQ_CONSUMER_STATUS__LOST_REBD; + pConsumer->status = MQ_CONSUMER_STATUS_READY; 
+ } else if (status == MQ_CONSUMER_STATUS_READY) { + pConsumer->status = MQ_CONSUMER_STATUS_LOST; } } } @@ -930,7 +922,7 @@ static void removeFromNewTopicList(SMqConsumerObj *pConsumer, const char *pTopic taosArrayRemove(pConsumer->rebNewTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, + mInfo("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebNewTopics)); break; } @@ -946,7 +938,7 @@ static void removeFromRemoveTopicList(SMqConsumerObj *pConsumer, const char *pTo taosArrayRemove(pConsumer->rebRemovedTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebRemovedTopics)); break; } @@ -961,7 +953,7 @@ static void removeFromCurrentTopicList(SMqConsumerObj *pConsumer, const char *pT taosArrayRemove(pConsumer->currentTopics, i); taosMemoryFree(topic); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->currentTopics)); break; } @@ -984,47 +976,46 @@ static bool existInCurrentTopicList(const SMqConsumerObj* pConsumer, const char* } static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer) { - mDebug("consumer:0x%" PRIx64 " perform update action, update type:%d, subscribe-time:%" PRId64 ", uptime:%" PRId64, - pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->upTime); + mInfo("consumer:0x%" PRIx64 " perform update action, update 
type:%d, subscribe-time:%" PRId64 ", createTime:%" PRId64, + pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime); taosWLockLatch(&pOldConsumer->lock); - if (pNewConsumer->updateType == CONSUMER_UPDATE__REBALANCE) { + if (pNewConsumer->updateType == CONSUMER_UPDATE_SUB_MODIFY) { TSWAP(pOldConsumer->rebNewTopics, pNewConsumer->rebNewTopics); TSWAP(pOldConsumer->rebRemovedTopics, pNewConsumer->rebRemovedTopics); TSWAP(pOldConsumer->assignedTopics, pNewConsumer->assignedTopics); - pOldConsumer->subscribeTime = pNewConsumer->upTime; + pOldConsumer->subscribeTime = taosGetTimestampMs(); pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__LOST) { - int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); - for (int32_t i = 0; i < sz; i++) { - char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); - taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); - } - - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - int32_t prevStatus = pOldConsumer->status; - pOldConsumer->status = MQ_CONSUMER_STATUS__LOST; - mDebug("consumer:0x%" PRIx64 " state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", - pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), - pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__RECOVER) { + mInfo("consumer:0x%" PRIx64 " sub update, modify existed consumer",pOldConsumer->consumerId); +// } else if (pNewConsumer->updateType == CONSUMER_UPDATE_TIMER_LOST) { +// int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); +// for (int32_t i = 0; i < sz; i++) { +// char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); +// taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); +// } +// +// int32_t prevStatus = pOldConsumer->status; +// pOldConsumer->status 
= MQ_CONSUMER_STATUS_LOST; +// mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", +// pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), +// pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_RECOVER) { int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { char *topic = taosStrdup(taosArrayGetP(pOldConsumer->assignedTopics, i)); taosArrayPush(pOldConsumer->rebNewTopics, &topic); } - pOldConsumer->rebalanceTime = pNewConsumer->upTime; pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__TOUCH) { + mInfo("consumer:0x%" PRIx64 " timer update, timer recover",pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_NOTOPIC) { atomic_add_fetch_32(&pOldConsumer->epoch, 1); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__ADD) { + pOldConsumer->rebalanceTime = taosGetTimestampMs(); + mInfo("consumer:0x%" PRIx64 " reb update, only rebalance time", pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_TOPIC) { char *pNewTopic = taosStrdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0)); // check if exist in current topic @@ -1033,6 +1024,7 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, // add to current topic bool existing = existInCurrentTopicList(pOldConsumer, pNewTopic); if (existing) { + mError("consumer:0x%" PRIx64 "new topic:%s should not in currentTopics", pOldConsumer->consumerId, pNewTopic); taosMemoryFree(pNewTopic); } else { // added into current topic list taosArrayPush(pOldConsumer->currentTopics, &pNewTopic); @@ -1044,17 +1036,17 @@ static int32_t mndConsumerActionUpdate(SSdb 
*pSdb, SMqConsumerObj *pOldConsumer, updateConsumerStatus(pOldConsumer); // the re-balance is triggered when the new consumer is launched. - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update add, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics), (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__REMOVE) { + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_REMOVE) { char *removedTopic = taosArrayGetP(pNewConsumer->rebRemovedTopics, 0); // remove from removed topic @@ -1067,10 +1059,10 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, int32_t status = pOldConsumer->status; updateConsumerStatus(pOldConsumer); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update remove, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, @@ -1133,8 +1125,12 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock 
* int32_t cols = 0; // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, pConsumer->consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->consumerId, false); + colDataSetVal(pColInfo, numOfRows, (const char *)consumerIdHex, false); // consumer group char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0}; @@ -1175,7 +1171,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * // up time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->upTime, false); + colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->createTime, false); // subscribe time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1190,7 +1186,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal); char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0}; - sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%d,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); + sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); varDataSetLen(parasStr, strlen(varDataVal(parasStr))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1216,10 +1212,9 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { static const char *mndConsumerStatusName(int status) { switch (status) { - case MQ_CONSUMER_STATUS__READY: + case MQ_CONSUMER_STATUS_READY: return "ready"; - case MQ_CONSUMER_STATUS__LOST: - case MQ_CONSUMER_STATUS__LOST_REBD: + case MQ_CONSUMER_STATUS_LOST: return "lost"; case MQ_CONSUMER_STATUS_REBALANCE: return "rebalancing"; diff --git 
a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 6b281fca6b5a4841e6229a64f9a229d0b2ff2dfc..287b39d8c799c157cc113081cd66963fbdb526d6 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -218,7 +218,7 @@ void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) { return (void *)buf; } -SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]) { +SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) { SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj)); if (pConsumer == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -249,16 +249,20 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_L return NULL; } - pConsumer->upTime = taosGetTimestampMs(); + pConsumer->createTime = taosGetTimestampMs(); return pConsumer; } -void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer) { +void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) { + if(pConsumer == NULL) return; taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree); + if(delete){ + taosMemoryFree(pConsumer); + } } int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { @@ -273,7 +277,7 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { tlen += taosEncodeFixedI32(buf, pConsumer->pid); tlen += taosEncodeSEpSet(buf, &pConsumer->ep); - tlen += taosEncodeFixedI64(buf, pConsumer->upTime); + tlen += taosEncodeFixedI64(buf, pConsumer->createTime); tlen += taosEncodeFixedI64(buf, pConsumer->subscribeTime); tlen += taosEncodeFixedI64(buf, pConsumer->rebalanceTime); @@ -343,7 +347,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s buf = 
taosDecodeFixedI32(buf, &pConsumer->pid); buf = taosDecodeSEpSet(buf, &pConsumer->ep); - buf = taosDecodeFixedI64(buf, &pConsumer->upTime); + buf = taosDecodeFixedI64(buf, &pConsumer->createTime); buf = taosDecodeFixedI64(buf, &pConsumer->subscribeTime); buf = taosDecodeFixedI64(buf, &pConsumer->rebalanceTime); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 5482f369409a89484974444de27f111e680c2f10..fbf54e80f3899729943cccec4a448fa967a5f11a 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -233,7 +233,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } code = -1; - taosIp2String(pReq->info.conn.clientIp, ip); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONNECT) != 0) { mGError("user:%s, failed to login from %s since %s", pReq->info.conn.user, ip, terrstr()); @@ -271,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } } +_CONNECT: pConn = mndCreateConn(pMnode, pReq->info.conn.user, connReq.connType, pReq->info.conn.clientIp, pReq->info.conn.clientPort, connReq.pid, connReq.app, connReq.startTime); if (pConn == NULL) { diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 61691a30d5882959c8e6128abdbc040f637d43f4..7ecd994b5abf0036f63019a101a22c5d58ec6e66 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -160,10 +160,10 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub, const SMqRebOutputVg *pRebVg, SSubplan* pPlan) { -// if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { -// terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; -// return -1; -// } + if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { + terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; + return -1; + } void *buf; int32_t tlen; @@ 
-175,7 +175,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubsc SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId); if (pVgObj == NULL) { taosMemoryFree(buf); - terrno = TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_MND_VGROUP_NOT_EXIST; return -1; } @@ -296,17 +296,17 @@ static void addUnassignedVgroups(SMqRebOutputObj *pOutput, SHashObj *pHash) { } } -static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ - for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ - SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); - SMqRebOutputVg outputVg = { - .oldConsumerId = pConsumerEp->consumerId, - .newConsumerId = pConsumerEp->consumerId, - .pVgEp = pVgEp, - }; - taosArrayPush(pOutput->rebVgs, &outputVg); - } -} +//static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ +// for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ +// SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); +// SMqRebOutputVg outputVg = { +// .oldConsumerId = pConsumerEp->consumerId, +// .newConsumerId = pConsumerEp->consumerId, +// .pVgEp = pVgEp, +// }; +// taosArrayPush(pOutput->rebVgs, &outputVg); +// } +//} static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, int32_t minVgCnt, int32_t imbConsumerNum) { @@ -357,7 +357,7 @@ static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHas } } } - putNoTransferToOutput(pOutput, pConsumerEp); +// putNoTransferToOutput(pOutput, pConsumerEp); } } @@ -468,40 +468,51 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR } } - if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed +// if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed SMqSubscribeObj *pSub = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key); // put all offset rows if (pSub) { taosRLockLatch(&pSub->lock); - 
bool init = false; if (pOutput->pSub->offsetRows == NULL) { pOutput->pSub->offsetRows = taosArrayInit(4, sizeof(OffsetRows)); - init = true; } pIter = NULL; while (1) { pIter = taosHashIterate(pSub->consumerHash, pIter); if (pIter == NULL) break; SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter; - if (init) { - taosArrayAddAll(pOutput->pSub->offsetRows, pConsumerEp->offsetRows); -// mDebug("pSub->offsetRows is init"); - } else { - for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { - OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); - for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { - OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); - if (d1->vgId == d2->vgId) { - d2->rows += d1->rows; - d2->offset = d1->offset; -// mDebug("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); - } + SMqConsumerEp *pConsumerEpNew = taosHashGet(pOutput->pSub->consumerHash, &pConsumerEp->consumerId, sizeof(int64_t)); + + for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { + OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); + bool jump = false; + for (int i = 0; pConsumerEpNew && i < taosArrayGetSize(pConsumerEpNew->vgs); i++){ + SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); + if(pVgEp->vgId == d1->vgId){ + jump = true; + mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + break; + } + } + if(jump) continue; + bool find = false; + for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { + OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); + if (d1->vgId == d2->vgId) { + d2->rows += d1->rows; + d2->offset = d1->offset; + find = true; + mInfo("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); + break; } } + if(!find){ + taosArrayPush(pOutput->pSub->offsetRows, d1); + } } } taosRUnLockLatch(&pSub->lock); 
mndReleaseSubscribe(pMnode, pSub); - } +// } } // 8. generate logs @@ -576,50 +587,44 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu return -1; } + char topic[TSDB_TOPIC_FNAME_LEN] = {0}; + char cgroup[TSDB_CGROUP_LEN] = {0}; + mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); + // 3. commit log: consumer to update status and epoch // 3.1 set touched consumer int32_t consumerNum = taosArrayGetSize(pOutput->modifyConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->modifyConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__TOUCH; - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_NOTOPIC; if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.2 set new consumer consumerNum = taosArrayGetSize(pOutput->newConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->newConsumers, i); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_TOPIC; - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__ADD; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - 
mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebNewTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebNewTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.3 set removed consumer @@ -627,24 +632,19 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->removedConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__REMOVE; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebRemovedTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_REMOVE; + + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebRemovedTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 4. 
TODO commit log: modification log @@ -771,8 +771,10 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { } static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMDropCgroupReq dropReq = {0}; + SMnode *pMnode = pMsg->info.node; + SMDropCgroupReq dropReq = {0}; + STrans *pTrans = NULL; + int32_t code = TSDB_CODE_ACTION_IN_PROGRESS; if (tDeserializeSMDropCgroupReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -791,38 +793,54 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { } } + taosWLockLatch(&pSub->lock); if (taosHashGetSize(pSub->consumerHash) != 0) { terrno = TSDB_CODE_MND_CGROUP_USED; mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - return -1; + code = -1; + goto end; + } + + void *pIter = NULL; + SMqConsumerObj *pConsumer; + while (1) { + pIter = sdbFetch(pMnode->pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) { + break; + } + + if (strcmp(dropReq.cgroup, pConsumer->cgroup) == 0) { + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } + sdbRelease(pMnode->pSdb, pConsumer); } - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); if (pTrans == NULL) { mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic); if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } if 
(mndTransPrepare(pMnode, pTrans) < 0) { - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } + +end: + taosWUnLockLatch(&pSub->lock); mndReleaseSubscribe(pMnode, pSub); + mndTransDrop(pTrans); - return TSDB_CODE_ACTION_IN_PROGRESS; + return code; } void mndCleanupSubscribe(SMnode *pMnode) {} @@ -989,6 +1007,32 @@ SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key) { return pSub; } +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName) { + int32_t num = 0; + SSdb *pSdb = pMnode->pSdb; + + void *pIter = NULL; + SMqSubscribeObj *pSub = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pSub); + if (pIter == NULL) break; + + + char topic[TSDB_TOPIC_FNAME_LEN]; + char cgroup[TSDB_CGROUP_LEN]; + mndSplitSubscribeKey(pSub->key, topic, cgroup, true); + if (strcmp(topic, topicName) != 0) { + sdbRelease(pSdb, pSub); + continue; + } + + num++; + sdbRelease(pSdb, pSub); + } + + return num; +} + void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub) { SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pSub); @@ -1114,9 +1158,13 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t cons colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false); // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, *numOfRows, (const char *)&consumerId, consumerId == -1); - + colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1); + mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic), consumerId, varDataVal(cgroup), pVgEp->vgId); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 
4bbe531bf8e1bb50598e0a801a0552817084a34e..485823edf3547309c2f57313415b575c961b4206 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -569,6 +569,11 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { SMqTopicObj *pTopic = NULL; SDbObj *pDb = NULL; SCMCreateTopicReq createTopicReq = {0}; + if (sdbGetSize(pMnode->pSdb, SDB_TOPIC) >= tmqMaxTopicNum){ + terrno = TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE; + mError("topic num out of range"); + return code; + } if (tDeserializeSCMCreateTopicReq(pReq->pCont, pReq->contLen, &createTopicReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -681,7 +686,11 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { break; } - if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue; + if (pConsumer->status == MQ_CONSUMER_STATUS_LOST){ + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + mndReleaseConsumer(pMnode, pConsumer); + continue; + } int32_t sz = taosArrayGetSize(pConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 424eac13b013efe0ce6067f82a30ed4d238e4e16..eb2d2e267bc4963dcab16b4997fbce6d4261af5d 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -1980,6 +1980,11 @@ static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) { if (!tsTtlChangeOnWrite) return 0; + if (changeTimeMs <= 0) { + metaWarn("Skip to change ttl deletetion time on write, uid: %" PRId64, uid); + return TSDB_CODE_VERSION_NOT_COMPATIBLE; + } + STtlUpdCtimeCtx ctx = {.uid = uid, .changeTimeMs = changeTimeMs}; return ttlMgrUpdateChangeTime(pMeta->pTtlMgr, &ctx); diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index c283472c2464219502b593215f8117f74c5daabd..af4827a9c72495b8fe3d08e41f62db67309a5110 100644 --- 
a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -358,7 +358,8 @@ int ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { STtlCacheEntry *cacheEntry = taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { - metaError("ttlMgr flush failed to get ttl cache since %s", tstrerror(terrno)); + metaError("ttlMgr flush failed to get ttl cache since %s, uid: %" PRId64 ", type: %d", tstrerror(terrno), *pUid, + pEntry->type); goto _out; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 77a966715eb84410cecc245c38d0f1ba60bcd9aa..5f53f1c50ca7323b437017b66965028601994c99 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -388,7 +388,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData); while (pReader->nextBlk < numOfBlocks) { - tqDebug("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, + tqTrace("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, numOfBlocks, pReader->msg.msgLen, pReader->msg.ver, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); @@ -403,7 +403,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)); if (ret != NULL) { - tqDebug("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); + tqTrace("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); SSDataBlock* pRes = NULL; int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL); @@ -412,11 +412,11 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { } } else { pReader->nextBlk += 1; - tqDebug("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); + tqTrace("tq 
reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); } } - qDebug("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id); + qTrace("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id); tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE); pReader->msg.msgStr = NULL; @@ -604,7 +604,7 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol } int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* id) { - tqDebug("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); + tqTrace("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk++); SSDataBlock* pBlock = pReader->pResBlock; diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 9349c6eb0dd9f36073aceb7d76d769df9871e92a..650f62828f34c29189f2b7674d54446cc52c37b1 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -335,6 +335,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayInit(1, sizeof(STagVal)); if (!tagArray) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); goto _end; } STagVal tagVal = { @@ -350,6 +351,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayDestroy(tagArray); if (pTag == NULL) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _end; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 485710828f98e44d1a5ca5537c5c7bae7a6f2f2e..03473788efbf1da574eb43f26f725b02da58082c 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -234,8 +234,10 @@ static int32_t 
vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int } } - *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; - pCoder->pos += sizeof(int64_t); + if (!tDecodeIsEnd(pCoder)) { + *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; + pCoder->pos += sizeof(int64_t); + } tEndDecode(pCoder); diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 30911c6061a8634e55c1bc38468bc066ced19f72..33c9d845b9bded28acf0c3ec796266bc5476371d 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -75,10 +75,11 @@ typedef struct SResultRowInfo { } SResultRowInfo; typedef struct SColMatchItem { - int32_t colId; - int32_t srcSlotId; - int32_t dstSlotId; - bool needOutput; + int32_t colId; + int32_t srcSlotId; + int32_t dstSlotId; + bool needOutput; + SDataType dataType; } SColMatchItem; typedef struct SColMatchInfo { diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index 78c56c0405f6010efc370be8088a367ac12e0f42..7a0d236a3778faed5cab419ce565b9fff7162312 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -64,10 +64,14 @@ typedef int32_t (*_sort_merge_compar_fn_t)(const void* p1, const void* p2, void* /** * * @param type + * @param maxRows keep maxRows at most + * @param maxTupleLength max len of one tuple, for check if heap sort is applicable + * @param sortBufSize sort memory buf size, for check if heap sort is applicable * @return */ SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, int32_t type, int32_t pageSize, int32_t numOfPages, - SSDataBlock* pBlock, const char* idstr); + SSDataBlock* pBlock, const char* idstr, uint64_t maxRows, uint32_t maxTupleLength, + uint32_t sortBufSize); /** * diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 09280295571ac711d8b4cfaaac587e25c2d0733e..cfea233a1ca49e47d8eb26b752f380e59b1dd7c1 100644 --- a/source/libs/executor/src/executil.c +++ 
b/source/libs/executor/src/executil.c @@ -1305,6 +1305,7 @@ int32_t extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod c.colId = pColNode->colId; c.srcSlotId = pColNode->slotId; c.dstSlotId = pNode->slotId; + c.dataType = pColNode->node.resType; taosArrayPush(pList, &c); } } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index c8b66836d53e9feeffa41d8a9cbe781c30cb409c..88e2165a127aff848a72fb8cc82aa48f341f4e34 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1078,6 +1078,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT SOperatorInfo* pOperator = pTaskInfo->pRoot; const char* id = GET_TASKID(pTaskInfo); + if(subType == TOPIC_SUB_TYPE__COLUMN && pOffset->type == TMQ_OFFSET__LOG){ + pOperator = extractOperatorInTree(pOperator, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id); + if (pOperator == NULL) { + return -1; + } + SStreamScanInfo* pInfo = pOperator->info; + SStoreTqReader* pReaderAPI = &pTaskInfo->storageAPI.tqReaderFn; + SWalReader* pWalReader = pReaderAPI->tqReaderGetWalReader(pInfo->tqReader); + walReaderVerifyOffset(pWalReader, pOffset); + } // if pOffset equal to current offset, means continue consume if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.currentOffset)) { return 0; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 008a3697fcc508810af0c30ece6f0c61c408c4ba..c3d5de572fe9b27e3925e9014714e807b3be0e64 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2374,6 +2374,10 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys if (pHandle->vnode) { SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo); + if (pTableScanOp == NULL) { + qError("createTableScanOperatorInfo error, errorcode: %d", pTaskInfo->code); + goto _error; + } 
STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info; if (pHandle->version > 0) { pTSInfo->base.cond.endVersion = pHandle->version; @@ -2801,7 +2805,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { pInfo->sortBufSize = pInfo->bufPageSize * (kWay + 1); int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, - pInfo->pSortInputBlock, pTaskInfo->id.str); + pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0); tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlockImpl, NULL, NULL); diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 585c2e8c541461347475d302cd7a305e18cea336..20fb588a026d7c287fa005188faa854d3a6ebb1f 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -29,6 +29,8 @@ typedef struct SSortOperatorInfo { int64_t startTs; // sort start time uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included. 
SLimitInfo limitInfo; + uint64_t maxTupleLength; + int64_t maxRows; } SSortOperatorInfo; static SSDataBlock* doSort(SOperatorInfo* pOperator); @@ -36,6 +38,7 @@ static int32_t doOpenSortOperator(SOperatorInfo* pOperator); static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len); static void destroySortOperatorInfo(void* param); +static int32_t calcSortOperMaxTupleLength(SSortOperatorInfo* pSortOperInfo, SNodeList* pSortKeys); // todo add limit/offset impl SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo) { @@ -51,6 +54,8 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* int32_t numOfCols = 0; pOperator->exprSupp.pExprInfo = createExprInfo(pSortNode->pExprs, NULL, &numOfCols); pOperator->exprSupp.numOfExprs = numOfCols; + calcSortOperMaxTupleLength(pInfo, pSortNode->pSortKeys); + pInfo->maxRows = pSortNode->maxRows; int32_t numOfOutputCols = 0; int32_t code = @@ -193,9 +198,9 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) { } pInfo->startTs = taosGetTimestampUs(); - // pInfo->binfo.pRes is not equalled to the input datablock. 
- pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1, NULL, pTaskInfo->id.str); + pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1, NULL, pTaskInfo->id.str, + pInfo->maxRows, pInfo->maxTupleLength, tsPQSortMemThreshold * 1024 * 1024); tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, applyScalarFunction, pOperator); @@ -286,6 +291,20 @@ int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* return TSDB_CODE_SUCCESS; } +static int32_t calcSortOperMaxTupleLength(SSortOperatorInfo* pSortOperInfo, SNodeList* pSortKeys) { + SColMatchInfo* pColItem = &pSortOperInfo->matchInfo; + size_t size = taosArrayGetSize(pColItem->pList); + for (size_t i = 0; i < size; ++i) { + pSortOperInfo->maxTupleLength += ((SColMatchItem*)taosArrayGet(pColItem->pList, i))->dataType.bytes; + } + size = LIST_LENGTH(pSortKeys); + for (size_t i = 0; i < size; ++i) { + SOrderByExprNode* pOrderExprNode = (SOrderByExprNode*)nodesListGetNode(pSortKeys, i); + pSortOperInfo->maxTupleLength += ((SColumnNode*)pOrderExprNode->pExpr)->node.resType.bytes; + } + return TSDB_CODE_SUCCESS; +} + //===================================================================================== // Group Sort Operator typedef enum EChildOperatorStatus { CHILD_OP_NEW_GROUP, CHILD_OP_SAME_GROUP, CHILD_OP_FINISHED } EChildOperatorStatus; @@ -384,7 +403,7 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) { // pInfo->binfo.pRes is not equalled to the input datablock. 
pInfo->pCurrSortHandle = - tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1, NULL, pTaskInfo->id.str); + tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1, NULL, pTaskInfo->id.str, 0, 0, 0); tsortSetFetchRawDataFp(pInfo->pCurrSortHandle, fetchNextGroupSortDataBlock, applyScalarFunction, pOperator); @@ -582,7 +601,7 @@ int32_t openMultiwayMergeOperator(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage, - pInfo->pInputBlock, pTaskInfo->id.str); + pInfo->pInputBlock, pTaskInfo->id.str, 0, 0, 0); tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL); tsortSetCompareGroupId(pInfo->pSortHandle, pInfo->groupSort); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 58b3428b5bae2a88cac2ae51e7cfc9f3f7590948..daf06c81d13d0d85aa6d47f3147d822a0e915311 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -19,6 +19,7 @@ #include "tcompare.h" #include "tdatablock.h" #include "tdef.h" +#include "theap.h" #include "tlosertree.h" #include "tpagedbuf.h" #include "tsort.h" @@ -41,6 +42,12 @@ struct SSortHandle { int64_t startTs; uint64_t totalElapsed; + uint64_t maxRows; + uint32_t maxTupleLength; + uint32_t sortBufSize; + BoundedQueue* pBoundedQueue; + uint32_t tmpRowIdx; + int32_t sourceId; SSDataBlock* pDataBlock; SMsortComparParam cmpParam; @@ -61,6 +68,47 @@ struct SSortHandle { static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param); +// | offset[0] | offset[1] |....| nullbitmap | data |...| +static void* createTuple(uint32_t columnNum, uint32_t tupleLen) { + uint32_t totalLen = sizeof(uint32_t) * columnNum + BitmapLen(columnNum) + tupleLen; + return taosMemoryCalloc(1, totalLen); +} +static void destoryTuple(void* t) { taosMemoryFree(t); } + +#define 
tupleOffset(tuple, colIdx) ((uint32_t*)(tuple + sizeof(uint32_t) * colIdx)) +#define tupleSetOffset(tuple, colIdx, offset) (*tupleOffset(tuple, colIdx) = offset) +#define tupleSetNull(tuple, colIdx, colNum) colDataSetNull_f((char*)tuple + sizeof(uint32_t) * colNum, colIdx) +#define tupleColIsNull(tuple, colIdx, colNum) colDataIsNull_f((char*)tuple + sizeof(uint32_t) * colNum, colIdx) +#define tupleGetDataStartOffset(colNum) (sizeof(uint32_t) * colNum + BitmapLen(colNum)) +#define tupleSetData(tuple, offset, data, length) memcpy(tuple + offset, data, length) + +/** + * @param t the tuple pointer addr, if realloced, *t is changed to the new addr + * @param offset copy data into pTuple start from offset + * @param colIndex the columnIndex, for setting null bitmap + * @return the next offset to add field + * */ +static inline size_t tupleAddField(char** t, uint32_t colNum, uint32_t offset, uint32_t colIdx, void* data, size_t length, + bool isNull, uint32_t tupleLen) { + tupleSetOffset(*t, colIdx, offset); + if (isNull) { + tupleSetNull(*t, colIdx, colNum); + } else { + if (offset + length > tupleLen + tupleGetDataStartOffset(colNum)) { + *t = taosMemoryRealloc(*t, offset + length); + } + tupleSetData(*t, offset, data, length); + } + return offset + length; +} + +static void* tupleGetField(char* t, uint32_t colIdx, uint32_t colNum) { + if (tupleColIsNull(t, colIdx, colNum)) return NULL; + return t + *tupleOffset(t, colIdx); +} + +static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* param); + SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) { return createOneDataBlock(pSortHandle->pDataBlock, false); } @@ -71,7 +119,8 @@ SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) { * @return */ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t pageSize, int32_t numOfPages, - SSDataBlock* pBlock, const char* idstr) { + SSDataBlock* pBlock, const char* idstr, uint64_t maxRows, uint32_t 
maxTupleLength, + uint32_t sortBufSize) { SSortHandle* pSortHandle = taosMemoryCalloc(1, sizeof(SSortHandle)); pSortHandle->type = type; @@ -80,6 +129,13 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page pSortHandle->pSortInfo = pSortInfo; pSortHandle->loops = 0; + pSortHandle->maxTupleLength = maxTupleLength; + if (maxRows < 0) + pSortHandle->sortBufSize = 0; + else + pSortHandle->sortBufSize = sortBufSize; + pSortHandle->maxRows = maxRows; + if (pBlock != NULL) { pSortHandle->pDataBlock = createOneDataBlock(pBlock, false); } @@ -150,7 +206,6 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) { if (pSortHandle == NULL) { return; } - tsortClose(pSortHandle); if (pSortHandle->pMergeTree != NULL) { tMergeTreeDestroy(&pSortHandle->pMergeTree); @@ -159,6 +214,7 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) { destroyDiskbasedBuf(pSortHandle->pBuf); taosMemoryFreeClear(pSortHandle->idStr); blockDataDestroy(pSortHandle->pDataBlock); + if (pSortHandle->pBoundedQueue) destroyBoundedQueue(pSortHandle->pBoundedQueue); int64_t fetchUs = 0, fetchNum = 0; tsortClearOrderdSource(pSortHandle->pOrderedSource, &fetchUs, &fetchNum); @@ -769,17 +825,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) { return code; } -int32_t tsortOpen(SSortHandle* pHandle) { - if (pHandle->opened) { - return 0; - } - - if (pHandle->fetchfp == NULL || pHandle->comparFn == NULL) { - return -1; - } - - pHandle->opened = true; - +static bool tsortOpenForBufMergeSort(SSortHandle* pHandle) { int32_t code = createInitialSources(pHandle); if (code != TSDB_CODE_SUCCESS) { return code; @@ -840,7 +886,7 @@ int32_t tsortSetCompareGroupId(SSortHandle* pHandle, bool compareGroupId) { return TSDB_CODE_SUCCESS; } -STupleHandle* tsortNextTuple(SSortHandle* pHandle) { +static STupleHandle* tsortBufMergeSortNextTuple(SSortHandle* pHandle) { if (tsortIsClosed(pHandle)) { return NULL; } @@ -890,6 +936,168 @@ STupleHandle* tsortNextTuple(SSortHandle* pHandle) { 
return &pHandle->tupleHandle; } +static bool tsortIsPQSortApplicable(SSortHandle* pHandle) { + if (pHandle->type != SORT_SINGLESOURCE_SORT) return false; + uint64_t maxRowsFitInMemory = pHandle->sortBufSize / (pHandle->maxTupleLength + sizeof(char*)); + return maxRowsFitInMemory > pHandle->maxRows; +} + +static bool tsortPQCompFn(void* a, void* b, void* param) { + SSortHandle* pHandle = param; + int32_t res = pHandle->comparFn(a, b, param); + if (res < 0) return 1; + return 0; +} + +static bool tsortPQComFnReverse(void*a, void* b, void* param) { + SSortHandle* pHandle = param; + int32_t res = pHandle->comparFn(a, b, param); + if (res > 0) return 1; + return 0; +} + +static int32_t colDataComparFn(const void* pLeft, const void* pRight, void* param) { + char* pLTuple = (char*)pLeft; + char* pRTuple = (char*)pRight; + SSortHandle* pHandle = (SSortHandle*)param; + SArray* orderInfo = (SArray*)pHandle->pSortInfo; + uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock); + for (int32_t i = 0; i < orderInfo->size; ++i) { + SBlockOrderInfo* pOrder = TARRAY_GET_ELEM(orderInfo, i); + void *lData = tupleGetField(pLTuple, pOrder->slotId, colNum); + void *rData = tupleGetField(pRTuple, pOrder->slotId, colNum); + if (!lData && !rData) continue; + if (!lData) return pOrder->nullFirst ? -1 : 1; + if (!rData) return pOrder->nullFirst ? 
1 : -1; + + int type = ((SColumnInfoData*)taosArrayGet(pHandle->pDataBlock->pDataBlock, pOrder->slotId))->info.type; + __compar_fn_t fn = getKeyComparFunc(type, pOrder->order); + + int ret = fn(lData, rData); + if (ret == 0) { + continue; + } else { + return ret; + } + } + return 0; +} + +static int32_t tsortOpenForPQSort(SSortHandle* pHandle) { + pHandle->pBoundedQueue = createBoundedQueue(pHandle->maxRows, tsortPQCompFn, destoryTuple, pHandle); + if (NULL == pHandle->pBoundedQueue) return TSDB_CODE_OUT_OF_MEMORY; + tsortSetComparFp(pHandle, colDataComparFn); + + SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0); + SSortSource* source = *pSource; + + pHandle->pDataBlock = NULL; + uint32_t tupleLen = 0; + PriorityQueueNode pqNode; + while (1) { + // fetch data + SSDataBlock* pBlock = pHandle->fetchfp(source->param); + if (NULL == pBlock) break; + + if (pHandle->beforeFp != NULL) { + pHandle->beforeFp(pBlock, pHandle->param); + } + if (pHandle->pDataBlock == NULL) { + pHandle->pDataBlock = createOneDataBlock(pBlock, false); + } + if (pHandle->pDataBlock == NULL) return TSDB_CODE_OUT_OF_MEMORY; + + size_t colNum = blockDataGetNumOfCols(pBlock); + + if (tupleLen == 0) { + for (size_t colIdx = 0; colIdx < colNum; ++colIdx) { + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, colIdx); + tupleLen += pCol->info.bytes; + if (IS_VAR_DATA_TYPE(pCol->info.type)) { + tupleLen += sizeof(VarDataLenT); + } + } + } + size_t colLen = 0; + for (size_t rowIdx = 0; rowIdx < pBlock->info.rows; ++rowIdx) { + void* pTuple = createTuple(colNum, tupleLen); + if (pTuple == NULL) return TSDB_CODE_OUT_OF_MEMORY; + + uint32_t offset = tupleGetDataStartOffset(colNum); + for (size_t colIdx = 0; colIdx < colNum; ++colIdx) { + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, colIdx); + if (colDataIsNull_s(pCol, rowIdx)) { + offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, 0, 0, true, tupleLen); + } else { + colLen = colDataGetRowLength(pCol, rowIdx); + 
offset = tupleAddField((char**)&pTuple, colNum, offset, colIdx, colDataGetData(pCol, rowIdx), colLen, false, + tupleLen); + } + } + pqNode.data = pTuple; + taosBQPush(pHandle->pBoundedQueue, &pqNode); + } + } + return TSDB_CODE_SUCCESS; +} + +static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) { + blockDataCleanup(pHandle->pDataBlock); + blockDataEnsureCapacity(pHandle->pDataBlock, 1); + // abondan the top tuple if queue size bigger than max size + if (taosBQSize(pHandle->pBoundedQueue) == taosBQMaxSize(pHandle->pBoundedQueue) + 1) { + taosBQPop(pHandle->pBoundedQueue); + } + if (pHandle->tmpRowIdx == 0) { + // sort the results + taosBQSetFn(pHandle->pBoundedQueue, tsortPQComFnReverse); + taosBQBuildHeap(pHandle->pBoundedQueue); + } + if (taosBQSize(pHandle->pBoundedQueue) > 0) { + uint32_t colNum = blockDataGetNumOfCols(pHandle->pDataBlock); + PriorityQueueNode* node = taosBQTop(pHandle->pBoundedQueue); + char* pTuple = (char*)node->data; + + for (uint32_t i = 0; i < colNum; ++i) { + void* pData = tupleGetField(pTuple, i, colNum); + if (!pData) { + colDataSetNULL(bdGetColumnInfoData(pHandle->pDataBlock, i), 0); + } else { + colDataAppend(bdGetColumnInfoData(pHandle->pDataBlock, i), 0, pData, false); + } + } + pHandle->pDataBlock->info.rows++; + pHandle->tmpRowIdx++; + taosBQPop(pHandle->pBoundedQueue); + } + if (pHandle->pDataBlock->info.rows == 0) return NULL; + pHandle->tupleHandle.pBlock = pHandle->pDataBlock; + return &pHandle->tupleHandle; +} + +int32_t tsortOpen(SSortHandle* pHandle) { + if (pHandle->opened) { + return 0; + } + + if (pHandle->fetchfp == NULL || pHandle->comparFn == NULL) { + return -1; + } + + pHandle->opened = true; + if (tsortIsPQSortApplicable(pHandle)) + return tsortOpenForPQSort(pHandle); + else + return tsortOpenForBufMergeSort(pHandle); +} + +STupleHandle* tsortNextTuple(SSortHandle* pHandle) { + if (pHandle->pBoundedQueue) + return tsortPQSortNextTuple(pHandle); + else + return tsortBufMergeSortNextTuple(pHandle); +} + 
bool tsortIsNullVal(STupleHandle* pVHandle, int32_t colIndex) { SColumnInfoData* pColInfoSrc = taosArrayGet(pVHandle->pBlock->pDataBlock, colIndex); return colDataIsNull_s(pColInfoSrc, pVHandle->rowIndex); diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 4365cd8b959752cdd47ce3488ef3519eaf58dacb..1c5bd6d59c0efa105b8322dbf2d6adecd47dbf04 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2415,6 +2415,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { } static int32_t firstLastTransferInfoImpl(SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) { + if (!pInput->hasResult) { + return TSDB_CODE_FAILED; + } + if (pOutput->hasResult) { if (isFirst) { if (pInput->ts > pOutput->ts) { diff --git a/source/libs/function/src/thistogram.c b/source/libs/function/src/thistogram.c index e7d631f638da769fe0d9eabb03762bb983410a56..b56691f35d2933da298b74b13af0d2eee32ebb61 100644 --- a/source/libs/function/src/thistogram.c +++ b/source/libs/function/src/thistogram.c @@ -474,8 +474,8 @@ double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { } ASSERTS(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem, - "tHistogramUniform Error, total:%d, numOfElem:%d, elems[%d].num:%d", - total, numOfElem, j + 1, pHisto->elems[j + 1].num); + "tHistogramUniform Error, total:%ld, numOfElem:%ld, elems[%d].num:%ld", + total, (int64_t)numOfElem, j + 1, pHisto->elems[j + 1].num); double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 3ec802a7cebfc29f33b4c9e61b909908807ae501..8101b342a44e3132c969b3055839fbf7eb55c8b4 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -39,6 +39,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) if (p != NULL) { pIdList = *(SArray **)p; } 
else { + taosMemoryFree(buffer); return NULL; } @@ -48,6 +49,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); if (pg == NULL) { + taosMemoryFree(buffer); return NULL; } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 31a7dfdbc530fb4e2e5742f725e23335ae18eb9f..5b9f44c8126b520715bf32144b99f9af17638174 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -255,6 +255,18 @@ int32_t udfStopUdfd() { return 0; } +int32_t udfGetUdfdPid(int32_t* pUdfdPid) { + SUdfdData *pData = &udfdGlobal; + if (pData->spawnErr) { + return pData->spawnErr; + } + uv_pid_t pid = uv_process_get_pid(&pData->process); + if (pUdfdPid) { + *pUdfdPid = (int32_t)pid; + } + return TSDB_CODE_SUCCESS; +} + //============================================================================================== /* Copyright (c) 2013, Ben Noordhuis * The QUEUE is copied from queue.h under libuv diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 3b827a2f99a446b11236f53acda1fb4da0592e88..93259924d5913cf43eede16184f5a82fb87a6f51 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -965,40 +965,6 @@ int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) { return code; } -int32_t udfdConnectToMnode() { - SConnectReq connReq = {0}; - connReq.connType = CONN_TYPE__UDFD; - tstrncpy(connReq.app, "udfd", sizeof(connReq.app)); - tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user)); - char pass[TSDB_PASSWORD_LEN + 1] = {0}; - taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass); - tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd)); - connReq.pid = taosGetPId(); - connReq.startTime = taosGetTimestampMs(); - strcpy(connReq.sVer, version); - - int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq); - void *pReq = 
rpcMallocCont(contLen); - tSerializeSConnectReq(pReq, contLen, &connReq); - - SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); - msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT; - uv_sem_init(&msgInfo->resultSem, 0); - - SRpcMsg rpcMsg = {0}; - rpcMsg.msgType = TDMT_MND_CONNECT; - rpcMsg.pCont = pReq; - rpcMsg.contLen = contLen; - rpcMsg.info.ahandle = msgInfo; - rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); - - uv_sem_wait(&msgInfo->resultSem); - int32_t code = msgInfo->code; - uv_sem_destroy(&msgInfo->resultSem); - taosMemoryFree(msgInfo); - return code; -} - static bool udfdRpcRfp(int32_t code, tmsg_t msgType) { if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_RPC_BROKEN_LINK || code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED || code == TSDB_CODE_SYN_RESTORING || @@ -1378,23 +1344,6 @@ static int32_t udfdRun() { return 0; } -void udfdConnectMnodeThreadFunc(void *args) { - int32_t retryMnodeTimes = 0; - int32_t code = 0; - while (retryMnodeTimes++ <= TSDB_MAX_REPLICA) { - uv_sleep(100 * (1 << retryMnodeTimes)); - code = udfdConnectToMnode(); - if (code == 0) { - break; - } - fnError("udfd can not connect to mnode, code: %s. 
retry", tstrerror(code)); - } - - if (code != 0) { - fnError("udfd can not connect to mnode"); - } -} - int32_t udfdInitResidentFuncs() { if (strlen(tsUdfdResFuncs) == 0) { return TSDB_CODE_SUCCESS; @@ -1497,9 +1446,6 @@ int main(int argc, char *argv[]) { udfdInitResidentFuncs(); - uv_thread_t mnodeConnectThread; - uv_thread_create(&mnodeConnectThread, udfdConnectMnodeThreadFunc, NULL); - udfdRun(); removeListeningPipe(); diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 2c12c840816266882ea7ccb5ab8a95b84be414ab..bfdcd2b030369033c9f350795107eab95fe8cedd 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -639,6 +639,10 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP ret = indexJsonSearch(arg->ivtIdx, mtm, output->result); indexMultiTermQueryDestroy(mtm); } else { + if (left->colValType == TSDB_DATA_TYPE_GEOMETRY || right->colValType == TSDB_DATA_TYPE_GEOMETRY) { + return TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR; + } + bool reverse = false, equal = false; FilterFunc filterFunc = sifGetFilterFunc(qtype, &reverse, &equal); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 6e4dde4ec1752f4bb3349c9c41658ab0140264ae..8305daa45e2782ac203c2376b9032c3464b66578 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -502,6 +502,7 @@ static int32_t logicSortCopy(const SSortLogicNode* pSrc, SSortLogicNode* pDst) { COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); CLONE_NODE_LIST_FIELD(pSortKeys); COPY_SCALAR_FIELD(groupSort); + COPY_SCALAR_FIELD(maxRows); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 0b449c5bfe7e7543ba824a4c128f055c7aa011e0..99790e0a93f67401719ae0a5343d77bb365d0cfa 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2100,6 +2100,7 
@@ static int32_t jsonToPhysiMergeNode(const SJson* pJson, void* pObj) { static const char* jkSortPhysiPlanExprs = "Exprs"; static const char* jkSortPhysiPlanSortKeys = "SortKeys"; static const char* jkSortPhysiPlanTargets = "Targets"; +static const char* jkSortPhysiPlanMaxRows = "MaxRows"; static int32_t physiSortNodeToJson(const void* pObj, SJson* pJson) { const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj; @@ -2114,6 +2115,9 @@ static int32_t physiSortNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkSortPhysiPlanTargets, pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkSortPhysiPlanMaxRows, pNode->maxRows); + } return code; } @@ -2131,6 +2135,9 @@ static int32_t jsonToPhysiSortNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkSortPhysiPlanTargets, &pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkSortPhysiPlanMaxRows, &pNode->maxRows); + } return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 1ca37defa4a76a6b679e85facae10a6cd758fb80..e79a520615142b977716cc837f5e31fc4bbd73c3 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2594,7 +2594,7 @@ static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) { return code; } -enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS }; +enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS, PHY_SORT_CODE_MAX_ROWS }; static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj; @@ -2609,6 +2609,9 @@ static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = 
tlvEncodeObj(pEncoder, PHY_SORT_CODE_TARGETS, nodeListToMsg, pNode->pTargets); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI64(pEncoder, PHY_SORT_CODE_MAX_ROWS, pNode->maxRows); + } return code; } @@ -2632,6 +2635,9 @@ static int32_t msgToPhysiSortNode(STlvDecoder* pDecoder, void* pObj) { case PHY_SORT_CODE_TARGETS: code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets); break; + case PHY_SORT_CODE_MAX_ROWS: + code = tlvDecodeI64(pTlv, &pNode->maxRows); + break; default: break; } diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index f9b4e54318b4c3c87895ad2b6a1ec6b03444d342..8d35674949801893500f63d1a6e7a9e94683f47a 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -331,6 +331,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, int64_t iv; uint64_t uv; char* endptr = NULL; + int32_t code = TSDB_CODE_SUCCESS; if (isNullValue(pSchema->type, pToken)) { if (TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { @@ -467,8 +468,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, break; } - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_GEOMETRY: { + case TSDB_DATA_TYPE_BINARY: { // Too long values will raise the invalid sql error message if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); @@ -478,6 +478,30 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, break; } + case TSDB_DATA_TYPE_GEOMETRY: { + unsigned char* output = NULL; + size_t size = 0; + + code = parseGeometry(pToken, &output, &size); + if (code != TSDB_CODE_SUCCESS) { + code = buildSyntaxErrMsg(pMsgBuf, getThreadLocalGeosCtx()->errMsg, pToken->z); + } else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { + // Too long values will raise the invalid sql error message + code = 
generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); + } else { + val->pData = taosMemoryMalloc(size); + if (NULL == val->pData) { + code = TSDB_CODE_OUT_OF_MEMORY; + } else { + memcpy(val->pData, output, size); + val->nData = size; + } + } + + geosFreeBuffer(output); + break; + } + case TSDB_DATA_TYPE_NCHAR: { int32_t output = 0; void* p = taosMemoryCalloc(1, pSchema->bytes - VARSTR_HEADER_SIZE); @@ -508,7 +532,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, } } - return TSDB_CODE_SUCCESS; + return code; } // input pStmt->pSql: [(tag1_name, ...)] TAGS (tag1_value, ...) ... @@ -1382,7 +1406,7 @@ static int32_t parseValueTokenImpl(SInsertParseContext* pCxt, const char** pSql, code = buildSyntaxErrMsg(&pCxt->msg, getThreadLocalGeosCtx()->errMsg, pToken->z); } // Too long values will raise the invalid sql error message - else if (size > pSchema->bytes) { + else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); } else { diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 713f12e2294c49bb1327728a7fa162d6313e31f2..4a8d100db310596a800fea8aa17336da02619496 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1027,6 +1027,7 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect return TSDB_CODE_OUT_OF_MEMORY; } + pSort->maxRows = -1; pSort->groupSort = pSelect->groupSort; pSort->node.groupAction = pSort->groupSort ? 
GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR; pSort->node.requireDataOrder = DATA_ORDER_LEVEL_NONE; @@ -1298,6 +1299,7 @@ static int32_t createSetOpSortLogicNode(SLogicPlanContext* pCxt, SSetOperator* p return TSDB_CODE_OUT_OF_MEMORY; } + pSort->maxRows = -1; TSWAP(pSort->node.pLimit, pSetOperator->pLimit); int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 2d1a758f3342f5d4fea5a31d8bc578ca4960d94e..82d883714d4a3782807facf432b37f07e5a162fa 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -123,7 +123,7 @@ static void optSetParentOrder(SLogicNode* pNode, EOrder order, SLogicNode* pNode pNode->inputTsOrder = order; switch (nodeType(pNode)) { // for those nodes that will change the order, stop propagating - //case QUERY_NODE_LOGIC_PLAN_WINDOW: + // case QUERY_NODE_LOGIC_PLAN_WINDOW: case QUERY_NODE_LOGIC_PLAN_JOIN: case QUERY_NODE_LOGIC_PLAN_AGG: case QUERY_NODE_LOGIC_PLAN_SORT: @@ -769,8 +769,9 @@ static bool pushDownCondOptIsColEqualOnCond(SJoinLogicNode* pJoin, SNode* pCond) } SColumnNode* pLeft = (SColumnNode*)(pOper->pLeft); SColumnNode* pRight = (SColumnNode*)(pOper->pRight); - //TODO: add cast to operator and remove this restriction of optimization - if (pLeft->node.resType.type != pRight->node.resType.type || pLeft->node.resType.bytes != pRight->node.resType.bytes) { + // TODO: add cast to operator and remove this restriction of optimization + if (pLeft->node.resType.type != pRight->node.resType.type || + pLeft->node.resType.bytes != pRight->node.resType.bytes) { return false; } SNodeList* pLeftCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0))->pTargets; @@ -2575,7 +2576,7 @@ static void tagScanOptCloneAncestorSlimit(SLogicNode* pTableScanNode) { SLogicNode* pNode = tagScanOptFindAncestorWithSlimit(pTableScanNode); if (NULL != pNode) { - //TODO: only set the slimit now. 
push down slimit later + // TODO: only set the slimit now. push down slimit later pTableScanNode->pSlimit = nodesCloneNode(pNode->pSlimit); ((SLimitNode*)pTableScanNode->pSlimit)->limit += ((SLimitNode*)pTableScanNode->pSlimit)->offset; ((SLimitNode*)pTableScanNode->pSlimit)->offset = 0; @@ -2629,8 +2630,16 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } static bool pushDownLimitOptShouldBeOptimized(SLogicNode* pNode) { - if (NULL == pNode->pLimit || 1 != LIST_LENGTH(pNode->pChildren) || - QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pNode->pChildren, 0))) { + if (NULL == pNode->pLimit || 1 != LIST_LENGTH(pNode->pChildren)) { + return false; + } + + SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); + if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) { + SLimitNode* pChildLimit = (SLimitNode*)(pChild->pLimit); + // if we have pushed down, we skip it + if ((*(SSortLogicNode*)pChild).maxRows != -1) return false; + } else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild)) { return false; } return true; @@ -2644,8 +2653,18 @@ static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); nodesDestroyNode(pChild->pLimit); - pChild->pLimit = pNode->pLimit; - pNode->pLimit = NULL; + if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) { + SLimitNode* pLimitNode = (SLimitNode*)pNode->pLimit; + int64_t maxRows = -1; + if (pLimitNode->limit != -1) { + maxRows = pLimitNode->limit; + if (pLimitNode->offset != -1) maxRows += pLimitNode->offset; + } + ((SSortLogicNode*)pChild)->maxRows = maxRows; + } else { + pChild->pLimit = pNode->pLimit; + pNode->pLimit = NULL; + } pCxt->optimized = true; return TSDB_CODE_SUCCESS; @@ -2898,7 +2917,7 @@ static SSortLogicNode* sortNonPriKeySatisfied(SLogicNode* pNode) { if (sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys)) { return NULL; } - SNode* pSortKeyNode = NULL, *pSortKeyExpr = 
NULL; + SNode *pSortKeyNode = NULL, *pSortKeyExpr = NULL; FOREACH(pSortKeyNode, pSort->pSortKeys) { pSortKeyExpr = ((SOrderByExprNode*)pSortKeyNode)->pExpr; switch (nodeType(pSortKeyExpr)) { @@ -2931,7 +2950,7 @@ static int32_t sortNonPriKeyOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog optFindEligibleNode(pLogicSubplan->pNode, sortNonPriKeyShouldOptimize, pNodeList); SNode* pNode = NULL; FOREACH(pNode, pNodeList) { - SSortLogicNode* pSort = (SSortLogicNode*)pNode; + SSortLogicNode* pSort = (SSortLogicNode*)pNode; SOrderByExprNode* pOrderByExpr = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0); pSort->node.outputTsOrder = pOrderByExpr->order; optSetParentOrder(pSort->node.pParent, pOrderByExpr->order, NULL); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index b3d94a5e47aef06960954b88c612b9568f0e45d4..a349e2c0e9cddb0a7b5f4080649648586748e069 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1374,6 +1374,7 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren if (NULL == pSort) { return TSDB_CODE_OUT_OF_MEMORY; } + pSort->maxRows = pSortLogicNode->maxRows; SNodeList* pPrecalcExprs = NULL; SNodeList* pSortKeys = NULL; diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 246ee13fb00aa7d30857e63a03f18262ffb10510..f352a2bba3089ff5ef7ef89e53a9f0b54d4eff13 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -1018,6 +1018,7 @@ static int32_t stbSplCreatePartSortNode(SSortLogicNode* pSort, SLogicNode** pOut splSetParent((SLogicNode*)pPartSort); pPartSort->pSortKeys = pSortKeys; pPartSort->groupSort = pSort->groupSort; + pPartSort->maxRows = pSort->maxRows; code = stbSplCreateMergeKeys(pPartSort->pSortKeys, pPartSort->node.pTargets, &pMergeKeys); } diff --git a/source/libs/scalar/CMakeLists.txt b/source/libs/scalar/CMakeLists.txt 
index 30c68cb512b47b725caadb454fe3bff008520938..1fe0f9a18db695cf1fc4ad6b36420cdeca0dccf3 100644 --- a/source/libs/scalar/CMakeLists.txt +++ b/source/libs/scalar/CMakeLists.txt @@ -8,13 +8,14 @@ target_include_directories( ) target_link_libraries(scalar - PRIVATE os - PRIVATE util + PRIVATE os + PRIVATE util PRIVATE common PRIVATE nodes PRIVATE function PRIVATE qcom PRIVATE parser + PRIVATE geometry ) if(${BUILD_TEST}) diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h index 1ca8ac1d8c0e9c86e02cde3f317bf7de5af510a8..5fb7b0e90c30521c3af10f596c87db50448eb3aa 100644 --- a/source/libs/scalar/inc/filterInt.h +++ b/source/libs/scalar/inc/filterInt.h @@ -271,8 +271,9 @@ struct SFilterInfo { SFilterPCtx pctx; }; -#define FILTER_NO_MERGE_DATA_TYPE(t) \ - ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON) +#define FILTER_NO_MERGE_DATA_TYPE(t) \ + ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON || \ + (t) == TSDB_DATA_TYPE_GEOMETRY) #define FILTER_NO_MERGE_OPTR(o) ((o) == OP_TYPE_IS_NULL || (o) == OP_TYPE_IS_NOT_NULL || (o) == FILTER_DUMMY_EMPTY_OPTR) #define MR_EMPTY_RES(ctx) (ctx->rs == NULL) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index bbefcc6b3ae96157ea138a68aef0453a1caf7489..b3afbb53c1daa0314ab07e73a16a2bb67a5e24d3 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -133,7 +133,7 @@ __compar_fn_t gDataCompare[] = { setChkInBytes2, setChkInBytes4, setChkInBytes8, comparestrRegexMatch, comparestrRegexNMatch, setChkNotInBytes1, setChkNotInBytes2, setChkNotInBytes4, setChkNotInBytes8, compareChkNotInString, comparestrPatternNMatch, comparewcsPatternNMatch, - comparewcsRegexMatch, comparewcsRegexNMatch, + comparewcsRegexMatch, comparewcsRegexNMatch, compareLenBinaryVal }; __compar_fn_t gInt8SignCompare[] = {compareInt8Val, compareInt8Int16, compareInt8Int32, @@ -257,8 +257,7 @@ int8_t 
filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_GEOMETRY: { + case TSDB_DATA_TYPE_BINARY: { if (optr == OP_TYPE_MATCH) { comparFn = 19; } else if (optr == OP_TYPE_NMATCH) { @@ -297,6 +296,21 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { break; } + case TSDB_DATA_TYPE_GEOMETRY: { + if (optr == OP_TYPE_EQUAL || optr == OP_TYPE_NOT_EQUAL || optr == OP_TYPE_IS_NULL || + optr == OP_TYPE_IS_NOT_NULL) { + comparFn = 30; + } else if (optr == OP_TYPE_IN) { + comparFn = 8; + } else if (optr == OP_TYPE_NOT_IN) { + comparFn = 25; + } else { + terrno = TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR; + return 0; + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: comparFn = 11; break; @@ -1042,12 +1056,12 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *fid) { if (node == NULL) { fltDebug("empty node"); - FLT_ERR_RET(TSDB_CODE_APP_ERROR); + goto _return; } if (nodeType(node) != QUERY_NODE_COLUMN && nodeType(node) != QUERY_NODE_VALUE && nodeType(node) != QUERY_NODE_NODE_LIST) { - FLT_ERR_RET(TSDB_CODE_APP_ERROR); + goto _return; } int32_t type; @@ -1063,6 +1077,7 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f filterAddField(info, v, NULL, type, fid, 0, true, NULL); +_return: return TSDB_CODE_SUCCESS; } @@ -1948,33 +1963,15 @@ int32_t fltInitValFieldData(SFilterInfo *info) { } SDataType *dType = &var->node.resType; - size_t bytes = 0; - - if (type == TSDB_DATA_TYPE_BINARY) { - size_t len = (dType->type == TSDB_DATA_TYPE_BINARY || dType->type == TSDB_DATA_TYPE_NCHAR) ? dType->bytes - : MAX_NUM_STR_SIZE; - bytes = len + 1 + VARSTR_HEADER_SIZE; - - fi->data = taosMemoryCalloc(1, bytes); - } else if (type == TSDB_DATA_TYPE_NCHAR) { - size_t len = (dType->type == TSDB_DATA_TYPE_BINARY || dType->type == TSDB_DATA_TYPE_NCHAR) ? 
dType->bytes - : MAX_NUM_STR_SIZE; - bytes = (len + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; - - fi->data = taosMemoryCalloc(1, bytes); - } else { - fi->data = taosMemoryCalloc(1, sizeof(int64_t)); - } - if (dType->type == type) { + size_t bufBytes = TMAX(dType->bytes, sizeof(int64_t)); + fi->data = taosMemoryCalloc(1, bufBytes); assignVal(fi->data, nodesGetValueFromNode(var), dType->bytes, type); } else { SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))}; out.columnData->info.type = type; out.columnData->info.precision = precision; - if (IS_VAR_DATA_TYPE(type)) { - out.columnData->info.bytes = bytes; - } else { + if (!IS_VAR_DATA_TYPE(type)) { out.columnData->info.bytes = tDataTypes[type].bytes; } @@ -1985,7 +1982,13 @@ int32_t fltInitValFieldData(SFilterInfo *info) { return TSDB_CODE_TSC_INVALID_OPERATION; } - memcpy(fi->data, out.columnData->pData, out.columnData->info.bytes); + size_t bufBytes = IS_VAR_DATA_TYPE(type) ? varDataTLen(out.columnData->pData) + : TMAX(out.columnData->info.bytes, sizeof(int64_t)); + fi->data = taosMemoryCalloc(1, bufBytes); + + size_t valBytes = IS_VAR_DATA_TYPE(type) ? 
varDataTLen(out.columnData->pData) : out.columnData->info.bytes; + memcpy(fi->data, out.columnData->pData, valBytes); + colDataDestroy(out.columnData); taosMemoryFree(out.columnData); } @@ -2751,6 +2754,7 @@ int32_t filterPostProcessRange(SFilterInfo *info) { } int32_t filterGenerateComInfo(SFilterInfo *info) { + terrno = 0; info->cunits = taosMemoryMalloc(info->unitNum * sizeof(*info->cunits)); info->blkUnitRes = taosMemoryMalloc(sizeof(*info->blkUnitRes) * info->unitNum); info->blkUnits = taosMemoryMalloc(sizeof(*info->blkUnits) * (info->unitNum + 1) * info->groupNum); @@ -2758,7 +2762,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { for (uint32_t i = 0; i < info->unitNum; ++i) { SFilterUnit *unit = &info->units[i]; - info->cunits[i].func = filterGetCompFuncIdx(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); + info->cunits[i].func = filterGetCompFuncIdx(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); // set terrno if err info->cunits[i].rfunc = filterGetRangeCompFuncFromOptrs(unit->compare.optr, unit->compare.optr2); info->cunits[i].optr = FILTER_UNIT_OPTR(unit); info->cunits[i].colData = NULL; @@ -2779,7 +2783,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { info->cunits[i].dataType = FILTER_UNIT_DATA_TYPE(unit); } - return TSDB_CODE_SUCCESS; + return terrno; } int32_t filterUpdateComUnits(SFilterInfo *info) { @@ -3336,6 +3340,7 @@ int32_t filterSetExecFunc(SFilterInfo *info) { } int32_t filterPreprocess(SFilterInfo *info) { + int32_t code = TSDB_CODE_SUCCESS; SFilterGroupCtx **gRes = taosMemoryCalloc(info->groupNum, sizeof(SFilterGroupCtx *)); int32_t gResNum = 0; @@ -3361,7 +3366,7 @@ int32_t filterPreprocess(SFilterInfo *info) { filterRewrite(info, gRes, gResNum); - filterGenerateComInfo(info); + FLT_ERR_JRET(filterGenerateComInfo(info)); _return: @@ -3373,7 +3378,7 @@ _return: taosMemoryFreeClear(gRes); - return TSDB_CODE_SUCCESS; + return code; } int32_t fltSetColFieldDataImpl(SFilterInfo *info, void *param, filer_get_col_from_id fp, bool 
fromColId) { @@ -3741,10 +3746,10 @@ int32_t fltSclBuildRangeFromBlockSma(SFltSclColumnRange *colRange, SColumnDataAg taosArrayPush(points, &startPt); taosArrayPush(points, &endPt); } - SFltSclDatum min; + SFltSclDatum min = {0}; fltSclBuildDatumFromBlockSmaValue(&min, colRange->colNode->node.resType.type, pAgg->min); SFltSclPoint minPt = {.excl = false, .start = true, .val = min}; - SFltSclDatum max; + SFltSclDatum max = {0}; fltSclBuildDatumFromBlockSmaValue(&max, colRange->colNode->node.resType.type, pAgg->max); SFltSclPoint maxPt = {.excl = false, .start = false, .val = max}; taosArrayPush(points, &minPt); @@ -4290,30 +4295,27 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { return DEAL_RES_ERROR; } + SColumnNode *refNode = (SColumnNode *)node->pLeft; + SExprNode *exprNode = NULL; if (OP_TYPE_IN != node->opType) { - SColumnNode *refNode = (SColumnNode *)node->pLeft; SValueNode *valueNode = (SValueNode *)node->pRight; if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && TSDB_DATA_TYPE_UBIGINT == valueNode->node.resType.type && valueNode->datum.u <= INT64_MAX) { valueNode->node.resType.type = TSDB_DATA_TYPE_BIGINT; } - int32_t type = vectorGetConvertType(refNode->node.resType.type, valueNode->node.resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; - } + exprNode = &valueNode->node; } else { - SColumnNode *refNode = (SColumnNode *)node->pLeft; SNodeListNode *listNode = (SNodeListNode *)node->pRight; if (LIST_LENGTH(listNode->pNodeList) > 10) { stat->scalarMode = true; return DEAL_RES_CONTINUE; } - int32_t type = vectorGetConvertType(refNode->node.resType.type, listNode->node.resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; - } + exprNode = &listNode->node; + } + int32_t type = vectorGetConvertType(refNode->node.resType.type, exprNode->resType.type); + if (0 != type && type != 
refNode->node.resType.type) { + stat->scalarMode = true; + return DEAL_RES_CONTINUE; } } @@ -4664,7 +4666,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC code = scalarCalculate(info->sclCtx.node, pList, &output); taosArrayDestroy(pList); - FLT_ERR_RET(code); + FLT_ERR_RET(code); // TODO: current errcode returns as true *p = output.columnData; diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index b41eba293bdc64d646c8c39c1112eb19bb27ff49..35256d0c96c5e7e2a51df3f575c3c8bf57341fab 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -26,6 +26,7 @@ #include "tdataformat.h" #include "ttime.h" #include "ttypes.h" +#include "geosWrapper.h" #define LEFT_COL ((pLeftCol->info.type == TSDB_DATA_TYPE_JSON ? (void *)pLeftCol : pLeftCol->pData)) #define RIGHT_COL ((pRightCol->info.type == TSDB_DATA_TYPE_JSON ? (void *)pRightCol : pRightCol->pData)) @@ -378,6 +379,31 @@ static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIn taosMemoryFree(t); } +// todo remove this malloc +static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) { + //[ToDo] support to parse WKB as well as WKT + unsigned char *t = NULL; + size_t len = 0; + + if (initCtxGeomFromText()) { + sclError("failed to init geometry ctx"); + return; + } + if (doGeomFromText(buf, &t, &len)) { + sclDebug("failed to convert text to geometry"); + return; + } + + char *output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE); + memcpy(output + VARSTR_HEADER_SIZE, t, len); + varDataSetLen(output, len); + + colDataSetVal(pOut->columnData, rowIndex, output, false); + + taosMemoryFree(output); + geosFreeBuffer(t); +} + // TODO opt performance, tmp is not needed. 
int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { bool vton = false; @@ -401,6 +427,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { vton = true; } else if (TSDB_DATA_TYPE_TIMESTAMP == pCtx->outType) { func = varToTimestamp; + } else if (TSDB_DATA_TYPE_GEOMETRY == pCtx->outType) { + func = varToGeometry; } else { sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType); return TSDB_CODE_APP_ERROR; @@ -881,7 +909,7 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, } int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { - /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON GEOM VARB DECI BLOB MEDB*/ + /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ /*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, 0, /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0, @@ -890,7 +918,7 @@ int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, 0, /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, 0, /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, 0, - /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, + /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 20, /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, 0, /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, 0, diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 7457b2197e03b456012c63e624eacd170bfffcfa..93bcd6a4d910bf3140169a3b217911e48a532cdd 100644 --- a/source/libs/stream/src/stream.c +++ 
b/source/libs/stream/src/stream.c @@ -74,7 +74,6 @@ void streamSchedByTimer(void* param, void* tmrId) { atomic_store_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE); if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)trigger) < 0) { - taosFreeQitem(trigger); taosTmrReset(streamSchedByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer, &pTask->timer); return; } diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index dc9a1f80bbfcd0bdb55a413d534d934135830cfc..c10ef1e5572d692ae430088d4d7884a2ba5be490 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -360,7 +360,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data; ASSERT(pPos->pRowBuff && pFileState->rowSize > 0); if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); streamStateClearBatch(batch); } @@ -373,7 +373,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, taosMemoryFree(buf); if (streamStateGetBatchSize(batch) > 0) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); } streamStateClearBatch(batch); @@ -385,7 +385,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, int32_t len = 0; sprintf(keyBuf, "%s:%" PRId64 "", taskKey, ((SStreamState*)pFileState->pFileStore)->checkPointId); streamFileStateEncode(&pFileState->flushMark, &valBuf, &len); - code = streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); + streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); taosMemoryFree(valBuf); } { @@ -489,7 +489,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState) { break; } 
memcpy(pNewPos->pRowBuff, pVal, pVLen); - code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->rowSize, &pNewPos, POINTER_BYTES); + code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { destroyRowBuffPos(pNewPos); break; diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 794d80bbfaec7c38eca4b38f78c62022fd6cc7cc..92f34db16d2d84b0edbf284ecb5c0b1c0a5abc60 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -482,6 +482,7 @@ int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* p if (syncLogStorePersist(pLogStore, pNode, pEntry) < 0) { sError("vgId:%d, failed to persist sync log entry from buffer since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + taosMsleep(1); goto _out; } ASSERT(pEntry->index == pBuf->matchIndex); diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index cf796c386297271d1138189ea812fe4b1fbe984c..ae1c775a18f6c47291f3065f83db0c2ff8cee94e 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -364,10 +364,10 @@ void syncLogRecvHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, int64 if (timeDiff > SYNC_HEARTBEAT_SLOW_MS) { pSyncNode->hbSlowNum++; - sNInfo(pSyncNode, - "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 - ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, - DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); + sNTrace(pSyncNode, + "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64 + ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64, + DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff); } sNTrace(pSyncNode, diff --git a/source/libs/wal/src/walMeta.c 
b/source/libs/wal/src/walMeta.c index a12f8051ba982ed627ed0767b76d344678748ca9..1e70ce4a1c4bacac2dc5fdb046fe8adead163770 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -47,9 +47,7 @@ static FORCE_INLINE int walBuildTmpMetaName(SWal* pWal, char* buf) { } static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { - int32_t sz = taosArrayGetSize(pWal->fileInfoSet); - terrno = TSDB_CODE_SUCCESS; - + int32_t sz = taosArrayGetSize(pWal->fileInfoSet); SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); char fnameStr[WAL_FILE_LEN]; walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); @@ -74,13 +72,12 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { int64_t capacity = 0; int64_t readSize = 0; char* buf = NULL; - bool firstTrial = pFileInfo->fileSize < fileSize; int64_t offset = TMIN(pFileInfo->fileSize, fileSize); - int64_t offsetForward = offset - stepSize + walCkHeadSz - 1; - int64_t offsetBackward = offset; int64_t retVer = -1; int64_t lastEntryBeginOffset = 0; int64_t lastEntryEndOffset = 0; + int64_t recordLen = 0; + bool forwardStage = false; // check recover size if (2 * tsWalFsyncDataSizeLimit + offset < end) { @@ -91,14 +88,8 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { // search for the valid last WAL entry, e.g. block by block while (1) { - offset = (firstTrial) ? TMIN(fileSize, offsetForward + stepSize - walCkHeadSz + 1) - : TMAX(0, offsetBackward - stepSize + walCkHeadSz - 1); + offset = (lastEntryEndOffset > 0) ? 
offset : TMAX(0, offset - stepSize + walCkHeadSz - 1); end = TMIN(offset + stepSize, fileSize); - if (firstTrial) { - offsetForward = offset; - } else { - offsetBackward = offset; - } readSize = end - offset; capacity = readSize + sizeof(magic); @@ -129,7 +120,16 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { int64_t pos = 0; SWalCkHead* logContent = NULL; - while ((candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(magic))) != NULL) { + while (true) { + forwardStage = (lastEntryEndOffset > 0 || offset == 0); + terrno = TSDB_CODE_SUCCESS; + if (forwardStage) { + candidate = (readSize - (haystack - buf)) > 0 ? haystack : NULL; + } else { + candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(magic)); + } + + if (candidate == NULL) break; pos = candidate - buf; // validate head @@ -137,13 +137,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { if (len < walCkHeadSz) { break; } + logContent = (SWalCkHead*)(buf + pos); if (walValidHeadCksum(logContent) != 0) { terrno = TSDB_CODE_WAL_CHKSUM_MISMATCH; wWarn("vgId:%d, failed to validate checksum of wal entry header. 
offset:%" PRId64 ", file:%s", pWal->cfg.vgId, offset + pos, fnameStr); haystack = buf + pos + 1; - if (firstTrial) { + if (forwardStage) { break; } else { continue; @@ -151,9 +152,9 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { } // validate body - int64_t size = walCkHeadSz + logContent->head.bodyLen; - if (len < size) { - int64_t extraSize = size - len; + recordLen = walCkHeadSz + logContent->head.bodyLen; + if (len < recordLen) { + int64_t extraSize = recordLen - len; if (capacity < readSize + extraSize + sizeof(magic)) { capacity += extraSize; void* ptr = taosMemoryRealloc(buf, capacity); @@ -184,7 +185,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { wWarn("vgId:%d, failed to validate checksum of wal entry body. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, offset + pos, fnameStr); haystack = buf + pos + 1; - if (firstTrial) { + if (forwardStage) { break; } else { continue; @@ -194,21 +195,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { // found one retVer = logContent->head.version; lastEntryBeginOffset = offset + pos; - lastEntryEndOffset = offset + pos + sizeof(SWalCkHead) + logContent->head.bodyLen; + lastEntryEndOffset = offset + pos + recordLen; // try next - haystack = buf + pos + 1; + haystack = buf + pos + recordLen; } - if (end == fileSize) firstTrial = false; - if (firstTrial) { - if (terrno == TSDB_CODE_SUCCESS) { - continue; - } else { - firstTrial = false; - } - } - if (retVer >= 0 || offset == 0) break; + offset = (lastEntryEndOffset > 0) ? 
lastEntryEndOffset : offset; + if (forwardStage && (terrno != TSDB_CODE_SUCCESS || end == fileSize)) break; } if (retVer < 0) { diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index e32ff3da95ea96a2ad949b2a9434d3e80038eac9..843f9c56dced580f188e2efb01c2f0e20415fc2d 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -225,6 +225,23 @@ int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight) { return compareLenPrefixedWStr(pRight, pLeft); } +int32_t compareLenBinaryVal(const void *pLeft, const void *pRight) { + int32_t len1 = varDataLen(pLeft); + int32_t len2 = varDataLen(pRight); + + int32_t minLen = TMIN(len1, len2); + int32_t ret = memcmp(varDataVal(pLeft), varDataVal(pRight), minLen); + if (ret == 0) { + if (len1 == len2) { + return 0; + } else { + return len1 > len2 ? 1 : -1; + } + } else { + return ret > 0 ? 1 : -1; + } +} + // string > number > bool > null // ref: https://dev.mysql.com/doc/refman/8.0/en/json.html#json-comparison int32_t compareJsonVal(const void *pLeft, const void *pRight) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 0a53ece746f31fb295f818a411ee4f778256f423..d2b9edf753f4bbe4e1ed50e0a7bbaaa70b710cd3 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -405,6 +405,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR, "Json not support in t TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_GROUP_ERROR, "Json not support in group/partition by") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JOB_NOT_EXIST, "Job not exist") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_QWORKER_QUIT, "Vnode/Qnode is quitting") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR, "Geometry not support in this operator") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") @@ -629,7 +630,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is inval TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, 
"Consumer mismatch") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed") -TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE, "Topic num out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE, "Group num out of range 100") // stream TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist") diff --git a/source/util/src/theap.c b/source/util/src/theap.c index 8c1a1db05709e3ef3eceb3329185d82f0b5485ed..d60606008fbacfb958d8e6ba695464da5e903838 100644 --- a/source/util/src/theap.c +++ b/source/util/src/theap.c @@ -187,3 +187,172 @@ void heapRemove(Heap* heap, HeapNode* node) { } void heapDequeue(Heap* heap) { heapRemove(heap, heap->min); } + + +struct PriorityQueue { + SArray* container; + pq_comp_fn fn; + FDelete deleteFn; + void* param; +}; +PriorityQueue* createPriorityQueue(pq_comp_fn fn, FDelete deleteFn, void* param) { + PriorityQueue* pq = (PriorityQueue*)taosMemoryCalloc(1, sizeof(PriorityQueue)); + pq->container = taosArrayInit(1, sizeof(PriorityQueueNode)); + pq->fn = fn; + pq->deleteFn = deleteFn; + pq->param = param; + return pq; +} + +void taosPQSetFn(PriorityQueue* pq, pq_comp_fn fn) { + pq->fn = fn; +} + +void destroyPriorityQueue(PriorityQueue* pq) { + if (pq->deleteFn) + taosArrayDestroyP(pq->container, pq->deleteFn); + else + taosArrayDestroy(pq->container); + taosMemoryFree(pq); +} + +static size_t pqParent(size_t i) { return (--i) >> 1; /* (i - 1) / 2 */ } +static size_t pqLeft(size_t i) { return (i << 1) | 1; /* i * 2 + 1 */ } +static size_t pqRight(size_t i) { return (++i) << 1; /* (i + 1) * 2 */} +static void pqSwapPQNode(PriorityQueueNode* a, PriorityQueueNode* b) { + void * tmp = a->data; + a->data = b->data; + b->data = tmp; +} + +#define pqContainerGetEle(pq, i) ((PriorityQueueNode*)taosArrayGet((pq)->container, (i))) +#define 
pqContainerSize(pq) (taosArrayGetSize((pq)->container)) + +size_t taosPQSize(PriorityQueue* pq) { return pqContainerSize(pq); } + +static void pqHeapify(PriorityQueue* pq, size_t from, size_t last) { + size_t largest = from; + do { + from = largest; + size_t l = pqLeft(from); + size_t r = pqRight(from); + if (l < last && pq->fn(pqContainerGetEle(pq, from)->data, pqContainerGetEle(pq, l)->data, pq->param)) { + largest = l; + } + if (r < last && pq->fn(pqContainerGetEle(pq, largest)->data, pqContainerGetEle(pq, r)->data, pq->param)) { + largest = r; + } + if (largest != from) { + pqSwapPQNode(pqContainerGetEle(pq, from), pqContainerGetEle(pq, largest)); + } + } while (largest != from); +} + +static void pqBuildHeap(PriorityQueue* pq) { + if (pqContainerSize(pq) > 1) { + for (size_t i = pqContainerSize(pq) - 1; i > 0; --i) { + pqHeapify(pq, i, pqContainerSize(pq)); + } + pqHeapify(pq, 0, pqContainerSize(pq)); + } +} + +static void pqReverseHeapify(PriorityQueue* pq, size_t i) { + while (i > 0 && !pq->fn(pqContainerGetEle(pq, i)->data, pqContainerGetEle(pq, pqParent(i))->data, pq->param)) { + size_t parentIdx = pqParent(i); + pqSwapPQNode(pqContainerGetEle(pq, i), pqContainerGetEle(pq, parentIdx)); + i = parentIdx; + } +} + +static void pqUpdate(PriorityQueue* pq, size_t i) { + if (i == 0 || pq->fn(pqContainerGetEle(pq, i)->data, pqContainerGetEle(pq, pqParent(i))->data, pq->param)) { + // if value in pos i is smaller than parent, heapify down from i to the end + pqHeapify(pq, i, pqContainerSize(pq)); + } else { + // if value in pos i is big than parent, heapify up from i + pqReverseHeapify(pq, i); + } +} + +static void pqRemove(PriorityQueue* pq, size_t i) { + if (i == pqContainerSize(pq) - 1) { + taosArrayPop(pq->container); + return; + } + + taosArraySet(pq->container, i, taosArrayGet(pq->container, pqContainerSize(pq) - 1)); + taosArrayPop(pq->container); + pqUpdate(pq, i); +} + +PriorityQueueNode* taosPQTop(PriorityQueue* pq) { + return pqContainerGetEle(pq, 0); 
+} + +void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node) { + taosArrayPush(pq->container, node); + pqReverseHeapify(pq, pqContainerSize(pq) - 1); +} + +void taosPQPop(PriorityQueue* pq) { + PriorityQueueNode* top = taosPQTop(pq); + if (pq->deleteFn) pq->deleteFn(top->data); + pqRemove(pq, 0); +} + +struct BoundedQueue { + PriorityQueue* queue; + uint32_t maxSize; +}; + +BoundedQueue* createBoundedQueue(uint32_t maxSize, pq_comp_fn fn, FDelete deleteFn, void* param) { + BoundedQueue* q = (BoundedQueue*)taosMemoryCalloc(1, sizeof(BoundedQueue)); + q->queue = createPriorityQueue(fn, deleteFn, param); + taosArrayEnsureCap(q->queue->container, maxSize + 1); + q->maxSize = maxSize; + return q; +} + +void taosBQSetFn(BoundedQueue* q, pq_comp_fn fn) { + taosPQSetFn(q->queue, fn); +} + +void destroyBoundedQueue(BoundedQueue* q) { + if (!q) return; + destroyPriorityQueue(q->queue); + taosMemoryFree(q); +} + +void taosBQPush(BoundedQueue* q, PriorityQueueNode* n) { + if (pqContainerSize(q->queue) == q->maxSize + 1) { + PriorityQueueNode* top = pqContainerGetEle(q->queue, 0); + void *p = top->data; + top->data = n->data; + n->data = p; + if (q->queue->deleteFn) q->queue->deleteFn(n->data); + pqHeapify(q->queue, 0, taosBQSize(q)); + } else { + taosPQPush(q->queue, n); + } +} + +PriorityQueueNode* taosBQTop(BoundedQueue* q) { + return taosPQTop(q->queue); +} + +void taosBQBuildHeap(BoundedQueue *q) { + pqBuildHeap(q->queue); +} + +size_t taosBQMaxSize(BoundedQueue* q) { + return q->maxSize; +} + +size_t taosBQSize(BoundedQueue* q) { + return taosPQSize(q->queue); +} + +void taosBQPop(BoundedQueue* q) { + taosPQPop(q->queue); +} diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index f561bd4ed7b4cca5cbcd2be0ceb0188f7db307c0..21fed2e1f5ba73cda0a2375fb66878f2a336db0d 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -33,6 +33,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb3.py 
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py @@ -127,6 +129,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-19201.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3404.py +,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3581.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py @@ -778,7 +781,7 @@ ,,y,script,./test.sh -f tsim/user/basic.sim ,,y,script,./test.sh -f tsim/user/password.sim ,,y,script,./test.sh -f tsim/user/privilege_db.sim -,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim +#,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim ,,y,script,./test.sh -f tsim/user/privilege_topic.sim ,,y,script,./test.sh -f tsim/user/privilege_table.sim ,,y,script,./test.sh -f tsim/db/alter_option.sim diff --git a/tests/script/tsim/parser/limit1_stb.sim b/tests/script/tsim/parser/limit1_stb.sim index 731a218de5d11f11b623a8348344597fb84f78b8..027a4f5c797133979a90a352f5d2b58fd67094ff 100644 --- a/tests/script/tsim/parser/limit1_stb.sim +++ b/tests/script/tsim/parser/limit1_stb.sim @@ -468,7 +468,7 @@ if $data01 != 1 then endi ## supertable aggregation + where + interval + group by order by tag + limit offset -sql select _wstart, max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9),t1 from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 5 and c1 > 0 and c2 < 9 and c3 > 1 and c4 < 7 and c5 > 4 partition by t1 interval(5m) order by t1 desc limit 2 offset 0 +sql select _wstart, max(c1), 
min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9),t1 from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 5 and c1 > 0 and c2 < 9 and c3 > 1 and c4 < 7 and c5 > 4 partition by t1 interval(5m) order by t1 desc, max(c1) asc limit 2 offset 0 if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/parser/limit_stb.sim b/tests/script/tsim/parser/limit_stb.sim index 6950df9ee1b41816feca5c8753efd14489fda063..46bd6260c3b8c726e24e8e07ab0d88f94872a5e9 100644 --- a/tests/script/tsim/parser/limit_stb.sim +++ b/tests/script/tsim/parser/limit_stb.sim @@ -508,7 +508,7 @@ endi ### supertable aggregation + where + interval + group by order by tag + limit offset ## TBASE-345 -sql select _wstart, max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9), t1 from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 5 and c1 > 0 and c2 < 9 and c3 > 1 and c4 < 7 and c5 > 4 partition by t1 interval(5m) order by t1 desc limit 3 offset 0 +sql select _wstart, max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9), t1 from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 5 and c1 > 0 and c2 < 9 and c3 > 1 and c4 < 7 and c5 > 4 partition by t1 interval(5m) order by t1 desc, max(c1) asc limit 3 offset 0 if $rows != 3 then return -1 endi @@ -554,7 +554,7 @@ if $data09 != 4 then return -1 endi -sql select _wstart, max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9), t1 from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 8 and c1 > 0 and c2 < 9 and c3 > 4 and c4 < 7 and c5 > 4 partition by t1 interval(5m) order by t1 desc limit 3 offset 0 +sql select _wstart, max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9), t1 from $stb where ts >= $ts0 and ts <= $tsu and t1 > 1 and t1 < 8 and c1 > 0 and c2 < 9 and c3 > 4 and c4 < 7 and c5 > 4 partition by t1 interval(5m) order by t1 desc, max(c1) asc limit 3 offset 0 if $rows != 3 then return -1 endi 
diff --git a/tests/script/tsim/parser/union.sim b/tests/script/tsim/parser/union.sim index dee5da96e8488a14cc615d7ab3944ccf6558f93e..f0c534ad11b336cade9d79e2f652742ed2dfbd86 100644 --- a/tests/script/tsim/parser/union.sim +++ b/tests/script/tsim/parser/union.sim @@ -126,7 +126,6 @@ endi if $data10 != 1 then return -1 endi - sql (select 'ab' as options from union_tb1 limit 1) union all (select 'dd' as options from union_tb0 limit 1) order by options; if $rows != 2 then return -1 diff --git a/tests/script/tsim/query/r/explain_tsorder.result b/tests/script/tsim/query/r/explain_tsorder.result index 6c63a343de1b945ce681aaceadedfabb612f2136..b69a77ada52d5f6c819a5edb5a292a027d9f320e 100644 --- a/tests/script/tsim/query/r/explain_tsorder.result +++ b/tests/script/tsim/query/r/explain_tsorder.result @@ -2558,3 +2558,243 @@ taos> select a.ts, a.c2, b.c2 from meters as a join (select * from meters order 2022-05-24 00:01:08.000 | 210 | 210 | 2022-05-24 00:01:08.000 | 210 | 210 | +taos> select ts, c2 from meters order by c2; + ts | c2 | +======================================== + 2022-05-21 00:01:08.000 | 11 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-19 00:01:08.000 | 243 | + +taos> select ts, c2 from meters order by c2 limit 4; + ts | c2 | +======================================== + 2022-05-21 00:01:08.000 | 11 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + 
+taos> select ts, c2 from meters order by c2 limit 2,2; + ts | c2 | +======================================== + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + +taos> select ts, c2 from meters order by ts asc, c2 desc limit 10; + ts | c2 | +======================================== + 2022-05-15 00:01:08.000 | 234 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-19 00:01:08.000 | 243 | + +taos> select ts, c2 from meters order by ts asc, c2 desc limit 5,5; + ts | c2 | +======================================== + 2022-05-17 00:01:08.000 | 59 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-19 00:01:08.000 | 243 | + +taos> select ts, c2 from d1 order by c2; + ts | c2 | +======================================== + 2022-05-21 00:01:08.000 | 11 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-19 00:01:08.000 | 243 | + +taos> select ts, c2 from d1 order by c2 limit 4; + ts | c2 | +======================================== + 2022-05-21 00:01:08.000 | 11 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-23 00:01:08.000 | 116 | + +taos> select ts, c2 from d1 order by c2 limit 2,2; + ts | c2 | +======================================== + 2022-05-17 00:01:08.000 | 59 | + 2022-05-23 00:01:08.000 | 116 | + +taos> select ts, c2 from d1 order by ts asc, c2 desc limit 10; + ts | c2 | +======================================== + 2022-05-15 00:01:08.000 | 234 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-17 00:01:08.000 | 59 | + 
2022-05-18 00:01:08.000 | 58 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-24 00:01:08.000 | 210 | + +taos> select ts, c2 from d1 order by ts asc, c2 desc limit 5,5; + ts | c2 | +======================================== + 2022-05-20 00:01:08.000 | 120 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-24 00:01:08.000 | 210 | + +taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc; + _wstart | d | avg(c) | +================================================================================ + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | + 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000 | + 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000 | + 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.600000000 | + 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000 | + 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000 | + 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.600000000 | + 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000 | + 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 
105.200000000 | + 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000 | + 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000 | + 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.800000000 | + 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000 | + 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000 | + 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000 | + 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.800000000 | + 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000 | + 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.400000000 | + 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000 | + 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000 | + 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000 | + 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000 | + +taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2; + _wstart | d | avg(c) | +================================================================================ + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + +taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2,6; + _wstart | d | avg(c) | +================================================================================ + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 
19:13:00.000 | 206.000000000 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | + +taos> select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 10; + last(ts) | d | +======================================== + 2022-05-19 00:01:08.000 | 243 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-21 00:01:08.000 | 11 | + +taos> select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 2,8; + last(ts) | d | +======================================== + 2022-05-24 00:01:08.000 | 210 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-21 00:01:08.000 | 11 | + +taos> select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 9,1; + last(ts) | d | +======================================== + 2022-05-21 00:01:08.000 | 11 | + +taos> select last(ts), c2 as d from d1 group by c2 order by c2 asc limit 2,8; + last(ts) | d | +======================================== + 2022-05-17 00:01:08.000 | 59 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-24 00:01:08.000 | 210 | + 2022-05-15 00:01:08.000 | 234 | + 2022-05-19 00:01:08.000 | 243 | + +taos> select last(ts), c2 as d from d1 group by c2 order by c2 asc limit 9,1; + last(ts) | d | +======================================== + 2022-05-19 00:01:08.000 | 243 | + +taos> select last(ts) as ts, c2 as d from d1 group by c2 order by ts desc, c2 asc limit 10; + ts | d | 
+======================================== + 2022-05-24 00:01:08.000 | 210 | + 2022-05-23 00:01:08.000 | 116 | + 2022-05-22 00:01:08.000 | 196 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-15 00:01:08.000 | 234 | + +taos> select last(ts) as ts, c2 as d from d1 group by c2 order by ts desc, c2 asc limit 2,8; + ts | d | +======================================== + 2022-05-22 00:01:08.000 | 196 | + 2022-05-21 00:01:08.000 | 11 | + 2022-05-20 00:01:08.000 | 120 | + 2022-05-19 00:01:08.000 | 243 | + 2022-05-18 00:01:08.000 | 58 | + 2022-05-17 00:01:08.000 | 59 | + 2022-05-16 00:01:08.000 | 136 | + 2022-05-15 00:01:08.000 | 234 | + +taos> select last(ts) as ts, c2 as d from d1 group by c2 order by ts desc, c2 asc limit 9,1; + ts | d | +======================================== + 2022-05-15 00:01:08.000 | 234 | + diff --git a/tests/script/tsim/query/t/explain_tsorder.sql b/tests/script/tsim/query/t/explain_tsorder.sql index d3264d8895246a4151a56b28cdc7968b5969a4e0..056ac440fee299677b991d0a996ac47a2e854073 100644 --- a/tests/script/tsim/query/t/explain_tsorder.sql +++ b/tests/script/tsim/query/t/explain_tsorder.sql @@ -71,3 +71,30 @@ select a.ts, a.c2, b.c2 from meters as a join meters as b on a.ts = b.ts order b explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts desc\G; explain verbose true select a.ts, a.c2, b.c2 from meters as a join (select ts, c2 from meters order by ts desc) b on a.ts = b.ts order by a.ts asc\G; select a.ts, a.c2, b.c2 from meters as a join (select * from meters order by ts desc) b on a.ts = b.ts order by a.ts asc; + +select ts, c2 from meters order by c2; +select ts, c2 from meters order by c2 limit 4; +select ts, c2 from meters order by c2 limit 2,2; + +select ts, c2 from meters order by ts asc, 
c2 desc limit 10; +select ts, c2 from meters order by ts asc, c2 desc limit 5,5; + +select ts, c2 from d1 order by c2; +select ts, c2 from d1 order by c2 limit 4; +select ts, c2 from d1 order by c2 limit 2,2; + +select ts, c2 from d1 order by ts asc, c2 desc limit 10; +select ts, c2 from d1 order by ts asc, c2 desc limit 5,5; + +select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc; +select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2; +select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2,6; + +select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 10; +select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 2,8; +select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 9,1; +select last(ts), c2 as d from d1 group by c2 order by c2 asc limit 2,8; +select last(ts), c2 as d from d1 group by c2 order by c2 asc limit 9,1; +select last(ts) as ts, c2 as d from d1 group by c2 order by ts desc, c2 asc limit 10; +select last(ts) as ts, c2 as d from d1 group by c2 order by ts desc, c2 asc limit 2,8; +select last(ts) as ts, c2 as d from d1 group by c2 order by ts desc, c2 asc limit 9,1; diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index e539f115317abcd2b64279ec476f8f3464a559d5..fbf9d50c2568bb571349ae1b5874fed7c217f0e1 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ 
-8,6 +8,9 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c udf -v 1 system sh/exec.sh -n dnode1 -s start sql connect +sql alter user root pass 'taosdata2' +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode1 -s start print ======== step1 udf system sh/compile_udf.sh diff --git a/tests/system-test/0-others/splitVGroup.py b/tests/system-test/0-others/splitVGroup.py index 32001f34b09566acd5f1faa6d7358dc1ce1ac150..450996106608aa60cec781392bec8f9463ff2171 100644 --- a/tests/system-test/0-others/splitVGroup.py +++ b/tests/system-test/0-others/splitVGroup.py @@ -328,9 +328,28 @@ class TDTestCase: tdLog.exit("split vgroup transaction is not finished after executing 50s") return False + # split error + def expectSplitError(self, dbName): + vgids = self.getVGroup(dbName) + selid = random.choice(vgids) + sql = f"split vgroup {selid}" + tdLog.info(sql) + tdSql.error(sql) + + # expect split ok + def expectSplitOk(self, dbName): + # split vgroup + vgList1 = self.getVGroup(dbName) + self.splitVGroup(dbName) + vgList2 = self.getVGroup(dbName) + vgNum1 = len(vgList1) + 1 + vgNum2 = len(vgList2) + if vgNum1 != vgNum2: + tdLog.exit(f" vglist len={vgNum1} is not same for expect {vgNum2}") + return + # split empty database - def splitEmptyDB(self): - + def splitEmptyDB(self): dbName = "emptydb" vgNum = 2 # create database @@ -339,17 +358,33 @@ class TDTestCase: tdSql.execute(sql) # split vgroup - self.splitVGroup(dbName) - vgList = self.getVGroup(dbName) - vgNum1 = len(vgList) - vgNum2 = vgNum + 1 - if vgNum1 != vgNum2: - tdLog.exit(f" vglist len={vgNum1} is not same for expect {vgNum2}") - return + self.expectSplitOk(dbName) + + + # forbid + def checkForbid(self): + # stream + tdLog.info("check forbid split having stream...") + tdSql.execute("create database streamdb;") + tdSql.execute("use streamdb;") + tdSql.execute("create table ta(ts timestamp, age int);") + tdSql.execute("create stream ma into sta as select count(*) from ta interval(1s);") + 
self.expectSplitError("streamdb") + tdSql.execute("drop stream ma;") + self.expectSplitOk("streamdb") + + # topic + tdLog.info("check forbid split having topic...") + tdSql.execute("create database topicdb wal_retention_period 10;") + tdSql.execute("use topicdb;") + tdSql.execute("create table ta(ts timestamp, age int);") + tdSql.execute("create topic toa as select * from ta;") + self.expectSplitError("topicdb") + tdSql.execute("drop topic toa;") + self.expectSplitOk("topicdb") # run def run(self): - # prepare env self.prepareEnv() @@ -360,12 +395,13 @@ class TDTestCase: # check two db query result same self.checkResult() - tdLog.info(f"split vgroup i={i} passed.") # split empty db - self.splitEmptyDB() + self.splitEmptyDB() + # check topic and stream forbid + self.checkForbid() # stop def stop(self): diff --git a/tests/system-test/2-query/limit.py b/tests/system-test/2-query/limit.py index c00e3b7d56af46f662ddb855a34e154e7c723b80..4774602d691cbcb516e3400ae14eea0a62d7fe0b 100644 --- a/tests/system-test/2-query/limit.py +++ b/tests/system-test/2-query/limit.py @@ -321,7 +321,7 @@ class TDTestCase: limit = 5 offset = paraDict["rowsPerTbl"] * 2 offset = offset - 2 - sqlStr = f"select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1 limit %d offset %d"%(limit, offset) + sqlStr = f"select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1, max(c1) limit %d offset %d"%(limit, offset) # tdLog.info("====sql:%s"%(sqlStr)) tdSql.query(sqlStr) tdSql.checkRows(1) diff --git a/tests/system-test/7-tmq/checkOffsetRowParams.py b/tests/system-test/7-tmq/checkOffsetRowParams.py index 
8a24148064c8125513f07bab0745865a167126cd..f7e4c61c9c115b2e3048748d0fe3965e5e6dc1d2 100644 --- a/tests/system-test/7-tmq/checkOffsetRowParams.py +++ b/tests/system-test/7-tmq/checkOffsetRowParams.py @@ -245,7 +245,7 @@ class TDTestCase: tdSql.query("show consumers") tdSql.checkRows(1) - tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000,reset:earliest") + tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000ms,reset:earliest") time.sleep(2) tdLog.info("start insert data") diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py new file mode 100644 index 0000000000000000000000000000000000000000..f48eaa84d4eb7ad7b97115015de077eb05da3479 --- /dev/null +++ b/tests/system-test/7-tmq/tmqParamsTest.py @@ -0,0 +1,178 @@ + +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.wal_retention_period1 = 3600 + self.wal_retention_period2 = 1 + self.commit_value_list = ["true", "false"] + self.offset_value_list = ["", "earliest", "latest", "none"] + self.tbname_value_list = ["true", "false"] + self.snapshot_value_list = ["true", "false"] + + # self.commit_value_list = ["true"] + # self.offset_value_list = ["none"] + # self.tbname_value_list = ["true"] + # self.snapshot_value_list = ["true"] + + def tmqParamsTest(self): + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'vgroups': 4, + 'stbName': 'stb', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 
10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'auto_commit_interval': "100"} + + + start_group_id = 1 + for snapshot_value in self.snapshot_value_list: + for commit_value in self.commit_value_list: + for offset_value in self.offset_value_list: + for tbname_value in self.tbname_value_list: + topic_name = 'topic1' + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + + tdLog.info("create topics from stb with filter") + queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topic_name, queryString) + tdSql.query(f'select * from information_schema.ins_databases') + db_wal_retention_period_list = list(map(lambda x:x[-8] if x[0] == paraDict['dbName'] else None, tdSql.queryResult)) + for i in range(len(db_wal_retention_period_list)): + if db_wal_retention_period_list[0] is None or db_wal_retention_period_list[-1] is None: + db_wal_retention_period_list.remove(None) + if snapshot_value =="true": + if db_wal_retention_period_list[0] != self.wal_retention_period2: + tdSql.execute(f"alter database {paraDict['dbName']} wal_retention_period {self.wal_retention_period2}") + time.sleep(self.wal_retention_period2+1) + tdSql.execute(f'flush database {paraDict["dbName"]}') + else: + if 
db_wal_retention_period_list[0] != self.wal_retention_period1: + tdSql.execute(f"alter database {paraDict['dbName']} wal_retention_period {self.wal_retention_period1}") + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expected_res = tdSql.queryRows + group_id = "csm_" + str(start_group_id) + consumer_dict = { + "group.id": group_id, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.commit.interval.ms": paraDict["auto_commit_interval"], + "enable.auto.commit": commit_value, + "auto.offset.reset": offset_value, + "experimental.snapshot.enable": snapshot_value, + "msg.with.table.name": tbname_value + } + consumer_commit = 1 if consumer_dict["enable.auto.commit"] == "true" else 0 + consumer_tbname = 1 if consumer_dict["msg.with.table.name"] == "true" else 0 + consumer_ret = "earliest" if offset_value == "" else offset_value + expected_parameters=f'tbname:{consumer_tbname},commit:{consumer_commit},interval:{paraDict["auto_commit_interval"]}ms,reset:{consumer_ret}' + if len(offset_value) == 0: + del consumer_dict["auto.offset.reset"] + consumer = Consumer(consumer_dict) + consumer.subscribe([topic_name]) + tdLog.info(f"enable.auto.commit: {commit_value}, auto.offset.reset: {offset_value}, experimental.snapshot.enable: {snapshot_value}, msg.with.table.name: {tbname_value}") + stop_flag = 0 + try: + while True: + res = consumer.poll(1) + tdSql.query('show consumers;') + consumer_info = tdSql.queryResult[0][-1] + if offset_value == "latest": + if not res and stop_flag == 1: + break + else: + if not res: + break + # err = res.error() + # if err is not None: + # raise err + # val = res.value() + # for block in val: + # print(block.fetchall()) + if offset_value == "latest" and stop_flag == 0: + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],int(round(time.time()*1000))) + stop_flag = 1 + finally: + consumer.unsubscribe() + 
consumer.close() + tdSql.checkEqual(consumer_info, expected_parameters) + start_group_id += 1 + tdSql.query('show subscriptions;') + subscription_info = tdSql.queryResult + if snapshot_value == "true": + if offset_value != "earliest" and offset_value != "": + if offset_value == "latest": + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0")), subscription_info)) + tdSql.checkEqual(sum(offset_value_list) > 0, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + elif offset_value == "none": + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, ['none']*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [0]*len(subscription_info)) + else: + if offset_value != "none": + offset_value_str = ",".join(list(map(lambda x: x[-2], subscription_info))) + tdSql.checkEqual("tsdb" in offset_value_str, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + else: + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, [None]*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [None]*len(subscription_info)) + else: + if offset_value != "none": + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0")), subscription_info)) + tdSql.checkEqual(sum(offset_value_list) > 0, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + else: + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, ['none']*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], 
subscription_info)) + tdSql.checkEqual(rows_value_list, [0]*len(subscription_info)) + tdSql.execute(f"drop topic if exists {topic_name}") + tdSql.execute(f'drop database if exists {paraDict["dbName"]}') + + def run(self): + self.tmqParamsTest() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/99-TDcase/TS-3581.py b/tests/system-test/99-TDcase/TS-3581.py new file mode 100644 index 0000000000000000000000000000000000000000..18488af0a6a4a8c2d0650156d468f589c93325a7 --- /dev/null +++ b/tests/system-test/99-TDcase/TS-3581.py @@ -0,0 +1,79 @@ +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def create_tables(self): + tdSql.execute(f'''CREATE STABLE `dwd_log_master` (`ts` TIMESTAMP, `dim_ip` NCHAR(64)) TAGS (`group_id` BIGINT, `st_hour` NCHAR(2), `org_id` NCHAR(32), + `dev_manufacturer_name` NCHAR(64), `dev_manufacturer_id` INT, `dev_category_name` NCHAR(64), `dev_category_id` INT, 
`dev_feature_name` NCHAR(64), + `dev_feature_id` INT, `dev_ip` NCHAR(64), `black_list` TINYINT, `white_list` TINYINT)''') + tdSql.execute(f'''CREATE TABLE `dwd_log_master_475021043` USING `dwd_log_master` (`group_id`, `st_hour`, `org_id`, `dev_manufacturer_name`, `dev_manufacturer_id`, + `dev_category_name`, `dev_category_id`, `dev_feature_name`, `dev_feature_id`, `dev_ip`, `black_list`, `white_list`) TAGS + (475021043, "14", NULL, NULL, NULL, NULL, NULL, NULL, NULL, "172.18.22.230", NULL, NULL)''') + + def insert_data(self): + tdLog.debug("start to insert data ............") + + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:30.000','192.168.192.102')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:31.000','172.18.23.249')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:32.000','192.168.200.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:33.000','172.18.22.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:34.000','192.168.210.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:35.000','192.168.192.100')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:36.000','192.168.192.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:37.000','172.18.23.231')") + + tdLog.debug("insert data ............ 
[OK]") + + def run(self): + tdSql.prepare() + self.create_tables() + self.insert_data() + tdLog.printNoPrefix("======== test TS-3581") + + for i in range(100): + tdSql.query(f"select first(ts), last(ts), count(*) from dwd_log_master;") + tdSql.checkRows(1) + print(tdSql.queryResult) + tdSql.checkData(0, 0, '2023-06-26 14:38:30.000') + return + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/eco-system/util/Consumer.py b/tests/system-test/eco-system/util/Consumer.py new file mode 100644 index 0000000000000000000000000000000000000000..b483253a9582efe7202ff90de22c855f87fcee3b --- /dev/null +++ b/tests/system-test/eco-system/util/Consumer.py @@ -0,0 +1,82 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +# +# The options wal_retention_period and wal_retention_size work well +# + +import taos +from taos.tmq import Consumer + +import os +import sys +import threading +import json +import time +from datetime import date +from datetime import datetime +from datetime import timedelta +from os import path + + +# consume topic +def consume_topic(topic_name, consume_cnt, wait): + print("start consume...") + consumer = Consumer( + { + "group.id": "tg2", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "enable.auto.commit": "true", + } + ) + print("start subscrite...") + consumer.subscribe([topic_name]) + + cnt = 0 + try: + while True and cnt < consume_cnt: + res = consumer.poll(1) + if not res: + if wait: + continue + else: + break + err = res.error() + if err is not None: + raise err + val = res.value() + cnt += 1 + print(f" consume {cnt} ") + for block in val: + print(block.fetchall()) + finally: + consumer.unsubscribe() + consumer.close() + + +if __name__ == "__main__": + print(sys.argv) + if len(sys.argv) < 2: + + print(" please input topic name for consume . 
-c for wait") + else: + wait = False + if "-c" == sys.argv[1]: + wait = True + topic = sys.argv[2] + else: + topic = sys.argv[1] + + print(f' wait={wait} topic={topic}') + consume_topic(topic, 10000000, wait) \ No newline at end of file diff --git a/tests/system-test/eco-system/util/restartDnodes.py b/tests/system-test/eco-system/util/restartDnodes.py new file mode 100644 index 0000000000000000000000000000000000000000..feee260fdf49957577a2204831de54e8183b222c --- /dev/null +++ b/tests/system-test/eco-system/util/restartDnodes.py @@ -0,0 +1,84 @@ +import time +import os +import subprocess +import random +import platform + +class dnode(): + def __init__(self, pid, path): + self.pid = pid + self.path = path + +# run exePath no wait finished +def runNoWait(exePath): + if platform.system().lower() == 'windows': + cmd = f"mintty -h never {exePath}" + else: + cmd = f"nohup {exePath} > /dev/null 2>&1 & " + + if os.system(cmd) != 0: + return False + else: + return True + +# get online dnodes +def getDnodes(): + cmd = "ps aux | grep taosd | awk '{{print $2,$11,$12,$13}}'" + result = os.system(cmd) + result=subprocess.check_output(cmd,shell=True) + strout = result.decode('utf-8').split("\n") + dnodes = [] + + for line in strout: + cols = line.split(' ') + if len(cols) != 4: + continue + exepath = cols[1] + if len(exepath) < 5 : + continue + if exepath[-5:] != 'taosd': + continue + + # add to list + path = cols[1] + " " + cols[2] + " " + cols[3] + dnodes.append(dnode(cols[0], path)) + + print(" show dnodes cnt=%d...\n"%(len(dnodes))) + for dn in dnodes: + print(f" pid={dn.pid} path={dn.path}") + + return dnodes + +def restartDnodes(dnodes, cnt, seconds): + print(f"start dnode cnt={cnt} wait={seconds}s") + selects = random.sample(dnodes, cnt) + for select in selects: + print(f" kill -9 {select.pid}") + cmd = f"kill -9 {select.pid}" + os.system(cmd) + print(f" restart {select.path}") + if runNoWait(select.path) == False: + print(f"run {select.path} failed.") + raise 
Exception("exe failed.") + print(f" sleep {seconds}s ...") + time.sleep(seconds) + +def run(): + # kill seconds interval + killLoop = 10 + minKill = 1 + maxKill = 10 + for i in range(killLoop): + dnodes = getDnodes() + killCnt = 0 + if len(dnodes) > 0: + killCnt = random.randint(1, len(dnodes)) + restartDnodes(dnodes, killCnt, random.randint(1, 5)) + + seconds = random.randint(minKill, maxKill) + print(f"----------- kill loop i={i} killCnt={killCnt} done. do sleep {seconds}s ... \n") + time.sleep(seconds) + + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index d8920cb4c3d79aacb441bf9a1f512f4a5a4f62b6..af7f13c69c6088017f33cfabe3b81a17c9ab2587 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -17,6 +17,9 @@ #include #include +// save current database name +char curDBName[128] = ""; // TDB_MAX_DBNAME_LEN is 24, put large + int shell_conn_ws_server(bool first) { char cuttedDsn[SHELL_WS_DSN_BUFF] = {0}; int dsnLen = strlen(shell.args.dsn); @@ -59,6 +62,14 @@ int shell_conn_ws_server(bool first) { fprintf(stdout, "successfully connected to cloud service\n"); } fflush(stdout); + + // switch to current database if have + if(curDBName[0] !=0) { + char command[256]; + sprintf(command, "use %s;", curDBName); + shellRunSingleCommandWebsocketImp(command); + } + return 0; } @@ -290,7 +301,46 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { - fprintf(stdout, "Database changed.\r\n\r\n"); + + // copy dbname to curDBName + char *p = command; + bool firstStart = false; + bool firstEnd = false; + int i = 0; + while (*p != 0) { + if (*p != ' ') { + // not blank + if (!firstStart) { + firstStart = true; + } else if (firstEnd) { + if(*p == ';' && *p != '\\') { + break; + } + // database name + curDBName[i++] = *p; + if(i + 4 > sizeof(curDBName)) { 
+ // DBName is too long, reset zero and break + i = 0; + break; + } + } + } else { + // blank + if(firstStart == true && firstEnd == false){ + firstEnd = true; + } + if(firstStart && firstEnd && i > 0){ + // blank after database name + break; + } + } + // move next + p++; + } + // append end + curDBName[i] = 0; + + fprintf(stdout, "Database changed to %s.\r\n\r\n", curDBName); fflush(stdout); ws_free_result(res); return;