diff --git a/Jenkinsfile2 b/Jenkinsfile2
index db49ab27d76f03bbaab0e0bf4aeba74b2f7ae361..a2b55e3acca0c141a2d550ccabb5bb129adb3d7e 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -269,7 +269,7 @@ pipeline {
}
}
stage('linux test') {
- agent{label " slave3_0 || slave15 || slave16 || slave17 "}
+ agent{label " worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()
@@ -287,9 +287,9 @@ pipeline {
'''
sh '''
cd ${WKC}/tests/parallel_test
- export DEFAULT_RETRY_TIME=1
+ export DEFAULT_RETRY_TIME=2
date
- timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
+ timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
'''
}
}
diff --git a/cmake/cmake.options b/cmake/cmake.options
index cb6fd1400d43b6073d81ab43e46140343b277512..ab3c5ac1ad08b98ee2dbe09692584be63e477d71 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -49,7 +49,7 @@ IF(${TD_WINDOWS})
option(
BUILD_TEST
"If build unit tests using googletest"
- OFF
+ ON
)
ELSE ()
diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md
index 269bc1d2b5ddfa25c42652d8f639bfe2fb1d42e5..cb01b3a918778abc6c7891c1ff185f1db32d3d36 100644
--- a/docs-cn/12-taos-sql/index.md
+++ b/docs-cn/12-taos-sql/index.md
@@ -7,8 +7,6 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。

-TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。
-
本章节 SQL 语法遵循如下约定:

- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身
@@ -37,4 +35,4 @@ import DocCardList from '@theme/DocCardList';

import {useCurrentSidebarCategory} from '@docusaurus/theme-common';

-```
\ No newline at end of file
+```
diff --git a/docs-en/05-get-started/index.md b/docs-en/05-get-started/index.md
index 858dd6ac56e3a523220903fc63335dfdc573b752..56958ef3ec1c206ee0cff45c67fd3c3a6fa6753a 100644
--- a/docs-en/05-get-started/index.md
+++ b/docs-en/05-get-started/index.md
@@ -130,7 +130,7 @@ After TDengine server is running,execute `taosBenchmark` (previously named tao
taosBenchmark
```

-This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDieo".
+This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). The timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set randomly from 1 to 10, and location is set to "California.SanFrancisco" or "California.SanDiego".

This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server.
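Once `taosBenchmark` finishes, the ingested data can be checked directly from the TDengine CLI. A minimal sketch, assuming the default `test` database created by the command above:

```sql
-- Count all rows ingested by taosBenchmark (100 million are expected)
SELECT COUNT(*) FROM test.meters;

-- Aggregate current, voltage and phase across all meters, grouped by the location tag
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters GROUP BY location;
```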
diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md
index 61639e34404477d3bb5785da129a1d922a4d020e..0ee61740cc8b8aad7dd39707a1153b022822f0a9 100644
--- a/docs-en/07-develop/08-udf.md
+++ b/docs-en/07-develop/08-udf.md
@@ -1,24 +1,31 @@
---
sidebar_label: UDF
title: User Defined Functions
-description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand the query capability"
+description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
---

-In some use cases, the query capability required by application programs can't be achieved directly by builtin functions. With UDF, the functions developed by users can be utilized by query framework to meet some special requirements. UDF normally takes one column of data as input, but can also support the result of sub query as input.
+In some use cases, built-in functions are not adequate for the query capability required by application programs. With UDF, the functions developed by users can be utilized by the query framework to meet business and application requirements. UDF normally takes one column of data as input, but can also support the result of a sub-query as input.

-From version 2.2.0.0, UDF programmed in C/C++ language can be supported by TDengine.
+From version 2.2.0.0, UDFs written in C/C++ are supported by TDengine.

-Two kinds of functions can be implemented by UDF: scalar function and aggregate function.
-## Define UDF
+## Types of UDF
+
+Two kinds of functions can be implemented by UDF: scalar functions and aggregate functions.
+
+A scalar function returns one output row for each input row, while an aggregate function returns either 0 or 1 row.
+
+In the case of a scalar function, you only have to implement the "normal" function template.
+
+In the case of an aggregate function, in addition to the "normal" function, you also need to implement the "merge" and "finalize" function templates even if the implementation is empty. This will become clear in the sections below.

### Scalar Function

-Below function template can be used to define your own scalar function.
+As mentioned earlier, a scalar UDF only has to implement the "normal" function template. The function template below can be used to define your own scalar function.

`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)`

-`udfNormalFunc` is the place holder of function name, a function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.
+`udfNormalFunc` is the placeholder for a function name. A function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.

- Definitions of the parameters:

@@ -30,20 +30,24 @@ Below function template can be used to define your own scalar function.
- numOfRows:the number of rows in the input data
- ts: the column of timestamp corresponding to the input data
- dataOutput:the buffer for output data, total size is `oBytes * numberOfRows`
- - interBuf:the buffer for intermediate result, its size is specified by `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result, it's allocated and freed by TDengine.
+ - interBuf:the buffer for an intermediate result. Its size is specified by the `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not the same as the final result. This buffer is allocated and freed by TDengine.
- tsOutput:the column of timestamps corresponding to the output data; it can be used to output timestamp together with the output data if it's not NULL
- numOfOutput:the number of rows in output data
- buf:for the state exchange between UDF and TDengine

- [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of the simplest UDF implementations, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a column passed in which can be filtered using `where` clause and outputs the result.
+ [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of a very simple UDF implementation, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a passed-in column, which can be filtered using the `where` clause, and outputs the result.

### Aggregate Function

-Below function template can be used to define your own aggregate function.
+For an aggregate UDF, as mentioned earlier, you must implement a "normal" function template (described above) and also implement the "merge" and "finalize" templates.

-`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+#### Merge Function Template

-`udfMergeFunc` is the place holder of function name, the function implemented with the above template is used to aggregate the intermediate result, only can be used in the aggregate query for STable.
+The function template below can be used to define your own merge function for an aggregate UDF.
+
+`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+
+`udfMergeFunc` is the placeholder for a function name. The function implemented with the above template is used to aggregate intermediate results and can only be used in the aggregate query for STable.

Definitions of the parameters:

@@ -53,17 +64,11 @@ Definitions of the parameters:
- numOfOutput:number of rows in the output data
- buf:for the state exchange between UDF and TDengine

-[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an user defined aggregate function to get the maximum from the absolute value of a column.
-
-The internal processing is that the data affected by the select statement will be divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate of each sub table, then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate to generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc` to generate the final result, which contain either 0 or 1 row.
-
-Other typical scenarios, like covariance, can also be achieved by aggregate UDF.
+#### Finalize Function Template

-### Finalize
+The function template below can be used to finalize the result of your own UDF, normally used when interBuf is used.
-Below function template can be used to finalize the result of your own UDF, normally used when interBuf is used.
-
-`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
+`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`

`udfFinalizeFunc` is the place holder of function name, definitions of the parameter are as below:

@@ -72,47 +77,64 @@ Below function template can be used to finalize the result of your own UDF, norm
- numOfOutput:number of output data, can only be 0 or 1 for aggregate function
- buf:for state exchange between UDF and TDengine

-## UDF Conventions
+### Example abs_max.c
+
+[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an example of a user defined aggregate function to get the maximum from the absolute values of a column.
+
+The internal processing happens as follows. The results of the select statement are divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate results for each sub table. Then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate and generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc`, i.e. `abs_max_finalize` in this example, to generate the final result, which contains either 0 or 1 row.
+
+Other typical aggregation functions, such as covariance, can also be implemented using an aggregate UDF.

-The naming of 3 kinds of UDF, i.e. udfNormalFunc, udfMergeFunc, and udfFinalizeFunc is required to have same prefix, i.e. the actual name of udfNormalFunc, which means udfNormalFunc doesn't need a suffix following the function name. While udfMergeFunc should be udfNormalFunc followed by `_merge`, udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. The naming convention is part of UDF framework, TDengine follows this convention to invoke corresponding actual functions.\
+## UDF Naming Conventions

-According to the kind of UDF to implement, the functions that need to be implemented are different.
+The naming convention for the 3 kinds of function templates required by UDF is as follows:
+ - udfNormalFunc, udfMergeFunc, and udfFinalizeFunc are required to have the same prefix, i.e. the actual name of udfNormalFunc. The udfNormalFunc doesn't need a suffix following the function name.
+ - udfMergeFunc should be udfNormalFunc followed by `_merge`
+ - udfFinalizeFunc should be udfNormalFunc followed by `_finalize`.
+
+The naming convention is part of TDengine's UDF framework. TDengine follows this convention to invoke the corresponding actual functions.

-- Scalar function:udfNormalFunc is required
-- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required
+Depending on whether you are creating a scalar UDF or an aggregate UDF, the functions that you need to implement are different.

-To be more accurate, assuming we want to implement a UDF named "foo". If the function is a scalar function, what we really need to implement is `foo`; if the function is aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. For aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.
+- Scalar function:udfNormalFunc is required.
+- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required.
+
+For clarity, assuming we want to implement a UDF named "foo":
+- If the function is a scalar function, we only need to implement the "normal" function template and it should be named simply `foo`.
+- If the function is an aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. Note that for aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.

## Compile UDF

-The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library.
+The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after being compiled into a dynamically linked library (DLL).

-For example, the example UDF `add_one.c` mentioned in previous sections need to be compiled into DLL using below command on Linux Shell.
+For example, the UDF `add_one.c` mentioned earlier can be compiled into a DLL using the command below in a Linux shell.

```bash
gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
```

-The generated DLL file `dd_one.so` can be used later when creating UDF. It's recommended to use GCC not older than 7.5.
+The generated DLL file `add_one.so` can be used later when creating a UDF. It's recommended to use GCC not older than 7.5.

## Create and Use UDF

+When a UDF is created in a TDengine instance, it is available across the databases in that instance.
+
### Create UDF

-SQL command can be executed on the same hos where the generated UDF DLL resides to load the UDF DLL into TDengine, this operation can't be done through REST interface or web console. Once created, all the clients of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
+The SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through the REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDFs are stored in the management node of TDengine. The UDFs loaded in TDengine will still be available after TDengine is restarted.

-When creating UDF, it needs to be clarified as either scalar function or aggregate function. If the specified type is wrong, the SQL statements using the function would fail with error. Besides, the input type and output type don't need to be same in UDF, but the input data type and output data type need to be consistent with the UDF definition.
+When creating a UDF, the type of UDF, i.e. a scalar function or an aggregate function, must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input type and output type don't need to be the same in UDF, but the input data type and output data type must be consistent with the UDF definition.
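Before creating a new UDF it can be useful to check what is already registered. As a hedged sketch (verify that your TDengine version supports this command), the `SHOW FUNCTIONS` statement lists the UDFs currently known to the cluster:

```sql
-- List registered UDFs, including their names and output types
SHOW FUNCTIONS;
```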
- Create Scalar Function

```sql
-CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ];
+CREATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE outputtype [BUFSIZE B];
```

-- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc`
-- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes
-- typename(Z):the output data type, the value is the literal string of the type
-- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]
+- userDefinedFunctionName:The function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path:The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
+- outputtype:The output data type, the value is the literal string of the supported TDengine data type.
+- B:the size of intermediate buffer, in bytes; it is an optional parameter and the range is [0,512].

For example, below SQL statement can be used to create a UDF from `add_one.so`.

@@ -123,17 +145,17 @@ CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT;
- Create Aggregate Function

```sql
-CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ];
+CREATE AGGREGATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE outputtype [ BUFSIZE B ];
```

-- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc`
-- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes
-- typename(Z):the output data type, the value is the literal string of the type
+- userDefinedFunctionName:the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path:the absolute path of the DLL file including the name of the shared object file (.so). The path needs to be quoted by single or double quotes.
+- outputtype:the output data type, the value is the literal string of the type
- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]

For details about how to use intermediate result, please refer to example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c).

-For example, below SQL statement can be used to create a UDF rom `demo.so`.
+For example, below SQL statement can be used to create a UDF from `demo.so`.

```sql
CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14;
@@ -176,11 +198,11 @@ In current version there are some restrictions for UDF
1. Only Linux is supported when creating and invoking UDF for both client side and server side
2. UDF can't be mixed with builtin functions
3. Only one UDF can be used in a SQL statement
-4. Single column is supported as input for UDF
+4. Only a single column is supported as input for UDF
5. Once created successfully, UDF is persisted in MNode of TDengineUDF
6. UDF can't be created through REST interface
7. The function name used when creating UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc`
-8. The name name of UDF name should not conflict with any of builtin functions
+8. The name of a UDF should not conflict with any of TDengine's built-in functions

## Examples

diff --git a/docs-en/10-cluster/02-cluster-mgmt.md b/docs-en/10-cluster/02-cluster-mgmt.md
index 9d717be236e3e89114f58fc492223e3ad94fc9ea..674c92e2766a4eb304079140af19c8efea72d55e 100644
--- a/docs-en/10-cluster/02-cluster-mgmt.md
+++ b/docs-en/10-cluster/02-cluster-mgmt.md
@@ -3,16 +3,16 @@ sidebar_label: Operation
title: Manage DNODEs
---

-The previous section [Deployment](/cluster/deploy) introduced how to deploy and start a cluster from scratch. Once a cluster is ready, the dnode status in the cluster can be shown at any time, new dnode can be added to scale out the cluster, an existing dnode can be removed, even load balance can be performed manually.
+The previous section, [Deployment](/cluster/deploy), showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed and you can even perform load balancing manually, if necessary.

:::note
-All the commands to be introduced in this chapter need to be run through TDengine CLI, sometimes it's necessary to use root privilege.
+All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege.

:::

## Show DNODEs

-The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes, etc. It's suggested to execute this command to check after adding or removing a dnode.
+The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode.

```sql
SHOW DNODES;
```
@@ -30,7 +30,7 @@ Query OK, 1 row(s) in set (0.008298s)

## Show VGROUPs

-To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located in different dnodes, scaling out can be achieved by adding more vnodes from more dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode according to system resources of the dnodes.
+To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes.
Launch TDengine CLI `taos` and execute below command:
@@ -87,7 +87,7 @@ taos> show dnodes;
Query OK, 2 row(s) in set (0.001017s)
```

-It can be seen that the status of the new dnode is "offline", once the dnode is started and connects the firstEp of the cluster, execute the command again and get the example output below, from which it can be seen that two dnodes are both in "ready" status.
+It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status.

```
taos> show dnodes;
@@ -132,12 +132,12 @@ taos> show dnodes;
Query OK, 1 row(s) in set (0.001137s)
```

-In the above example, when `show dnodes` is executed the first time, two dnodes are shown. Then `drop dnode 2` is executed, after that from the output of executing `show dnodes` again it can be seen that only the dnode with ID 1 is still in the cluster.
+In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster.

:::note

-- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Normally, before dropping a dnode, the data belonging to the dnode needs to be migrated to other place.
-- Please be noted that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped.
+- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to be deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs.
+- Please note that `drop dnode` is different from stopping the `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped.
- Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode.
- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.

diff --git a/docs-en/10-cluster/03-ha-and-lb.md b/docs-en/10-cluster/03-ha-and-lb.md
index 6e0c386abe4100ec59f60c1c90b3305e0d187c79..bd718eef9f8dc181628132de831dbca2af59d158 100644
--- a/docs-en/10-cluster/03-ha-and-lb.md
+++ b/docs-en/10-cluster/03-ha-and-lb.md
@@ -7,7 +7,7 @@ title: High Availability and Load Balancing

High availability of vnode and mnode can be achieved through replicas in TDengine.

-The number of vnodes is associated with each DB, there can be multiple DBs in a TDengine cluster. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas, the default value is 1. With single replica, the high availability of the system can't be guaranteed. Whenever one node is down, the data service will be unavailable. The number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation would fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
+A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since, if one node is down, the data service becomes unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.

```sql
CREATE DATABASE demo replica 3;
@@ -15,19 +15,19 @@ CREATE DATABASE demo replica 3;

The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data.

-There may be data for multiple DBs in a dnode. Once a dnode is down, multiple DBs may be affected. However, it's hard to say the cluster is guaranteed to work properly as long as over half of dnodes are online because vnodes are introduced and there may be complex mapping between vnodes and dnodes.
+There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. In theory, the cluster should continue to provide data access for reading or inserting as long as over half of the vnodes in each vgroup are online. However, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly just because over half of the dnodes are online.

## High Availability of Mnode

-Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`, the valid time range is [1,3]. To make sure the data consistency between mnodes, the data replication between mnodes is performed in a synchronous way.
+Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`. The valid range for `numOfMNodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously.

-There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. Command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
+There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
```sql
SHOW MNODES;
```

-The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode, because there must be at least one mnode otherwise the cluster doesn't work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
+The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.

For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.

@@ -36,15 +36,16 @@ If high availability is important for your system, both vnode and mnode must be

:::

-## Load Balance
+## Load Balancing

-Load balance will be triggered in 3 cases without manual intervention.
+Load balancing will be triggered in 3 cases without manual intervention.

-- When a new dnode is joined in the cluster, automatic load balancing may be triggered, some data from some dnodes may be transferred to the new dnode automatically.
+- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically.
- When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically.
- When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes.
+
:::tip
-Automatic load balancing is controlled by parameter `balance`, 0 means disabled and 1 means enabled.
+Automatic load balancing is controlled by the parameter `balance`: 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance).

:::

@@ -52,22 +53,22 @@ Automatic load balancing is controlled by parameter `balance`, 0 means disabled

When a dnode is offline, it can be detected by the TDengine cluster. There are two cases:

-- The dnode becomes online again before the threshold configured in `offlineThreshold` is reached, it is still in the cluster and data replication is started automatically. The dnode can work properly after the data syncup is finished.
+- The dnode comes back online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished.

-- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join in the cluster automatically, it can only be joined manually by the system operator.
+- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster.
:::note
-If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted after all the vnodes or mnodes in the group become online and can exchange status, then the vgroup (or mnode group) is able to provide service.
+If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be elected after all the vnodes or mnodes in the group become online and can exchange status information. Following this, the vgroup (or mnode group) is able to provide service.

:::

## Arbitrator

-If the number of replicas is set to an even number like 2, when half of the vnodes in a vgroup don't work a master node can't be voted. A similar case is also applicable to mnode if the number of mnodes is set to an even number like 2.
+The "arbitrator" component is used to address the special case when the number of replicas is set to an even number, like 2 or 4. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number, like 2 or 4.

-To resolve this problem, a new arbitrator component named `tarbitrator`, abbreviated for TDengine Arbitrator, was introduced. Arbitrator simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally.
+To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but is only responsible for network communication; it doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the Arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally.

-Normally, it's suggested to configure a replica number of each DB or system parameter `numOfMNodes` to an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability.
+Normally, it's prudent to configure the replica number for each DB or system parameter `numOfMNodes` to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus the arbitrator component can be used to achieve both lower cost of storage space and high availability.

Arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service.
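As an illustration of the arbitrator workflow (the `arbitrator` parameter name and the default port 6042 are assumptions to verify against your version's reference documentation), the end point of a running `tarbitrator` is added to the configuration of every dnode, after which the arbitrator can be checked from the TDengine CLI:

```sql
-- With "arbitrator arb-host:6042" set in taos.cfg on every dnode,
-- the arbitrator is expected to appear in the dnode list with role "arb"
SHOW DNODES;
```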
diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs-en/12-taos-sql/01-data-type.md
index 86ec941f955516e99e6bb54730a55083bc26ed09..3f5a49e3135771c6c1e62bcf158a99ee30f1ed9d 100644
--- a/docs-en/12-taos-sql/01-data-type.md
+++ b/docs-en/12-taos-sql/01-data-type.md
@@ -1,17 +1,17 @@
---
title: Data Types
-description: "The data types supported by TDengine include timestamp, float, JSON, etc"
+description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
---

-When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows or querying data, timestamp must follow the rules below:
+When using TDengine to store and query data, the most important part of the data is the timestamp. A timestamp must be specified when creating and inserting data rows. The timestamp must follow the rules below:

-- the format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
-- internal function `now` can be used to get the current timestamp of the client side
-- the current timestamp of the client side is applied when `now` is used to insert data
+- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
+- Internal function `now` can be used to get the current timestamp on the client side
+- The current timestamp of the client side is applied when `now` is used to insert data
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
-- timestamp can be applied with add/subtract operation, for example `now-2h` means 2 hours back from the time at which query is executed,the unit can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operation.
+- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.

-Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`, like below, the default time precision is millisecond.
+Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.

```sql
CREATE DATABASE db_name PRECISION 'ns';
@@ -30,8 +30,8 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL |
| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL |
| 9 | BOOL | 1 | Bool, the value range is {true, false} |
-| 10 | NCHAR | User Defined| Multiple-Byte string that can include like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
-| 11 | JSON | | json type can only be used on tag, a tag of json type is excluded with any other tags of any other type |
+| 10 | NCHAR | User Defined| Multi-byte string that can include multi-byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 11 | JSON | | JSON type can only be used on tags. A tag of JSON type cannot be used together with any other tag of any type |

:::tip
TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.

:::

:::note
-Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multiple-byte characters must be stored in NCHAR type.
+Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.

:::

diff --git a/docs-en/12-taos-sql/02-database.md b/docs-en/12-taos-sql/02-database.md
index 98b75b30b3ebebb33ce1afe413554f218092bfeb..80581b2f1bc7ce9cd046c18873d3f22b6804d8cf 100644
--- a/docs-en/12-taos-sql/02-database.md
+++ b/docs-en/12-taos-sql/02-database.md
@@ -4,7 +4,7 @@ title: Database
description: "create and drop database, show or change database parameters"
---

-## Create Datable
+## Create Database

```
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
```

:::info

-1. KEEP specifies the number of days for which the data in the database to be created will be kept, the default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
+1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
2. UPDATE specifies whether the data can be updated and how the data can be updated.
- 1. UPDATE set to 0 means update operation is not allowed, the data with an existing timestamp will be dropped silently.
- 2. UPDATE set to 1 means the whole row will be updated, the columns for which no value is specified will be set to NULL
- 3. UPDATE set to 2 means updating a part of columns for a row is allowed, the columns for which no value is specified will be kept as no change
+ 1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is.
+ 2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL.
+ 3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged.
3. The maximum length of database name is 33 bytes.
4. The maximum length of a SQL statement is 65,480 bytes.
5. Below are the parameters that can be used when creating a database
@@ -35,7 +35,7 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb)
- comp: [Description](/reference/config/#comp)
- precision: [Description](/reference/config/#precision)
-6. Please note that all of the parameters mentioned in this section can be configured in configuration file `taosd.cfg` at server side and used by default, the default parameters can be overriden if they are specified in `create database` statement.
+6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement.

:::

@@ -52,7 +52,7 @@ USE db_name;
```

:::note
-This way is not applicable when using a REST connection
+This way is not applicable when using a REST connection. In a REST connection, the database name must be specified before a table or STable name. For example, to query the STable "meters" in database "test", the query would be "SELECT count(*) from test.meters".

:::

@@ -63,13 +63,13 @@ DROP DATABASE [IF EXISTS] db_name;
```

:::note
-All data in the database will be deleted too. This command must be used with caution.
+All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command.

:::

## Change Database Configuration

-Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some others can't, for details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/).
+Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/).

```
ALTER DATABASE db_name COMP 2;
@@ -81,7 +81,7 @@ COMP parameter specifies whether the data is compressed and how the data is comp
ALTER DATABASE db_name REPLICA 2;
```

-REPLICA parameter specifies the number of replications of the database.
+REPLICA parameter specifies the number of replicas of the database.
```
ALTER DATABASE db_name KEEP 365;
@@ -124,4 +124,4 @@ SHOW DATABASES;
SHOW CREATE DATABASE db_name;
```

-This command is useful when migrating the data from one TDengine cluster to another one. This command can be used to get the CREATE statement, which can be used in another TDengine to create the exact same database.
+This command is useful when migrating the data from one TDengine cluster to another. This command can be used to get the CREATE statement, which can be used in another TDengine instance to create the exact same database.
diff --git a/docs-en/12-taos-sql/03-table.md b/docs-en/12-taos-sql/03-table.md
index 678965893e8b386d9f2842c6e4e650c2d650e080..0505787ff8cc597eafd8299292ebac3e0fd3d4ad 100644
--- a/docs-en/12-taos-sql/03-table.md
+++ b/docs-en/12-taos-sql/03-table.md
@@ -12,10 +12,10 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam

:::info

-1. The first column of a table must be of TIMESTAMP type, and it will be set as the primary key automatically
+1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
3. The maximum length of each row is 16k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
-4. The name of the subtable can only consist of English characters, digits and underscore, and can't start with a digit. Table names are case insensitive.
+4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally.

@@ -44,7 +44,7 @@ The tags for which no value is specified will be set to NULL.
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```

-This can be used to create a lot of tables in a single SQL statement to accelerate the speed of the creating tables.
+This can be used to create a lot of tables in a single SQL statement while making table creation much faster.

:::info

@@ -111,7 +111,7 @@ If a table is created using a super table as template, the table definition can
ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length);
```

-The type of a column is variable length, like BINARY or NCHAR, this can be used to change (or increase) the length of the column.
+If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column.

:::note
If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table.
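To make the variable-length rule concrete, here is a short sketch; the table `tb_sensor` and its `note` column are hypothetical:

```sql
-- Assumes tb_sensor was created with a column "note" of type BINARY(20);
-- the new length must be larger than the current one
ALTER TABLE tb_sensor MODIFY COLUMN note BINARY(50);
```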
diff --git a/docs-en/12-taos-sql/04-stable.md b/docs-en/12-taos-sql/04-stable.md
index 7354484f754b513ac2b8828ac1e13bc550a29efd..b8a608792ab327a81129d29ddd0ff44d7af6e6c5 100644
--- a/docs-en/12-taos-sql/04-stable.md
+++ b/docs-en/12-taos-sql/04-stable.md
@@ -9,7 +9,7 @@ Keyword `STable`, abbreviated for super table, is supported since version 2.0.15

:::

-## Crate STable
+## Create STable

```
CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
@@ -19,7 +19,7 @@ The SQL statement of creating a STable is similar to that of creating a table, b

:::info

-1. The tag types specified in TAGS should NOT be timestamp. Since 2.1.3.0 timestamp type can be used in TAGS column, but its value must be fixed and arithmetic operation can't be applied on it.
+1. A tag can be of type timestamp since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp.
2. The tag names specified in TAGS should NOT be the same as other columns.
3. The tag names specified in TAGS should NOT be the same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/)
4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB.
@@ -76,7 +76,7 @@ ALTER STable stb_name DROP COLUMN field_name;
ALTER STable stb_name MODIFY COLUMN field_name data_type(length);
```

-This command can be used to change (or increase, more specifically) the length of a column of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR.

## Change Tags of A STable

@@ -94,7 +94,7 @@ This command is used to add a new tag for a STable and specify the tag type.
ALTER STable stb_name DROP TAG tag_name;
```

-The tag will be removed automatically from all the subtables created using the super table as template once a tag is removed from a super table.
+The tag will be removed automatically from all the subtables, created using the super table as template, once a tag is removed from a super table.

### Change A Tag

```
ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name;
```

-The tag name will be changed automatically for all the subtables created using the super table as template once a tag name is changed for a super table.
+The tag name will be changed automatically for all the subtables, created using the super table as template, once a tag name is changed for a super table.

### Change Tag Length

```
ALTER STable stb_name MODIFY TAG tag_name data_type(length);
```

-This command can be used to change (or increase, more specifically) the length of a tag of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR.

:::note
Changing tag values can be applied to only subtables. All other tag operations, like add tag, remove tag, however, can be applied to only STable.
If a new tag is added for a STable, the tag will be added with NULL value for all its subtables.

diff --git a/docs-en/12-taos-sql/06-select.md b/docs-en/12-taos-sql/06-select.md
index d9c39845f8576bb309d159b1c8cb6728a22c9c5d..8a017cf92e40aa4a854dcd531b7df291a9243515 100644
--- a/docs-en/12-taos-sql/06-select.md
+++ b/docs-en/12-taos-sql/06-select.md
@@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...]

## Wildcard

-Wilcard \* can be used to specify all columns. The result includes only data columns for normal tables.
+Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables.

```
taos> SELECT * FROM d1001;
@@ -51,14 +51,14 @@ taos> SELECT * FROM meters;
Query OK, 9 row(s) in set (0.002022s)
```

-Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns.
+Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns.

```SQL
SELECT * FROM d1001;
SELECT d1001.* FROM d1001;
```

-In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
+In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.

```
taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
@@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
Query OK, 1 row(s) in set (0.020443s)
```

-Wilcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
+Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.

```
taos> SELECT COUNT(*) FROM d1001;
@@ -96,7 +96,7 @@ Query OK, 1 row(s) in set (0.000849s)

## Tags

-Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like the example below.
+Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note, however, that wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below.

```
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
@@ -109,7 +109,7 @@ Query OK, 2 row(s) in set (0.003112s)

## Get distinct values

-`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or subtable.
+`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable.

```sql
SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name;
SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
```

:::info
Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. -2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers. +1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output. +2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers. 3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement. ::: ## Columns Names of Result Set -When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example +When using `SELECT`, the column names in the result set will be the same as those in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example ``` taos> SELECT ts, ts AS primary_key_ts FROM d1001; @@ -161,7 +161,7 @@ SELECT * FROM d1001; ## Special Query -Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use. +Some special query functions can be invoked without a `FROM` sub-clause. For example, the statement below can be used to get the current database in use. ``` taos> SELECT DATABASE(); @@ -181,7 +181,7 @@ taos> SELECT DATABASE(); Query OK, 1 row(s) in set (0.000184s) ``` -Below statement can be used to get the version of client or server. +The statement below can be used to get the version of client or server. ``` taos> SELECT CLIENT_VERSION(); @@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION(); Query OK, 1 row(s) in set (0.000077s) ``` -Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. +The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. ``` taos> SELECT SERVER_STATUS(); @@ -284,7 +284,7 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; Query OK, 1 row(s) in set (0.001091s) ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified.
Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set. +- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, and columns can be renamed in the result set. - Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. - Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. - Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values. @@ -318,13 +318,13 @@ Logical operations in below table can be used in the `where` clause to filter th - Operator `like` is used together with wildcards to match strings - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator. + - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slow down the execution performance of the `LIKE` operator. - `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns. - For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`. - From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range. - From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. +- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
+- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. ## Regular Expression @@ -364,7 +364,7 @@ FROM temp_STable t1, temp_STable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similary, join operation can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result set of multiple sub queries. :::note Restrictions on join operation: @@ -380,7 +380,7 @@ Restrictions on join operation: ## Nested Query -Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query. +Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query; a concrete example follows the notes below. From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: @@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...; :::info -- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query -- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. +- Only one layer of nesting is allowed, which means no sub query is allowed within a sub query +- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. - Sub query is not allowed in continuous query. - JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query. - UNION operation is not allowed in either inner query or outer query. -- The functionalities that can be used in the inner query is same as non-nested query. - - `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage. -- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as: +- The functions that can be used in the inner query are the same as those that can be used in a non-nested query. - - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query. +- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions: - Functions - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`. - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`.
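To make the form above concrete, the following is a minimal hedged sketch against the meters data set used throughout this chapter; the per-subtable aggregation via `GROUP BY tbname` in the inner query is illustrative, not the only possible inner query:

```sql
-- Inner query: the last voltage reported by each subtable of the super table "meters".
-- Outer query: the average of those per-meter values. AVG scans the virtual table
-- only once, so it is allowed in the outer query (unlike STDDEV or PERCENTILE).
SELECT AVG(voltage) FROM (SELECT LAST(voltage) AS voltage FROM meters GROUP BY tbname);
```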
@@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` -The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement: +The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with the SQL statement below: ```SQL -SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; +SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md index 32850e8c4b0a816cae94563079c79b94c8611bd5..33656338a7bba38dc55cf536bdba8e95309c5acf 100644 --- a/docs-en/12-taos-sql/index.md +++ b/docs-en/12-taos-sql/index.md @@ -3,11 +3,9 @@ title: TDengine SQL description: "The syntax supported by TDengine SQL " --- -This section explains the syntax to operating databases, tables, STables, inserting data, selecting data, functions and some tips that can be used in TDengine SQL. It would be easier to understand with some fundamental knowledge of SQL. +This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. -TDengine SQL is the major interface for users to write data into or query from TDengine. For users to easily use, syntax similar to standard SQL is provided. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide the functionality of deleting time series data, thus corresponding statements are not provided in TDengine SQL. -TDengine SQL doesn't support abbreviation for keywords, for example `DESCRIBE` can't be abbreviated as `DESC`. +TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL. Syntax Specifications used in this chapter: @@ -16,7 +14,7 @@ Syntax Specifications used in this chapter: - | means one of a few options, excluding | itself. - … means the item prior to it can be repeated multiple times. -To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of meters. Assuming each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: +To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below: ```sql taos> DESCRIBE meters; @@ -30,4 +28,4 @@ taos> DESCRIBE meters; groupid | INT | 4 | TAG | ``` -The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003, d1004 respectively based on the data model of TDengine.
+The data set includes the data collected by 4 meters; the corresponding table names are d1001, d1002, d1003 and d1004, based on the data model of TDengine. diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx index f405d551e530a37a5221e71a824f605fba0c0db9..0edc901bc373683a49dfde061f796dc0ae79ab4f 100644 --- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx @@ -10,7 +10,7 @@ One difference from the native connector is that the REST interface is stateless ## Installation -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language supports the HTTP protocol is enough. +The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. ## Verification diff --git a/docs-en/14-reference/03-connector/python.mdx b/docs-en/14-reference/03-connector/python.mdx index 2b238173e04e3e13de36b5ac4d91d0cda290ca72..c52b4f18825c083e4bdfebe26b2e68ef2025ef8a 100644 --- a/docs-en/14-reference/03-connector/python.mdx +++ b/docs-en/14-reference/03-connector/python.mdx @@ -53,7 +53,7 @@ Earlier TDengine client software includes the Python connector. If the Python co ::: -#### to install `taospy` +#### To install `taospy` @@ -320,7 +320,7 @@ All database operations will be thrown directly if an exception occurs. The appl ### About nanoseconds -Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. +Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. It is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ @@ -328,7 +328,7 @@ Due to the current imperfection of Python's nanosecond support (see link below), ## Frequently Asked Questions -Welcome to [ask questions or report questions] (https://github.com/taosdata/taos-connector-python/issues). +Welcome to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues). ## Important Update diff --git a/docs-en/14-reference/04-taosadapter.md b/docs-en/14-reference/04-taosadapter.md index 4478ced10e4b47c69ecd2a7e6a935599eb03660c..55d964c14a091109d82d67f0060e846d7e513c0c 100644 --- a/docs-en/14-reference/04-taosadapter.md +++ b/docs-en/14-reference/04-taosadapter.md @@ -30,9 +30,9 @@ taosAdapter provides the following features. ### Install taosAdapter -taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter.
You can download taosAdapter from [TAOSData official website](https://taosdata.com/en/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. +taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can visit the [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later versions). If you need to deploy taosAdapter separately on a server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. -### start/stop taosAdapter +### Start/Stop taosAdapter On Linux systems, the taosAdapter service is managed by `systemd` by default. You can use the command `systemctl start taosadapter` to start the taosAdapter service and use the command `systemctl stop taosadapter` to stop the taosAdapter service. @@ -153,8 +153,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl ## Feature List -- Compatible with RESTful interfaces - [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) +- Compatible with RESTful interfaces [REST API](/reference/rest-api/) - Compatible with InfluxDB v1 write interface [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - Compatible with OpenTSDB JSON and telnet format writes @@ -187,7 +186,7 @@ You can use any client that supports the http protocol to write data to or query ### InfluxDB -You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: +You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: ```text /influxdb/v1/write ``` @@ -204,7 +203,7 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho ### OpenTSDB -You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. +You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/` to write data in OpenTSDB compatible format to TDengine.
```text /opentsdb/v1/put/json/:db diff --git a/docs-en/14-reference/06-taosdump.md b/docs-en/14-reference/06-taosdump.md index 973999704b595ea9b742f1ef759f973aa1f05649..a7e216398a183a096678d8d70c429606d4e5f809 100644 --- a/docs-en/14-reference/06-taosdump.md +++ b/docs-en/14-reference/06-taosdump.md @@ -12,14 +12,13 @@ taosdump can back up a database, a super table, or a normal table as a logical d Suppose the specified location already has data files. In that case, taosdump will prompt the user and exit immediately to avoid data overwriting which means that the same path can only be used for one backup. Please be careful if you see a prompt for this. -taosdump is a logical backup tool and should not be used to back up any raw data, environment settings, Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. ## Installation There are two ways to install taosdump: -- Install the taosTools official installer. Please find taosTools from [All download links](https://www.taosdata.com/all-downloads) page and download and install it. +- Install the taosTools official installer. Please download taosTools from the [All download links](https://www.tdengine.com/all-downloads) page and install it. - Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details. @@ -28,14 +27,14 @@ There are two ways to install taosdump: ### taosdump backup data 1. backing up all databases: specify `-A` or `-all-databases` parameter. -2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. +2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. back up some super or normal tables in the specified database: use `-dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. 4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and the taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter. 5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters ("loose" mode). This can reduce the backup time and the backup data footprint if table names, column names, and tag names do not use the `escape character`. If you are unsure about using `-n` and `-L`, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. :::tip - taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will parse only the schema. -- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ..."
can be tried by challenging the `-B` parameter to a smaller value. +- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. ::: @@ -44,7 +43,7 @@ There are two ways to install taosdump: Restore the data file in the specified path: use the `-i` parameter plus the path to the data file. You should not use the same directory to backup different data sets, and you should not backup the same data set multiple times in the same path. Otherwise, the backup data will cause overwriting or multiple backups. :::tip -taosdump internally uses TDengine stmt binding API for writing recovery data and currently uses 16384 as one write batch for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust to a smaller value by using the `-B` parameter. +taosdump internally uses TDengine stmt binding API for writing recovery data with a default batch size of 16384 for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter. ::: diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md index e945d581c93b2ad1d7f0c32639eb3ba524e35161..16bae615c04ab92e4934418d6c0a3aaf1e1ccde8 100644 --- a/docs-en/14-reference/07-tdinsight/index.md +++ b/docs-en/14-reference/07-tdinsight/index.md @@ -61,7 +61,7 @@ sudo yum install \ ## Automated deployment of TDinsight -We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) script to allow users to configure the installation automatically and quickly. +We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly. You can download the script via `wget` or other tools: @@ -300,7 +300,7 @@ This section contains the current information and status of the cluster, the ale ![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) -1. **MNodes Status**: a simple table view of `show mnodes`. 2. +1. **MNodes Status**: a simple table view of `show mnodes`. 2. **MNodes Number**: similar to `DNodes Number`, the number of MNodes changes. ### Request @@ -317,9 +317,9 @@ This section contains the current information and status of the cluster, the ale Database usage, repeated for each value of the variable `$database` i.e. multiple rows per database. -1. **STables**: number of super tables. 2. -2. **Total Tables**: number of all tables. 3. -3. **Sub Tables**: the number of all super table sub-tables. 4. +1. **STables**: number of super tables. +2. **Total Tables**: number of all tables. +3. **Sub Tables**: the number of all super table subtables. 4. **Tables**: graph of all normal table numbers over time. 5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups. @@ -330,18 +330,18 @@ Database usage, repeated for each value of the variable `$database` i.e. multipl Data node resource usage display with repeated multiple rows for the variable `$fqdn` i.e., each data node. Includes. 1. 
**Uptime**: the time elapsed since the dnode was created. -2. **Has MNodes?**: whether the current dnode is a mnode. 3. -3. **CPU Cores**: the number of CPU cores. 4. -4. **VNodes Number**: the number of VNodes in the current dnode. 5. -5. **VNodes Masters**: the number of vnodes in the master role. 6. +2. **Has MNodes?**: whether the current dnode is a mnode. +3. **CPU Cores**: the number of CPU cores. +4. **VNodes Number**: the number of VNodes in the current dnode. +5. **VNodes Masters**: the number of vnodes in the master role. 6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes. 7. **Current Memory Usage of taosd**: memory usage of taosd processes. 8. **Disk Used**: The total disk usage percentage of the taosd data directory. -9. **CPU Usage**: Process and system CPU usage. 10. +9. **CPU Usage**: Process and system CPU usage. 10. **RAM Usage**: Time series view of RAM usage metrics. 11. **Disk Used**: Disks used at each level of multi-level storage (default is level0). 12. **Disk Increasing Rate per Minute**: Percentage increase or decrease in disk usage per minute. -13. **Disk IO**: Disk IO rate. 14. +13. **Disk IO**: Disk IO rate. 14. **Net IO**: Network IO, the aggregate network IO rate in addition to the local network. ### Login History @@ -376,7 +376,7 @@ TDinsight installed via the `TDinsight.sh` script can be cleaned up using the co To completely uninstall TDinsight during a manual installation, you need to clean up the following. 1. the TDinsight Dashboard in Grafana. -2. the Data Source in Grafana. 3. +2. the Data Source in Grafana. 3. remove the `tdengine-datasource` plugin from the plugin installation directory. ## Integrated Docker Example diff --git a/docs-en/14-reference/08-taos-shell.md b/docs-en/14-reference/08-taos-shell.md index fe5e5f2bc29509a4b96646253732076c7a6ee7ea..9bb5178300931e4b3808716badf06c85a4bbf396 100644 --- a/docs-en/14-reference/08-taos-shell.md +++ b/docs-en/14-reference/08-taos-shell.md @@ -4,11 +4,11 @@ sidebar_label: TDengine CLI description: Instructions and tips for using the TDengine CLI --- -The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the most simplest way for users to manipulate and interact with TDengine instances. +The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances. ## Installation -If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI on the environment which no TDengine server running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). +If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/). 
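Once installed, the TDengine CLI is simply an interactive SQL shell. As a quick sanity check after connecting, statements like the following can be issued (a hedged sketch; the databases listed will of course vary per installation):

```sql
-- confirm connectivity and the server version
SELECT SERVER_VERSION();
-- list the databases visible to the connected user
SHOW DATABASES;
```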
## Execution diff --git a/docs-en/14-reference/11-docker/index.md b/docs-en/14-reference/11-docker/index.md index 4ca84be369e14b3223e8609e06c9ebc4e35eaa2d..f532a263d88def21bd8b0fe9c59adaf982ee2404 100644 --- a/docs-en/14-reference/11-docker/index.md +++ b/docs-en/14-reference/11-docker/index.md @@ -315,13 +315,13 @@ password: taosdata taoslog-td2: ``` - :::note +:::note - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time - `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] - We recommend setting with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. - ::: - + We recommend setting `TAOS_ARBITRATOR` to use an arbitrator in a two-node environment. + + ::: 2. Start the cluster diff --git a/docs-en/14-reference/12-config/index.md b/docs-en/14-reference/12-config/index.md index 1a84f1539938ed8456d1c21c6def97d89305914d..10e23bbdb85c1aa65ffa021d3d7a7fdaf7b77b09 100644 --- a/docs-en/14-reference/12-config/index.md +++ b/docs-en/14-reference/12-config/index.md @@ -65,7 +65,7 @@ taos --dump-config | ------------- | ------------------------------------------------------------------------ | | Applicable | Server Only | | Meaning | The FQDN of the host where `taosd` will be started. It can be IP address | -| Default Value | The first hostname configured for the hos | +| Default Value | The first hostname configured for the host | | Note | It should be within 96 bytes | ### serverPort @@ -78,7 +78,7 @@ taos --dump-config | Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 | :::note -TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by `serverPort`. These ports need to be kept as open if firewall is enabled. Below table describes the ports used by TDengine in details. +TDengine uses 13 continuous ports, both TCP and UDP, starting from the port specified by `serverPort`. These ports need to be kept open if a firewall is enabled. The table below describes the ports used by TDengine in detail. ::: @@ -182,8 +182,8 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by | ------------- | -------------------------------------------- | | Applicable | Server Only | | Meaning | The maximum number of distinct rows returned | -| Value Range | [100,000 - 100, 000, 000] | -| Default Value | 100, 000 | +| Value Range | [100,000 - 100,000,000] | +| Default Value | 100,000 | | Note | After version 2.3.0.0 | ## Locale Parameters @@ -240,7 +240,7 @@ To avoid the problems of using time strings, Unix timestamp can be used directly | Default Value | Locale configured in host | :::info -A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. +A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are first encoded in UCS4-LE before being sent to the server side.
To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly. The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE. @@ -779,7 +779,7 @@ To prevent system resource from being exhausted by multiple concurrent streams, :::note HTTP server had been provided by `taosd` prior to version 2.4.0.0, now is provided by `taosAdapter` after version 2.4.0.0. -The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter]](/reference/taosadapter/). +The parameters described in this section are only applicable in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/). ::: diff --git a/docs-en/14-reference/12-directory.md b/docs-en/14-reference/12-directory.md index dbdba2b715bb41baf9b70dce91a3065e585d0434..304e3bcb434ee9a6ba338577a4d1ba546b548e3f 100644 --- a/docs-en/14-reference/12-directory.md +++ b/docs-en/14-reference/12-directory.md @@ -32,7 +32,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d - _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution. :::note -taosdump after version 2.4.0.0 require taosTools as a standalone installation. A few version taosBenchmark is include in taosTools too. +taosdump after version 2.4.0.0 requires taosTools as a standalone installation. A new version of taosBenchmark is included in taosTools too. ::: :::tip diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md b/docs-en/14-reference/13-schemaless/13-schemaless.md index d9ce9b434dd14a89d243b2ed629f3fde64e6aba0..ff0b2c51bd433788c593b6e20d4c341a9af7e921 100644 --- a/docs-en/14-reference/13-schemaless/13-schemaless.md +++ b/docs-en/14-reference/13-schemaless/13-schemaless.md @@ -3,17 +3,17 @@ title: Schemaless Writing description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface." --- -In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrade of the application logic, or the hardware adjustment of the device itself, the data collection items may change more frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0, it provides a series of interfaces to the schemaless writing method, which eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as the data is written to the interface. And when necessary, Schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. +In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrades of the application logic, or the hardware adjustment of the devices themselves, the data collection items may change frequently.
To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0 provides a series of interfaces to the schemaless writing method, which eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. And when necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. -The schemaless writing method creates super tables and their corresponding sub-tables completely indistinguishable from the super tables and sub-tables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. +The schemaless writing method creates super tables and their corresponding subtables that are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. ## Schemaless Writing Line Protocol -TDengine's schemaless writing line protocol supports to be compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. +TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extended protocol, based on InfluxDB's line protocol first. They allow users to control the (super table) schema more granularly. -With the following formatting conventions, Schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing). +With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing). ```json measurement,tag_set field_set timestamp ``` @@ -23,7 +23,7 @@ where : - measurement will be used as the data table name. It will be separated from tag_set by a comma. - tag_set will be used as tag data in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space. -- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by space. +- field_set will be used as normal column data in the format of `<field_key>=<field_value>,<field_key>=<field_value>`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space. - The timestamp is the primary key corresponding to the data in this row. All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
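Putting these conventions together, a single hedged example row for the familiar meters schema might look as follows. The tag and field names follow earlier chapters; the `i32` postfix and the millisecond timestamp are illustrative, since the type postfixes are governed by the suffix table below and the timestamp precision by the argument passed to the writing API:

```json
meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219i32,phase=0.31 1648432611249
```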
@@ -32,7 +32,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne - If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`. - If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`. -- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\) in front. (All refer to the ASCII character) +- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character) - Numeric types will be distinguished from data types by the suffix. | **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** | @@ -58,21 +58,21 @@ Note that if the wrong case is used when describing the data type suffix, or if Schemaless writes process row data according to the following principles. -1. You can use the following rules to generate the sub-table names: first, combine the measurement name and the key and value of the label into the next string: +1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the following string: ```json "measurement,tag_key1=tag_value1,tag_key2=tag_value2" ``` Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol. The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has. 2. If the super table obtained by parsing the line protocol does not exist, this super table is created. -If the sub-table obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the sub-table name determined in steps 1 or 2. 4. +If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates the subtable according to the subtable name determined in steps 1 or 2. 4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental). 5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL. 6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data. -7.
If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column value. 8. Errors encountered throughout the processing will interrupt the writing process and return an error code. :::tip diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx index ce45a12a04be3b2d07c1efd9248772b875ff0e41..dc2033ae6f789908d4d9f9ecd96c9396748c4400 100644 --- a/docs-en/20-third-party/01-grafana.mdx +++ b/docs-en/20-third-party/01-grafana.mdx @@ -23,7 +23,7 @@ You can download The Grafana plugin for TDengine from = $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, ` custom template variables are also supported. +- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported. - ALIAS BY: This allows you to set the current query alias. - GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement. diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md index ae393bb085dbe84477ca577dfeb468b29e8bc40c..738372cabd736c0be47b4080cc2c984e5110236c 100644 --- a/docs-en/20-third-party/09-emq-broker.md +++ b/docs-en/20-third-party/09-emq-broker.md @@ -3,7 +3,7 @@ sidebar_label: EMQX Broker title: EMQX Broker writing --- -MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, without any code, only need to use "rules" in EMQX Dashboard to do simple configuration. You can write MQTT data directly to TDengine. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. tdengine). +MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code; you only need to use "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. ## Prerequisites diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md index 155635c231d04634afdd2012177684227b003653..9c78a6645a0578d3b8d494d1fa60831eb88b3c81 100644 --- a/docs-en/20-third-party/11-kafka.md +++ b/docs-en/20-third-party/11-kafka.md @@ -228,7 +228,7 @@ taos> select * from meters; Query OK, 4 row(s) in set (0.004208s) ``` -If you see the above data, the synchronization is successful.
If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#Configuration Reference). +If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For detailed description of configuration parameters, see [Configuration Reference](#configuration-reference). ## The use of TDengine Source Connector diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md index 3f560bcfef6119480b5499649cee1602656dbd6f..8f27c35d7945043d39ad83626ceccee941ad135e 100644 --- a/docs-en/27-train-faq/03-docker.md +++ b/docs-en/27-train-faq/03-docker.md @@ -118,7 +118,7 @@ Output is like below: {"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} ``` -For details of REST API please refer to [REST API]](/reference/rest-api/). +For details of REST API please refer to [REST API](/reference/rest-api/). ### Run TDengine server and taosAdapter inside container @@ -265,7 +265,7 @@ Below is an example output: $ taos> select groupid, location from test.d0; groupid | location | ================================= - 0 | California.SanDieo | + 0 | California.SanDiego | Query OK, 1 row(s) in set (0.003490s) ``` diff --git a/docs-examples/c/async_query_example.c b/docs-examples/c/async_query_example.c index 262757f02b5c52f2d4402d363663db80bb38a54d..b370420b124a21b05f8e0b4041fb1461b1e2478a 100644 --- a/docs-examples/c/async_query_example.c +++ b/docs-examples/c/async_query_example.c @@ -182,14 +182,14 @@ int main() { // query callback ... // ts current voltage phase location groupid // numOfRow = 8 -// 1538548685000 10.300000 219 0.310000 beijing.chaoyang 2 -// 1538548695000 12.600000 218 0.330000 beijing.chaoyang 2 -// 1538548696800 12.300000 221 0.310000 beijing.chaoyang 2 -// 1538548696650 10.300000 218 0.250000 beijing.chaoyang 3 -// 1538548685500 11.800000 221 0.280000 beijing.haidian 2 -// 1538548696600 13.400000 223 0.290000 beijing.haidian 2 -// 1538548685000 10.800000 223 0.290000 beijing.haidian 3 -// 1538548686500 11.500000 221 0.350000 beijing.haidian 3 +// 1538548685500 11.800000 221 0.280000 california.losangeles 2 +// 1538548696600 13.400000 223 0.290000 california.losangeles 2 +// 1538548685000 10.800000 223 0.290000 california.losangeles 3 +// 1538548686500 11.500000 221 0.350000 california.losangeles 3 +// 1538548685000 10.300000 219 0.310000 california.sanfrancisco 2 +// 1538548695000 12.600000 218 0.330000 california.sanfrancisco 2 +// 1538548696800 12.300000 221 0.310000 california.sanfrancisco 2 +// 1538548696650 10.300000 218 0.250000 california.sanfrancisco 3 // numOfRow = 0 // no more data, close the connection. 
// ANCHOR_END: demo \ No newline at end of file diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs-examples/csharp/AsyncQueryExample.cs index fe30d21efe82e8d1dc414bd4723227ca93bc944f..3dabbebd1630a207af2e1b1b11cc4ba15bdd94a9 100644 --- a/docs-examples/csharp/AsyncQueryExample.cs +++ b/docs-examples/csharp/AsyncQueryExample.cs @@ -224,15 +224,15 @@ namespace TDengineExample } //output: -//Connect to TDengine success -//8 rows async retrieved - -//1538548685000 | 10.3 | 219 | 0.31 | beijing.chaoyang | 2 | -//1538548695000 | 12.6 | 218 | 0.33 | beijing.chaoyang | 2 | -//1538548696800 | 12.3 | 221 | 0.31 | beijing.chaoyang | 2 | -//1538548696650 | 10.3 | 218 | 0.25 | beijing.chaoyang | 3 | -//1538548685500 | 11.8 | 221 | 0.28 | beijing.haidian | 2 | -//1538548696600 | 13.4 | 223 | 0.29 | beijing.haidian | 2 | -//1538548685000 | 10.8 | 223 | 0.29 | beijing.haidian | 3 | -//1538548686500 | 11.5 | 221 | 0.35 | beijing.haidian | 3 | -//async retrieve complete. \ No newline at end of file +// Connect to TDengine success +// 8 rows async retrieved + +// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | +// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | +// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | +// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | +// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | +// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | +// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | +// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | +// async retrieve complete. \ No newline at end of file diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py index 314759f7662c7bf4c9df2c8b3396ad3101c91cd4..56942ef57085766cd128b03cabb7a357587eab16 100644 --- a/docs-examples/python/conn_native_pandas.py +++ b/docs-examples/python/conn_native_pandas.py @@ -13,7 +13,7 @@ print(df.head(3)) # output: # RangeIndex(start=0, stop=8, step=1) # -# ts current voltage phase location groupid -# 0 2018-10-03 14:38:05.000 10.3 219 0.31 beijing.chaoyang 2 -# 1 2018-10-03 14:38:15.000 12.6 218 0.33 beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800 12.3 221 0.31 beijing.chaoyang 2 +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3 diff --git a/docs-examples/python/conn_rest_pandas.py b/docs-examples/python/conn_rest_pandas.py index 143e4275fa4eda685766297e4b90cba3935a574d..0164080cd5a05e72dce40b1d111ea423623ff9b2 100644 --- a/docs-examples/python/conn_rest_pandas.py +++ b/docs-examples/python/conn_rest_pandas.py @@ -11,9 +11,9 @@ print(type(df.ts[0])) print(df.head(3)) # output: -# # RangeIndex(start=0, stop=8, step=1) -# ts current ... location groupid -# 0 2018-10-03 14:38:05+08:00 10.3 ... beijing.chaoyang 2 -# 1 2018-10-03 14:38:15+08:00 12.6 ... beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800000+08:00 12.3 ... beijing.chaoyang 2 +# +# ts current ... location groupid +# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2 +# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2 +# 2 2018-10-03 06:38:05+00:00 10.8 ... 
california.losangeles 3 diff --git a/docs-examples/python/connect_rest_examples.py b/docs-examples/python/connect_rest_examples.py index 94e7d5f467aeceae77ab0d9f4a5dce28fecf0722..3303eb0e194ac28e9486ab153183c3b1f0b639f2 100644 --- a/docs-examples/python/connect_rest_examples.py +++ b/docs-examples/python/connect_rest_examples.py @@ -38,8 +38,7 @@ for row in data: # inserted row count: 8 # queried row count: 3 # ['ts', 'current', 'voltage', 'phase', 'location', 'groupid'] -# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3, 219, 0.31, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 15, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.6, 218, 0.33, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 16, 800000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.3, 221, 0.31, 'beijing.chaoyang', 2] - +# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3] # ANCHOR_END: basic diff --git a/docs-examples/python/json_protocol_example.py b/docs-examples/python/json_protocol_example.py index bdf324f7061c964e3d913351635d9f7c4f052d0a..58b38f3ff667bcbbd902434d3409441a4d2c5b45 100644 --- a/docs-examples/python/json_protocol_example.py +++ b/docs-examples/python/json_protocol_example.py @@ -5,9 +5,9 @@ from taos import SmlProtocol, SmlPrecision lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, - "tags": {"location": "California.LosAngeles", "groupid": 1}}, + "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, - "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}] diff --git a/docs-examples/python/query_example.py b/docs-examples/python/query_example.py index de5f26784cbd1f523c996458f326ecb90c778da3..8afd7f07358d7e9c9a3677ee04f8eb92aae6856b 100644 --- a/docs-examples/python/query_example.py +++ b/docs-examples/python/query_example.py @@ -12,10 +12,10 @@ def query_api_demo(conn: taos.TaosConnection): # field count: 7 -# meta of files[1]: {name: ts, type: 9, bytes: 8} +# meta of fields[1]: {name: ts, type: 9, bytes: 8} # ======================Iterate on result========================= -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'California.SanFrancisco', 2) -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'California.SanFrancisco', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 11.800000190734863, 221, 0.2800000011920929, 'california.losangeles', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 13.399999618530273, 223, 
0.28999999165534973, 'california.losangeles', 2) # ANCHOR_END: iter # ANCHOR: fetch_all @@ -29,8 +29,8 @@ def fetch_all_demo(conn: taos.TaosConnection): # row count: 2 # ===============all data=================== -# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5), 'current': 10.300000190734863}, -# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 15), 'current': 12.600000381469727}] +# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 'current': 11.800000190734863}, +# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 'current': 13.399999618530273}] # ANCHOR_END: fetch_all if __name__ == '__main__': diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 45745403f3eec74cea7febbadf06b029c226e9a2..88fa0e728f397006759e296cf1e3533816ee540f 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -105,12 +105,14 @@ typedef struct SColumnInfoData { } SColumnInfoData; typedef struct SQueryTableDataCond { - STimeWindow twindow; + //STimeWindow twindow; int32_t order; // desc|asc order to iterate the data block int32_t numOfCols; SColumnInfo *colList; bool loadExternalRows; // load external rows or not int32_t type; // data block load type: + int32_t numOfTWindows; + STimeWindow *twindows; } SQueryTableDataCond; void* blockDataDestroy(SSDataBlock* pBlock); diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index b6af1ee7a6c486e4cd307d3458286f61ce162174..9b71e8c454948b9be257986e01ee146a09a10e55 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -230,7 +230,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData); void blockDebugShowData(const SArray* dataBlocks); int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid); + tb_uid_t suid); SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, const char* stbFullName, int32_t vgId); @@ -299,4 +299,3 @@ static FORCE_INLINE void blockCompressEncode(const SSDataBlock* pBlock, char* da #endif #endif /*_TD_COMMON_EP_H_*/ - diff --git a/include/dnode/mnode/mnode.h b/include/dnode/mnode/mnode.h index ddd6f1c05f99766aaaf16762ebcfb60fcb1b34ef..ab090940f218abe745fff2bfea170c9b6abf9248 100644 --- a/include/dnode/mnode/mnode.h +++ b/include/dnode/mnode/mnode.h @@ -81,7 +81,7 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad); * @param pMsg The request msg. * @return int32_t 0 for success, -1 for failure. 
*/ -int32_t mndProcessMsg(SRpcMsg *pMsg); +int32_t mndProcessRpcMsg(SRpcMsg *pMsg); int32_t mndProcessSyncMsg(SRpcMsg *pMsg); /** diff --git a/include/dnode/qnode/qnode.h b/include/dnode/qnode/qnode.h index 1ab101f705ac3f71fad134c200a22f903e4a8e86..90a952939577fc9cd945d0dc9fd8bde8d906667f 100644 --- a/include/dnode/qnode/qnode.h +++ b/include/dnode/qnode/qnode.h @@ -26,14 +26,17 @@ extern "C" { typedef struct SQnode SQnode; typedef struct { - int64_t numOfStartTask; - int64_t numOfStopTask; - int64_t numOfRecvedFetch; - int64_t numOfSentHb; - int64_t numOfSentFetch; - int64_t numOfTaskInQueue; + int64_t numOfProcessedQuery; + int64_t numOfProcessedCQuery; + int64_t numOfProcessedFetch; + int64_t numOfProcessedDrop; + int64_t memSizeInCache; + int64_t dataSizeSend; + int64_t dataSizeRecv; + int64_t numOfQueryInQueue; int64_t numOfFetchInQueue; - int64_t numOfErrors; + int64_t waitTimeInQueryQUeue; + int64_t waitTimeInFetchQUeue; } SQnodeLoad; typedef struct { @@ -71,10 +74,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad); * @param pQnode The qnode object. * @param pMsg The request message */ -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg); +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg); #ifdef __cplusplus } #endif -#endif /*_TD_QNODE_H_*/ \ No newline at end of file +#endif /*_TD_QNODE_H_*/ diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index 5b746015e33af1ed9feb7a92dec1d62030453b2b..8027b9394e0fd42c4c1d20a051868495130642f5 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -52,23 +52,31 @@ typedef struct SUserAuthInfo { AUTH_TYPE type; } SUserAuthInfo; +typedef struct SDbInfo { + int32_t vgVer; + int32_t tbNum; + int64_t dbId; +} SDbInfo; + typedef struct SCatalogReq { - SArray *pTableMeta; // element is SNAME SArray *pDbVgroup; // element is db full name + SArray *pDbCfg; // element is db full name + SArray *pDbInfo; // element is db full name + SArray *pTableMeta; // element is SNAME SArray *pTableHash; // element is SNAME SArray *pUdf; // element is udf name - SArray *pDbCfg; // element is db full name SArray *pIndex; // element is index name SArray *pUser; // element is SUserAuthInfo bool qNodeRequired; // valid qnode } SCatalogReq; typedef struct SMetaData { - SArray *pTableMeta; // SArray SArray *pDbVgroup; // SArray*> + SArray *pDbCfg; // SArray + SArray *pDbInfo; // SArray + SArray *pTableMeta; // SArray SArray *pTableHash; // SArray SArray *pUdfList; // SArray - SArray *pDbCfg; // SArray SArray *pIndex; // SArray SArray *pUser; // SArray SArray *pQnodeList; // SArray @@ -269,6 +277,8 @@ int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth); +int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet); + int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId); diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 3d86adb573cd27dfce3b93409b96a11b47b7aaf5..922136b590cb007c6acd040c7ce81d135c0dad4f 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -23,6 +23,9 @@ extern "C" { #include "function.h" #include "querynodes.h" +#define FUNC_AGGREGATE_UDF_ID 5001 +#define FUNC_SCALAR_UDF_ID 5002 + typedef enum EFunctionType { // aggregate function FUNCTION_TYPE_APERCENTILE = 1, @@ -126,21 +129,12 @@ typedef enum EFunctionType { 
struct SqlFunctionCtx; struct SResultRowEntryInfo; struct STimeWindow; -struct SCatalog; - -typedef struct SFmGetFuncInfoParam { - struct SCatalog* pCtg; - void* pRpc; - const SEpSet* pMgmtEps; - char* pErrBuf; - int32_t errBufLen; -} SFmGetFuncInfoParam; int32_t fmFuncMgtInit(); void fmFuncMgtDestroy(); -int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc); +int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen); bool fmIsBuiltinFunc(const char* pFunc); diff --git a/include/libs/index/index.h b/include/libs/index/index.h index 05db99db0f199169ce71e4a76d56899361aa403b..c3d31ffe3853d76d6ab6803dfc10f54dad2445c6 100644 --- a/include/libs/index/index.h +++ b/include/libs/index/index.h @@ -192,11 +192,16 @@ void indexTermDestroy(SIndexTerm* p); void indexInit(); /* index filter */ +typedef struct SIndexMetaArg { + void* metaHandle; + uint64_t suid; +} SIndexMetaArg; + typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus; SIdxFltStatus idxGetFltStatus(SNode* pFilterNode); -int32_t doFilterTag(const SNode* pFilterNode, SArray* result); +int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result); /* * destory index env * diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3ae2d18e5dab941c7693667719d2a87de80a8724..2648a468dd3fa82fe91825d60b739387d9255bd7 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -56,6 +56,9 @@ typedef struct SScanLogicNode { int8_t intervalUnit; int8_t slidingUnit; SNode* pTagCond; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; } SScanLogicNode; typedef struct SJoinLogicNode { @@ -216,6 +219,9 @@ typedef struct STableScanPhysiNode { int64_t sliding; int8_t intervalUnit; int8_t slidingUnit; + int8_t triggerType; + int64_t watermark; + int16_t tsColId; } STableScanPhysiNode; typedef STableScanPhysiNode STableSeqScanPhysiNode; diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index a30f3be7a17398b91db04678d14c3648dcab38ed..296b18e8dea7524244dcd8ade1a1149bfe97533d 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -43,6 +43,12 @@ typedef enum { TASK_TYPE_TEMP, } ETaskType; +typedef enum { + TARGET_TYPE_MNODE = 1, + TARGET_TYPE_VNODE, + TARGET_TYPE_OTHER, +} ETargetType; + typedef struct STableComInfo { uint8_t numOfTags; // the number of tags in schema uint8_t precision; // the number of precision @@ -126,11 +132,18 @@ typedef struct SDataBuf { void* handle; } SDataBuf; +typedef struct STargetInfo { + ETargetType type; + char dbFName[TSDB_DB_FNAME_LEN]; // used to update db's vgroup epset + int32_t vgId; +} STargetInfo; + typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code); typedef int32_t (*__async_exec_fn_t)(void* param); typedef struct SMsgSendInfo { __async_send_cb_fn_t fp; // async callback function + STargetInfo target; // for update epset void* param; uint64_t requestId; uint64_t requestObjRefId; diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h index 9e3b318019e6a689ed8b976870659f4890bcec44..5942d00cb212002d5309cec4cba253dc7e3d7388 100644 --- a/include/libs/qworker/qworker.h +++ b/include/libs/qworker/qworker.h @@ -52,22 +52,24 @@ typedef struct { int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb); -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessQueryMsg(void *node, void 
*qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg); +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts); void qWorkerDestroy(void **qWorkerMgmt); +int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type); + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index c63d8668b592921efbebf6cac913468a904c6608..65cfe8de0be9e387cecba70141c0bab513d6fc63 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -69,6 +69,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027) #define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028) #define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029) +#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030) #define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040) #define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041) @@ -655,7 +656,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801) #define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802) #define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803) -#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) +#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804) //udf #define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901) diff --git a/include/util/tqueue.h b/include/util/tqueue.h index dbc4d03177e4c489240c04aac37710ce995102d4..466c577c0079d07774722ff2efdd30bf207e0fc3 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -46,6 +46,7 @@ typedef struct { void *ahandle; int32_t workerId; int32_t threadNum; + int64_t timestamp; } SQueueInfo; typedef enum { @@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle); void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue); int32_t taosGetQueueNumber(STaosQset *qset); -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp); +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp); int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp); void taosResetQsetThread(STaosQset *qset, void *pItem); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 362df7b447d07524d6af1fb5955894229b3b32a9..eb4c4cb59feac8c8a0db6cd85f45f3482b31e96f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -729,23 +729,55 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) 
{ taosMemoryFreeClear(pMsgBody); } +void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, SEpSet* pEpSet) { + if (NULL == pEpSet) { + return; + } + + switch (pSendInfo->target.type) { + case TARGET_TYPE_MNODE: + if (NULL == pTscObj) { + tscError("mnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); + break; + case TARGET_TYPE_VNODE: { + if (NULL == pTscObj) { + tscError("vnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId); + return; + } + + SCatalog* pCatalog = NULL; + int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog); + if (code != TSDB_CODE_SUCCESS) { + tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId, tstrerror(code)); + return; + } + + catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet); + break; + } + default: + tscDebug("epset changed, not updated, msgType %s", TMSG_INFO(pMsg->msgType)); + break; + } +} + + void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; assert(pMsg->info.ahandle != NULL); + SRequestObj* pRequest = NULL; + STscObj* pTscObj = NULL; if (pSendInfo->requestObjRefId != 0) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); assert(pRequest->self == pSendInfo->requestObjRefId); pRequest->metric.rsp = taosGetTimestampUs(); - - //STscObj* pTscObj = pRequest->pTscObj; - //if (pEpSet) { - // if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) { - // updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet); - // } - //} - + pTscObj = pRequest->pTscObj; /* * There is not response callback function for submit response. * The actual inserted number of points is the first number. 
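The new `updateTargetEpSet` path above only helps if the sender filled in `SMsgSendInfo.target` when the request was built. Below is a minimal sketch of that producer side, assuming only the `STargetInfo` fields this patch adds to `query.h`; the helper names and the idea of calling them at request-build time are illustrative, not code from the patch.

```c
// Hypothetical helpers (assumes qcom's query.h, as modified by this patch,
// is in scope). They tag an outgoing request with its target so that
// processMsgFromServer() can route a redirected epset to the right place:
// for a vnode request, updateTargetEpSet() forwards the new epset to
// catalogUpdateVgEpSet(); for an mnode request it refreshes mgmtEp.
static void setVnodeTarget(SMsgSendInfo *pSendInfo, const char *dbFName, int32_t vgId) {
  pSendInfo->target.type = TARGET_TYPE_VNODE;
  tstrncpy(pSendInfo->target.dbFName, dbFName, TSDB_DB_FNAME_LEN);
  pSendInfo->target.vgId = vgId;
}

static void setMnodeTarget(SMsgSendInfo *pSendInfo) {
  pSendInfo->target.type = TARGET_TYPE_MNODE;  // dbFName/vgId unused here
}
```

Requests that fit neither bucket can use `TARGET_TYPE_OTHER`, which lands in the `default` branch above: the epset change is logged but nothing is updated.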
@@ -762,6 +794,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId); } + updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet); + SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle}; if (pMsg->contLen > 0) { @@ -1215,6 +1249,8 @@ void resetConnectDB(STscObj* pTscObj) { int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) { assert(pResultInfo != NULL && pRsp != NULL); + taosMemoryFreeClear(pResultInfo->pRspMsg); + pResultInfo->pRspMsg = (const char*)pRsp; pResultInfo->pData = (void*)pRsp->data; pResultInfo->numOfRows = htonl(pRsp->numOfRows); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2d77905d866061e0a454af094a8928e5771d94e7..b54701a311dd975d60f25a0c6f70eeb5e492ed6b 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -611,6 +611,7 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity) for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i); + pCol->hasNull = true; if (IS_VAR_DATA_TYPE(pCol->info.type)) { size_t metaSize = capacity * sizeof(int32_t); @@ -1153,7 +1154,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) { if (IS_VAR_DATA_TYPE(pColumn->info.type)) { pColumn->varmeta.length = 0; } else { - memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + if (pColumn->nullbitmap != NULL) { + memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows)); + } } } @@ -1290,8 +1293,8 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) { static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) { if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) { - memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n)); - memset(&pColInfoData->varmeta.offset[total - n - 1], 0, n); + memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t)); + memset(&pColInfoData->varmeta.offset[total - n], 0, n); } else { int32_t bytes = pColInfoData->info.bytes; memmove(pColInfoData->pData, ((char*)pColInfoData->pData + n * bytes), (total - n) * bytes); @@ -1508,14 +1511,11 @@ void blockDebugShowData(const SArray* dataBlocks) { * @param pReq * @param pDataBlocks * @param vgId - * @param uid set as parameter temporarily // TODO: remove this parameter, and the executor should set uid in - * SDataBlock->info.uid * @param suid // TODO: check with Liao whether suid response is reasonable * * TODO: colId should be set */ -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, - tb_uid_t uid, tb_uid_t suid) { +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) { int32_t sz = taosArrayGetSize(pDataBlocks); int32_t bufSize = sizeof(SSubmitReq); for (int32_t i = 0; i < sz; ++i) { @@ -1551,7 +1551,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks SSubmitBlk* pSubmitBlk = POINTER_SHIFT(pDataBuf, msgLen); pSubmitBlk->suid = suid; - pSubmitBlk->uid = uid; + pSubmitBlk->uid = pDataBlock->info.groupId; pSubmitBlk->numOfRows = rows; ++numOfBlks; @@ -1562,6 +1562,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf 
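The `colDataTrimFirstNRows` change a few hunks up is easy to misread: `varmeta.offset` is an `int32_t` array, so the old code moved `(total - n)` bytes where it needed `(total - n)` elements, and it began zeroing one slot too early. A standalone, element-wise restatement of the corrected trim follows (hypothetical helper with both sizes scaled explicitly for clarity; the patch itself clears `n` bytes at the corrected index):

```c
#include <stdint.h>
#include <string.h>

// Drop the first n entries of a total-element int32_t offset array:
// shift the surviving (total - n) elements to the front, then zero the
// n now-unused trailing slots starting at index (total - n).
static void trimOffsets(int32_t *offset, size_t n, size_t total) {
  memmove(offset, &offset[n], (total - n) * sizeof(int32_t));
  memset(&offset[total - n], 0, n * sizeof(int32_t));
}
```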
printf("|"); bool isStartKey = false; + int32_t offset = 0; for (int32_t k = 0; k < colNum; ++k) { // iterate by column SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k); void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes); @@ -1570,18 +1571,18 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks if (!isStartKey) { isStartKey = true; tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, - 0, 0); + offset, k); + } else { - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, 8, k); - break; + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_TIMESTAMP, TD_VTYPE_NORM, var, true, offset, k); } break; case TSDB_DATA_TYPE_NCHAR: { - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_NCHAR, TD_VTYPE_NORM, var, true, offset, k); break; } case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY - tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, TSDB_DATA_TYPE_VARCHAR, TD_VTYPE_NORM, var, true, offset, k); break; } case TSDB_DATA_TYPE_VARBINARY: @@ -1593,13 +1594,14 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks break; default: if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) { - tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, 8, k); + tdAppendColValToRow(&rb, 2, pColInfoData->info.type, TD_VTYPE_NORM, var, true, offset, k); } else { printf("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type); TASSERT(0); } break; } + offset += TYPE_BYTES[pColInfoData->info.type]; } dataLen += TD_ROW_LEN(rb.pBuf); } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d0a2ddd9bb6379d702b8c4d46c60085d3fa05b0c..141ec4f03b76238d6c15695c7ea3a8ea112d9e4b 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -293,7 +293,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1; if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1; - if (cfgAddInt32(pCfg, "idxDebugFlag", 0, 0, 255, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1; return 0; } diff --git a/source/common/src/trow.c b/source/common/src/trow.c index 4d0846f6c2957a6d2a1b74dabf60ee76af57287c..cc18240325ffa95aba75b4c7123d4d5749694035 100644 --- a/source/common/src/trow.c +++ b/source/common/src/trow.c @@ -1191,9 +1191,9 @@ bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, } static FORCE_INLINE int32_t compareKvRowColId(const void *key1, const void *key2) { - if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) { + if (*(col_id_t *)key1 > ((SKvRowIdx *)key2)->colId) { return 1; - } else if (*(int16_t *)key1 < ((SColIdx *)key2)->colId) { + } else if (*(col_id_t *)key1 < ((SKvRowIdx *)key2)->colId) { return -1; } else { return 0; diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 38ad948981f7bd7a260b9578bce6d9252c4290d9..8117acc3e14ad2e033cb0415f9abb54576f72add 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -521,21 +521,21 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec if (type == 
TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) { newColData = taosMemoryCalloc(1, charLen + 1); memcpy(newColData, varDataVal(inputData), charLen); - bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); - return ret; + return TSDB_CODE_INVALID_TIMESTAMP; } taosMemoryFree(newColData); } else if (type == TSDB_DATA_TYPE_NCHAR) { - newColData = taosMemoryCalloc(1, charLen / TSDB_NCHAR_SIZE + 1); + newColData = taosMemoryCalloc(1, charLen + TSDB_NCHAR_SIZE); int len = taosUcs4ToMbs((TdUcs4 *)varDataVal(inputData), charLen, newColData); if (len < 0){ taosMemoryFree(newColData); return TSDB_CODE_FAILED; } newColData[len] = 0; - bool ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); + int32_t ret = taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, tsDaylight); if (ret != TSDB_CODE_SUCCESS) { taosMemoryFree(newColData); return ret; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c index 85120102bc629c30f7520268a8054657fe1201ec..1de9875d063933fe1f35bb5b5770c1aabc6b8fc3 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c @@ -40,7 +40,7 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { break; default: pMsg->info.node = pMgmt->pMnode; - code = mndProcessMsg(pMsg); + code = mndProcessRpcMsg(pMsg); } if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) { diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c index 916973b4ca596ce7b6ee9d5bd89a4840161c6b86..65794b7b8136f0d6314880399ac08a195eecd22a 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmHandle.c @@ -16,7 +16,11 @@ #define _DEFAULT_SOURCE #include "qmInt.h" -void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {} +void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) { + SQnodeLoad qload = {0}; + qndGetLoad(pMgmt->pQnode, &qload); + +} int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) { SMonQmInfo qmInfo = {0}; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c index 35c94b7fbe786434cfb59191c8899949099d0325..e7fc261b67a8a6416cdbafae07552a5c9576bc22 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c @@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) { code = qmProcessGetMonitorInfoReq(pMgmt, pMsg); break; default: - code = qndProcessQueryMsg(pMgmt->pQnode, pMsg); + code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg); break; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index c7509eb9d8a7e1ed47bbc65f8b8e1e2d15364ebc..987fc5441653a09c27d889b03af30150622f96a3 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -62,7 +62,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { dmProcessNetTestReq(pDnode, pRpc); return; } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) { - qWorkerProcessFetchRsp(NULL, NULL, pRpc); + qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0); return; } else if (pRpc->msgType 
== TDMT_MND_STATUS_RSP && pEpSet != NULL) { dmSetMnodeEpSet(&pDnode->data, pEpSet); diff --git a/source/dnode/mnode/impl/inc/mndInt.h b/source/dnode/mnode/impl/inc/mndInt.h index cec49a4cbeae969774ee80bedf8cbe1900f85e5a..6661347e4206b28d6977b622bc4cd8777b34abb7 100644 --- a/source/dnode/mnode/impl/inc/mndInt.h +++ b/source/dnode/mnode/impl/inc/mndInt.h @@ -75,13 +75,12 @@ typedef struct { } STelemMgmt; typedef struct { - SWal *pWal; - sem_t syncSem; - int64_t sync; - bool standby; - bool restored; - int32_t errCode; - int32_t transId; + SWal *pWal; + sem_t syncSem; + int64_t sync; + bool standby; + int32_t errCode; + int32_t transId; } SSyncMgmt; typedef struct { @@ -90,34 +89,45 @@ typedef struct { } SGrantInfo; typedef struct SMnode { - int32_t selfDnodeId; - int64_t clusterId; - TdThread thread; - bool deploy; - bool stopped; - int8_t replica; - int8_t selfIndex; - SReplica replicas[TSDB_MAX_REPLICA]; - char *path; - int64_t checkTime; - SSdb *pSdb; - SMgmtWrapper *pWrapper; - SArray *pSteps; - SQHandle *pQuery; - SShowMgmt showMgmt; - SProfileMgmt profileMgmt; - STelemMgmt telemMgmt; - SSyncMgmt syncMgmt; - SHashObj *infosMeta; - SHashObj *perfsMeta; - SGrantInfo grant; - MndMsgFp msgFp[TDMT_MAX]; - SMsgCb msgCb; + int32_t selfDnodeId; + int64_t clusterId; + TdThread thread; + TdThreadRwlock lock; + int32_t rpcRef; + int32_t syncRef; + bool stopped; + bool restored; + bool deploy; + int8_t replica; + int8_t selfIndex; + SReplica replicas[TSDB_MAX_REPLICA]; + char *path; + int64_t checkTime; + SSdb *pSdb; + SArray *pSteps; + SQHandle *pQuery; + SHashObj *infosMeta; + SHashObj *perfsMeta; + SShowMgmt showMgmt; + SProfileMgmt profileMgmt; + STelemMgmt telemMgmt; + SSyncMgmt syncMgmt; + SGrantInfo grant; + MndMsgFp msgFp[TDMT_MAX]; + SMsgCb msgCb; } SMnode; void mndSetMsgHandle(SMnode *pMnode, tmsg_t msgType, MndMsgFp fp); int64_t mndGenerateUid(char *name, int32_t len); +int32_t mndAcquireRpcRef(SMnode *pMnode); +void mndReleaseRpcRef(SMnode *pMnode); +void mndSetRestore(SMnode *pMnode, bool restored); +void mndSetStop(SMnode *pMnode); +bool mndGetStop(SMnode *pMnode); +int32_t mndAcquireSyncRef(SMnode *pMnode); +void mndReleaseSyncRef(SMnode *pMnode); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mndMain.c similarity index 83% rename from source/dnode/mnode/impl/src/mnode.c rename to source/dnode/mnode/impl/src/mndMain.c index 5458bc5126914a4b33fa0617ca627924199313bd..995fe83cc5a69502a99a1b807ceffb6c4ec80a52 100644 --- a/source/dnode/mnode/impl/src/mnode.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -85,7 +85,7 @@ static void *mndThreadFp(void *param) { while (1) { lastTime++; taosMsleep(100); - if (pMnode->stopped) break; + if (mndGetStop(pMnode)) break; if (lastTime % (tsTransPullupInterval * 10) == 0) { mndPullupTrans(pMnode); @@ -118,7 +118,6 @@ static int32_t mndInitTimer(SMnode *pMnode) { } static void mndCleanupTimer(SMnode *pMnode) { - pMnode->stopped = true; if (taosCheckPthreadValid(pMnode->thread)) { taosThreadJoin(pMnode->thread, NULL); taosThreadClear(&pMnode->thread); @@ -335,15 +334,19 @@ void mndClose(SMnode *pMnode) { int32_t mndStart(SMnode *pMnode) { mndSyncStart(pMnode); if (pMnode->deploy) { - if (sdbDeploy(pMnode->pSdb) != 0) return -1; - pMnode->syncMgmt.restored = true; + if (sdbDeploy(pMnode->pSdb) != 0) { + mError("failed to deploy sdb while start mnode"); + return -1; + } + mndSetRestore(pMnode, true); } return mndInitTimer(pMnode); } void mndStop(SMnode *pMnode) { + mndSetStop(pMnode); 
mndSyncStop(pMnode); - return mndCleanupTimer(pMnode); + mndCleanupTimer(pMnode); } int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { @@ -362,6 +365,11 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { return TAOS_SYNC_PROPOSE_OTHER_ERROR; } + if (mndAcquireSyncRef(pMnode) != 0) { + mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr()); + return TAOS_SYNC_PROPOSE_OTHER_ERROR; + } + char logBuf[512]; char *syncNodeStr = sync2SimpleStr(pMgmt->sync); snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); @@ -405,59 +413,45 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { code = TAOS_SYNC_PROPOSE_OTHER_ERROR; } + mndReleaseSyncRef(pMnode); return code; } -static int32_t mndCheckMnodeMaster(SRpcMsg *pMsg) { - if (!IsReq(pMsg)) return 0; - if (mndIsMaster(pMsg->info.node)) return 0; +static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { + if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0; - if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER || - pMsg->msgType == TDMT_MND_TRANS_TIMER) { - return -1; - } - mError("msg:%p, failed to check master since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, - TMSG_INFO(pMsg->msgType)); + if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER && + pMsg->msgType != TDMT_MND_TRANS_TIMER) { + mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, + TMSG_INFO(pMsg->msgType)); - SEpSet epSet = {0}; - mndGetMnodeEpSet(pMsg->info.node, &epSet); + SEpSet epSet = {0}; + mndGetMnodeEpSet(pMsg->info.node, &epSet); -#if 0 - mTrace("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse); - for (int32_t i = 0; i < epSet.numOfEps; ++i) { - mTrace("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port); - if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) { - epSet.inUse = (i + 1) % epSet.numOfEps; + int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); + pMsg->info.rsp = rpcMallocCont(contLen); + if (pMsg->info.rsp != NULL) { + tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); + pMsg->info.rspLen = contLen; + terrno = TSDB_CODE_RPC_REDIRECT; + } else { + terrno = TSDB_CODE_OUT_OF_MEMORY; } } -#endif - - int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); - pMsg->info.rsp = rpcMallocCont(contLen); - if (pMsg->info.rsp != NULL) { - tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet); - pMsg->info.rspLen = contLen; - terrno = TSDB_CODE_RPC_REDIRECT; - } else { - terrno = TSDB_CODE_OUT_OF_MEMORY; - } return -1; } -static int32_t mndCheckRequestValid(SRpcMsg *pMsg) { +static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { if (!IsReq(pMsg)) return 0; if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0; - mError("msg:%p, failed to valid request, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_INVALID_MSG_LEN; return -1; } -int32_t mndProcessMsg(SRpcMsg *pMsg) { - if (mndCheckMnodeMaster(pMsg) != 0) return -1; - if (mndCheckRequestValid(pMsg) != 0) return -1; - +int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)]; if (fp == NULL) { @@ -466,8 +460,13 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) { return -1; } - mTrace("msg:%p, will be processed in mnode, app:%p type:%s", 
pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + if (mndCheckMsgContent(pMsg) != 0) return -1; + if (mndCheckMnodeState(pMsg) != 0) return -1; + + mTrace("msg:%p, start to process in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); int32_t code = (*fp)(pMsg); + mndReleaseRpcRef(pMnode); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) { mTrace("msg:%p, won't response immediately since in progress", pMsg); } else if (code == 0) { @@ -476,6 +475,7 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) { mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); } + return code; } @@ -502,7 +502,7 @@ int64_t mndGenerateUid(char *name, int32_t len) { int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo, SMonGrantInfo *pGrantInfo) { - if (!mndIsMaster(pMnode)) return -1; + if (mndAcquireRpcRef(pMnode) != 0) return -1; SSdb *pSdb = pMnode->pSdb; int64_t ms = taosGetTimestampMs(); @@ -511,6 +511,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pClusterInfo->mnodes = taosArrayInit(sdbGetSize(pSdb, SDB_MNODE), sizeof(SMonMnodeDesc)); pVgroupInfo->vgroups = taosArrayInit(sdbGetSize(pSdb, SDB_VGROUP), sizeof(SMonVgroupDesc)); if (pClusterInfo->dnodes == NULL || pClusterInfo->mnodes == NULL || pVgroupInfo->vgroups == NULL) { + mndReleaseRpcRef(pMnode); return -1; } @@ -605,6 +606,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr pGrantInfo->timeseries_total = INT32_MAX; } + mndReleaseRpcRef(pMnode); return 0; } @@ -612,3 +614,76 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) { pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync); return 0; } + +int32_t mndAcquireRpcRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else if (!mndIsMaster(pMnode)) { + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} + +void mndReleaseRpcRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->rpcRef, 1); + mTrace("mnode rpc is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} + +void mndSetRestore(SMnode *pMnode, bool restored) { + if (restored) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + } else { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->restored = false; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set restored:%d", restored); + while (1) { + if (pMnode->rpcRef <= 0) break; + taosMsleep(3); + } + } +} + +bool mndGetRestored(SMnode *pMnode) { return pMnode->restored; } + +void mndSetStop(SMnode *pMnode) { + taosThreadRwlockWrlock(&pMnode->lock); + pMnode->stopped = true; + taosThreadRwlockUnlock(&pMnode->lock); + mTrace("mnode set stopped"); +} + +bool mndGetStop(SMnode *pMnode) { return pMnode->stopped; } + +int32_t mndAcquireSyncRef(SMnode *pMnode) { + int32_t code = 0; + taosThreadRwlockRdlock(&pMnode->lock); + if (pMnode->stopped) { + terrno = TSDB_CODE_APP_NOT_READY; + code = -1; + } else { + int32_t ref = atomic_add_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is acquired, ref:%d", ref); + } + taosThreadRwlockUnlock(&pMnode->lock); + return code; +} 
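The acquire/release pairs above form a small guard protocol: take `rpcRef` (or `syncRef`) under the read lock, fail fast if the mnode is stopped or not master, and release when done, so that `mndSetRestore(pMnode, false)` can spin until all in-flight users drain. A condensed sketch of the expected call shape, mirroring what `mndProcessRpcMsg` now does (the handler body is a placeholder, not patch code):

```c
// Illustrative guard usage (assumes mndInt.h from this patch): every
// rpc-path entry point brackets its work with the ref pair so that
// stop/restore can wait for in-flight requests to finish.
static int32_t handleOneMsg(SMnode *pMnode, SRpcMsg *pMsg) {
  if (mndAcquireRpcRef(pMnode) != 0) {
    return -1;  // stopped or not master; terrno describes why
  }
  int32_t code = 0;
  // ... read or mutate mnode state via pMnode->pSdb here ...
  mndReleaseRpcRef(pMnode);
  return code;
}
```

Note the deliberate asymmetry: acquire and release take the lock only in read mode, while `mndSetStop`/`mndSetRestore` take it in write mode, so flag flips serialize against new acquisitions but concurrent handlers never block each other.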
+ +void mndReleaseSyncRef(SMnode *pMnode) { + taosThreadRwlockRdlock(&pMnode->lock); + int32_t ref = atomic_sub_fetch_32(&pMnode->syncRef, 1); + mTrace("mnode sync is released, ref:%d", ref); + taosThreadRwlockUnlock(&pMnode->lock); +} \ No newline at end of file diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c index 78b70c9a74133b859b4175b195d4a939c37ebccc..97594f2b913334ac17e2bd5e6c8fc95e19a03e9e 100644 --- a/source/dnode/mnode/impl/src/mndQuery.c +++ b/source/dnode/mnode/impl/src/mndQuery.c @@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) { mTrace("msg:%p, in query queue is processing", pMsg); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0); break; case TDMT_VND_QUERY_HEARTBEAT: - code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0); break; default: terrno = TSDB_CODE_VND_APP_ERROR; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 17cf5d43b575dbe2840bec24a29e97dc399ccd7d..3f3f4f5b5d70dbb70f88f395b86d84833010c873 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -940,7 +940,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock } // do not show for cleared subscription -#if 0 +#if 1 int32_t sz = taosArrayGetSize(pSub->unassignedVgs); for (int32_t i = 0; i < sz; i++) { SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i); diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index d5fcf9b1eb9bbc93ba623b09bbe3b8ff459b6240..39f28c8dea9746f65b3926db13b0a289856e754a 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -17,12 +17,12 @@ #include "mndSync.h" #include "mndTrans.h" -int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { +int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) { SMsgHead *pHead = pMsg->pCont; pHead->contLen = htonl(pHead->contLen); pHead->vgId = htonl(pHead->vgId); - return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); + return tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg); } int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); } @@ -32,7 +32,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM SSyncMgmt *pMgmt = &pMnode->syncMgmt; SSdbRaw *pRaw = pMsg->pCont; - int32_t transId = sdbGetIdFromRaw(pRaw); + int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw); pMgmt->errCode = cbMeta.code; mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw); @@ -48,6 +48,10 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM mError("trans:%d, failed to propose since %s", transId, 
tstrerror(pMgmt->errCode)); } tsem_post(&pMgmt->syncSem); + } else { + if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) { + sdbWriteFile(pMnode->pSdb); + } } } @@ -63,27 +67,35 @@ void mndRestoreFinish(struct SSyncFSM *pFsm) { if (!pMnode->deploy) { mInfo("mnode sync restore finished"); mndTransPullup(pMnode); - pMnode->syncMgmt.restored = true; + mndSetRestore(pMnode, true); } } -int32_t mndSnapshotRead(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf, int32_t* len) { - /* +int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void **ppIter, char **ppBuf, int32_t *len) { SMnode *pMnode = pFsm->data; - SSdbIter *pIter; - if (iter == NULL) { - pIter = sdbIterInit(pMnode->sdb) - } else { - pIter = iter; - } - */ + mInfo("start to read snapshot from sdb"); + + // sdbStartRead + // sdbDoRead + // sdbStopRead return 0; } -int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len) { +int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) { SMnode *pMnode = pFsm->data; - sdbWrite(pMnode->pSdb, (SSdbRaw*)pBuf); + + // sdbStartWrite + // sdbDoWrite + + mndSetRestore(pMnode, false); + mInfo("start to apply snapshot to sdb"); + + // sdbStopWrite + mInfo("successfully to apply snapshot to sdb"); + mndSetRestore(pMnode, true); + + // taosMemoryFree(pBuf); return 0; } @@ -116,7 +128,7 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) { pFsm->FpSnapshotRead = mndSnapshotRead; pFsm->FpSnapshotApply = mndSnapshotApply; pFsm->FpReConfigCb = mndReConfig; - + return pFsm; } @@ -150,8 +162,7 @@ int32_t mndInitSync(SMnode *pMnode) { SSyncCfg *pCfg = &syncInfo.syncCfg; pCfg->replicaNum = pMnode->replica; pCfg->myIndex = pMnode->selfIndex; - mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, - pMgmt->standby); + mInfo("start to open mnode sync, replica:%d myindex:%d standby:%d", pCfg->replicaNum, pCfg->myIndex, pMgmt->standby); for (int32_t i = 0; i < pMnode->replica; ++i) { SNodeInfo *pNode = &pCfg->nodeInfo[i]; tstrncpy(pNode->nodeFqdn, pMnode->replicas[i].fqdn, sizeof(pNode->nodeFqdn)); @@ -219,17 +230,12 @@ void mndSyncStart(SMnode *pMnode) { SSyncMgmt *pMgmt = &pMnode->syncMgmt; syncSetMsgCb(pMgmt->sync, &pMnode->msgCb); - syncStart(pMgmt->sync); - -#if 0 if (pMgmt->standby) { syncStartStandBy(pMgmt->sync); } else { syncStart(pMgmt->sync); } -#endif - - mDebug("sync:%" PRId64 " is started", pMgmt->sync); + mDebug("sync:%" PRId64 " is started, standby:%d", pMgmt->sync, pMgmt->standby); } void mndSyncStop(SMnode *pMnode) {} @@ -243,7 +249,7 @@ bool mndIsMaster(SMnode *pMnode) { return false; } - if (!pMgmt->restored) { + if (!pMnode->restored) { terrno = TSDB_CODE_APP_NOT_READY; return false; } diff --git a/source/dnode/mnode/impl/test/acct/CMakeLists.txt b/source/dnode/mnode/impl/test/acct/CMakeLists.txt index 40f8b0726e28446170a71bbbccde979376448fbb..d72292e34bd605ec91b16788fadd9f1ff1c68cc4 100644 --- a/source/dnode/mnode/impl/test/acct/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/acct/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME acctTest - COMMAND acctTest -) +if(NOT TD_WINDOWS) + add_test( + NAME acctTest + COMMAND acctTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/func/CMakeLists.txt b/source/dnode/mnode/impl/test/func/CMakeLists.txt index ecb4f851be9d95a7c894d1e2ef2b3d9ce83067d3..2a8eb0a39d89275ae204e6405de2b774b4412619 100644 --- 
a/source/dnode/mnode/impl/test/func/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/func/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME funcTest - COMMAND funcTest -) +if(NOT TD_WINDOWS) + add_test( + NAME funcTest + COMMAND funcTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/profile/CMakeLists.txt b/source/dnode/mnode/impl/test/profile/CMakeLists.txt index 8b811ebfed3a56ab139ecfc81f3556af2f9bb032..b6586192b2b4c6e428c2f00fddb11527a1747707 100644 --- a/source/dnode/mnode/impl/test/profile/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/profile/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME profileTest - COMMAND profileTest -) +if(NOT TD_WINDOWS) + add_test( + NAME profileTest + COMMAND profileTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp index df535c4456615b8b501236f2c7ad1684c2f4ac6f..43be55dd1de822d098475747a7b5b6452f379058 100644 --- a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp +++ b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp @@ -492,7 +492,7 @@ TEST_F(MndTestSdb, 01_Write_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2); ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1); - ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 ); + ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2); sdbSetApplyIndex(pSdb, -1); ASSERT_EQ(sdbGetApplyIndex(pSdb), -1); ASSERT_EQ(mnode.insertTimes, 2); @@ -895,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) { ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING); } + { + SSdbIter *pReader = NULL; + SSdbIter *pWritter = NULL; + void *pBuf = NULL; + int32_t len = 0; + int32_t code = 0; + + code = sdbStartRead(pSdb, &pReader); + ASSERT_EQ(code, 0); + code = sdbStartWrite(pSdb, &pWritter); + ASSERT_EQ(code, 0); + + while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) { + if (pBuf != NULL && len != 0) { + sdbDoWrite(pSdb, pWritter, pBuf, len); + taosMemoryFree(pBuf); + } else { + break; + } + } + + sdbStopRead(pSdb, pReader); + sdbStopWrite(pSdb, pWritter, true); + } + + ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1); + ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4); + sdbCleanup(pSdb); - ASSERT_EQ(mnode.insertTimes, 5); - ASSERT_EQ(mnode.deleteTimes, 5); + ASSERT_EQ(mnode.insertTimes, 9); + ASSERT_EQ(mnode.deleteTimes, 9); } \ No newline at end of file diff --git a/source/dnode/mnode/impl/test/show/CMakeLists.txt b/source/dnode/mnode/impl/test/show/CMakeLists.txt index 69e93e7086147de77676ea02017a6ce5533acf42..9b4e21501ed478e527adfa69a5a2297e173876e1 100644 --- a/source/dnode/mnode/impl/test/show/CMakeLists.txt +++ b/source/dnode/mnode/impl/test/show/CMakeLists.txt @@ -5,7 +5,9 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME showTest - COMMAND showTest -) +if(NOT TD_WINDOWS) + add_test( + NAME showTest + COMMAND showTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index 3932defd8db79e0a83cfec078c01e8a97e0ec3d5..c66b47a24b13f0c9efd55dc965743416737177ea 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -166,7 +166,6 @@ typedef struct SSdbRow { typedef struct SSdb { SMnode *pMnode; char *currDir; - char *syncDir; char *tmpDir; int64_t lastCommitVer; int64_t curVer; @@ -182,11 +181,13 @@ typedef struct SSdb { SdbDeployFp deployFps[SDB_MAX]; SdbEncodeFp encodeFps[SDB_MAX]; SdbDecodeFp decodeFps[SDB_MAX]; + TdThreadMutex filelock; } SSdb; typedef struct SSdbIter { TdFilePtr file; - int64_t readlen; + 
int64_t total; + char *name; } SSdbIter; typedef struct { @@ -380,13 +381,17 @@ SSdbRow *sdbAllocRow(int32_t objSize); void *sdbGetRowObj(SSdbRow *pRow); void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); -SSdbIter *sdbIterInit(SSdb *pSdb); -SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *iter, char **ppBuf, int32_t *len); +int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter); +int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter); +int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len); + +int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter); +int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply); +int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len); const char *sdbTableName(ESdbType type); void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); - -int32_t sdbGetIdFromRaw(SSdbRaw *pRaw); +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw); #ifdef __cplusplus } diff --git a/source/dnode/mnode/sdb/inc/sdbInt.h b/source/dnode/mnode/sdb/inc/sdbInt.h deleted file mode 100644 index c49d6e8fb287619d9503282dd2e164ed432ce823..0000000000000000000000000000000000000000 --- a/source/dnode/mnode/sdb/inc/sdbInt.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef _TD_SDB_INT_H_ -#define _TD_SDB_INT_H_ - -#include "os.h" - -#include "sdb.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// clang-format off -#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} -#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} -#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} -#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }} -#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }} -#define mTrace(...) 
{ if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} -// clang-format on - -typedef struct SSdbRaw { - int8_t type; - int8_t status; - int8_t sver; - int8_t reserved; - int32_t dataLen; - char pData[]; -} SSdbRaw; - -typedef struct SSdbRow { - ESdbType type; - ESdbStatus status; - int32_t refCount; - char pObj[]; -} SSdbRow; - -const char *sdbTableName(ESdbType type); -void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper); - -void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); - -#ifdef __cplusplus -} -#endif - -#endif /*_TD_SDB_INT_H_*/ diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index d289e30d7b4c68e85a5bc48048b52536f8e150e9..485b729deb52ffcdf4c5b76c1999124a5157f5b2 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -56,6 +56,7 @@ SSdb *sdbInit(SSdbOpt *pOption) { pSdb->curTerm = -1; pSdb->lastCommitVer = -1; pSdb->pMnode = pOption->pMnode; + taosThreadMutexInit(&pSdb->filelock, NULL); mDebug("sdb init successfully"); return pSdb; } @@ -69,11 +70,8 @@ void sdbCleanup(SSdb *pSdb) { taosMemoryFreeClear(pSdb->currDir); } - if (pSdb->syncDir != NULL) { - taosMemoryFreeClear(pSdb->syncDir); - } - if (pSdb->tmpDir != NULL) { + taosRemoveDir(pSdb->tmpDir); taosMemoryFreeClear(pSdb->tmpDir); } @@ -104,6 +102,7 @@ void sdbCleanup(SSdb *pSdb) { mDebug("sdb table:%s is cleaned up", sdbTableName(i)); } + taosThreadMutexDestroy(&pSdb->filelock); taosMemoryFree(pSdb); mDebug("sdb is cleaned up"); } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 25cda199568592ba809e76c92e32107a30a163da..1ca5097ce62738fd0b5c5cf8ccc5f8bdf482314d 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -22,13 +22,14 @@ #define SDB_RESERVE_SIZE 512 #define SDB_FILE_VER 1 -static int32_t sdbRunDeployFp(SSdb *pSdb) { +static int32_t sdbDeployData(SSdb *pSdb) { mDebug("start to deploy sdb"); for (int32_t i = SDB_MAX - 1; i >= 0; --i) { SdbDeployFp fp = pSdb->deployFps[i]; if (fp == NULL) continue; + mDebug("start to deploy sdb:%s", sdbTableName(i)); if ((*fp)(pSdb->pMnode) != 0) { mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr()); return -1; @@ -39,6 +40,39 @@ static int32_t sdbRunDeployFp(SSdb *pSdb) { return 0; } +static void sdbResetData(SSdb *pSdb) { + mDebug("start to reset sdb"); + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + SSdbRow **ppRow = taosHashIterate(hash, NULL); + while (ppRow != NULL) { + SSdbRow *pRow = *ppRow; + if (pRow == NULL) continue; + + sdbFreeRow(pSdb, pRow, true); + ppRow = taosHashIterate(hash, ppRow); + } + } + + for (ESdbType i = 0; i < SDB_MAX; ++i) { + SHashObj *hash = pSdb->hashObjs[i]; + if (hash == NULL) continue; + + taosHashClear(pSdb->hashObjs[i]); + pSdb->tableVer[i] = 0; + pSdb->maxId[i] = 0; + mDebug("sdb:%s is reset", sdbTableName(i)); + } + + pSdb->curVer = -1; + pSdb->curTerm = -1; + pSdb->lastCommitVer = -1; + mDebug("sdb reset successfully"); +} + static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { int64_t sver = 0; int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t)); @@ -169,11 +203,15 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) { return 0; } -int32_t sdbReadFile(SSdb *pSdb) { +static int32_t sdbReadFileImp(SSdb *pSdb) { int64_t offset = 0; int32_t code = 0; int32_t readLen = 0; int64_t ret = 0; + char file[PATH_MAX] = {0}; + + 
snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + mDebug("start to read file:%s", file); SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100); if (pRaw == NULL) { @@ -182,10 +220,6 @@ int32_t sdbReadFile(SSdb *pSdb) { return -1; } - char file[PATH_MAX] = {0}; - snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to read file:%s", file); - TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ); if (pFile == NULL) { taosMemoryFree(pRaw); @@ -196,8 +230,6 @@ int32_t sdbReadFile(SSdb *pSdb) { if (sdbReadFileHead(pSdb, pFile) != 0) { mError("failed to read file:%s head since %s", file, terrstr()); - pSdb->curVer = -1; - pSdb->curTerm = -1; taosMemoryFree(pRaw); taosCloseFile(&pFile); return -1; @@ -264,6 +296,20 @@ _OVER: return code; } +int32_t sdbReadFile(SSdb *pSdb) { + taosThreadMutexLock(&pSdb->filelock); + + sdbResetData(pSdb); + int32_t code = sdbReadFileImp(pSdb); + if (code != 0) { + mError("failed to read sdb since %s", terrstr()); + sdbResetData(pSdb); + } + + taosThreadMutexUnlock(&pSdb->filelock); + return code; +} + static int32_t sdbWriteFileImp(SSdb *pSdb) { int32_t code = 0; @@ -378,80 +424,188 @@ int32_t sdbWriteFile(SSdb *pSdb) { return 0; } - return sdbWriteFileImp(pSdb); + taosThreadMutexLock(&pSdb->filelock); + int32_t code = sdbWriteFileImp(pSdb); + if (code != 0) { + mError("failed to write sdb since %s", terrstr()); + } + taosThreadMutexUnlock(&pSdb->filelock); + return code; } int32_t sdbDeploy(SSdb *pSdb) { - if (sdbRunDeployFp(pSdb) != 0) { + if (sdbDeployData(pSdb) != 0) { return -1; } - if (sdbWriteFileImp(pSdb) != 0) { + if (sdbWriteFile(pSdb) != 0) { return -1; } return 0; } -SSdbIter *sdbIterInit(SSdb *pSdb) { - char datafile[PATH_MAX] = {0}; - char tmpfile[PATH_MAX] = {0}; - snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - snprintf(tmpfile, sizeof(datafile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP); - - if (taosCopyFile(datafile, tmpfile) != 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to copy file %s to %s since %s", datafile, tmpfile, terrstr()); - return NULL; - } - +static SSdbIter *sdbCreateIter(SSdb *pSdb) { SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter)); if (pIter == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - pIter->file = taosOpenFile(tmpfile, TD_FILE_READ); - if (pIter->file == NULL) { - terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read snapshot file:%s since %s", tmpfile, terrstr()); + char name[PATH_MAX + 100] = {0}; + snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter); + pIter->name = strdup(name); + if (pIter->name == NULL) { taosMemoryFree(pIter); + terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - mDebug("start to read snapshot file:%s, iter:%p", tmpfile, pIter); return pIter; } -SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *pIter, char **ppBuf, int32_t *buflen) { - const int32_t maxlen = 100; +static void sdbCloseIter(SSdbIter *pIter) { + if (pIter == NULL) return; - char *pBuf = taosMemoryCalloc(1, maxlen); + if (pIter->file != NULL) { + taosCloseFile(&pIter->file); + pIter->file = NULL; + } + + if (pIter->name != NULL) { + taosRemoveFile(pIter->name); + taosMemoryFree(pIter->name); + pIter->name = NULL; + } + + mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total); + taosMemoryFree(pIter); +} + +int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = sdbCreateIter(pSdb); + if (pIter == NULL) return -1; + + char datafile[PATH_MAX] = 
{0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + + taosThreadMutexLock(&pSdb->filelock); + if (taosCopyFile(datafile, pIter->name) < 0) { + taosThreadMutexUnlock(&pSdb->filelock); + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr()); + sdbCloseIter(pIter); + return -1; + } + taosThreadMutexUnlock(&pSdb->filelock); + + pIter->file = taosOpenFile(pIter->name, TD_FILE_READ); + if (pIter->file == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to open file:%s since %s", pIter->name, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + *ppIter = pIter; + mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name); + return 0; +} + +int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) { + sdbCloseIter(pIter); + return 0; +} + +int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) { + int32_t maxlen = 100; + void *pBuf = taosMemoryCalloc(1, maxlen); if (pBuf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; + sdbCloseIter(pIter); + return -1; } int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen); - if (readlen == 0) { - mTrace("read snapshot to the end, readlen:%" PRId64, pIter->readlen); - taosMemoryFree(pBuf); - taosCloseFile(&pIter->file); - taosMemoryFree(pIter); - pIter = NULL; - } else if (readlen < 0) { + if (readlen < 0 || readlen > maxlen) { terrno = TAOS_SYSTEM_ERROR(errno); - mError("failed to read snapshot since %s, readlen:%" PRId64, terrstr(), pIter->readlen); + mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total); + *ppBuf = NULL; + *len = 0; taosMemoryFree(pBuf); - taosCloseFile(&pIter->file); - taosMemoryFree(pIter); - pIter = NULL; - } else { - pIter->readlen += readlen; - mTrace("read snapshot, readlen:%" PRId64, pIter->readlen); + return -1; + } else if (readlen == 0) { + mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total); + *ppBuf = NULL; + *len = 0; + taosMemoryFree(pBuf); + return 0; + } else { // (readlen <= maxlen) + pIter->total += readlen; + mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total); *ppBuf = pBuf; - *buflen = readlen; + *len = readlen; + return 0; } +} - return pIter; +int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) { + SSdbIter *pIter = sdbCreateIter(pSdb); + if (pIter == NULL) return -1; + + pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + if (pIter->file == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to open %s since %s", pIter->name, terrstr()); + return -1; + } + + *ppIter = pIter; + mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name); + return 0; +} + +int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) { + int32_t code = 0; + + if (!isApply) { + sdbCloseIter(pIter); + mInfo("sdbiter:%p, not apply to sdb", pIter); + return 0; + } + + taosFsyncFile(pIter->file); + taosCloseFile(&pIter->file); + pIter->file = NULL; + + char datafile[PATH_MAX] = {0}; + snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); + if (taosRenameFile(pIter->name, datafile) != 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr()); + sdbCloseIter(pIter); + return -1; + } + + sdbCloseIter(pIter); + if (sdbReadFile(pSdb) != 0) { + mError("sdbiter:%p, failed to read from 
%s since %s", pIter, datafile, terrstr()); + return -1; + } + + mInfo("sdbiter:%p, successfully applied to sdb", pIter); + return 0; } + +int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) { + int32_t writelen = taosWriteFile(pIter->file, pBuf, len); + if (writelen != len) { + terrno = TAOS_SYSTEM_ERROR(errno); + mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total); + return -1; + } + + pIter->total += writelen; + mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total); + return 0; +} \ No newline at end of file diff --git a/source/dnode/mnode/sdb/src/sdbRaw.c b/source/dnode/mnode/sdb/src/sdbRaw.c index 4b61ebb627622bfdb1bb32a1df591d564a7c7b01..90643a54a9de42d4f505fdcb4f1d25ef95b80ac7 100644 --- a/source/dnode/mnode/sdb/src/sdbRaw.c +++ b/source/dnode/mnode/sdb/src/sdbRaw.c @@ -16,9 +16,14 @@ #define _DEFAULT_SOURCE #include "sdb.h" -int32_t sdbGetIdFromRaw(SSdbRaw *pRaw) { - int32_t id = *((int32_t *)(pRaw->pData)); - return id; +int32_t sdbGetIdFromRaw(SSdb *pSdb, SSdbRaw *pRaw) { + EKeyType keytype = pSdb->keyTypes[pRaw->type]; + if (keytype == SDB_KEY_INT32) { + int32_t id = *((int32_t *)(pRaw->pData)); + return id; + } else { + return -2; + } } SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) { diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c index 6d31e20d9bc03908025b100dd135c7e706a0b647..40aa572a56709a97e454cdc82cb7e97852356b27 100644 --- a/source/dnode/qnode/src/qnode.c +++ b/source/dnode/qnode/src/qnode.c @@ -40,37 +40,46 @@ void qndClose(SQnode *pQnode) { taosMemoryFree(pQnode); } -int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; } +int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { + SMsgCb* pCb = &pQnode->msgCb; -int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) { + pLoad->numOfQueryInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, QUERY_QUEUE); + pLoad->numOfFetchInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, FETCH_QUEUE); + pLoad->waitTimeInQueryQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, QUERY_QUEUE); + pLoad->waitTimeInFetchQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, FETCH_QUEUE); + + return 0; +} + +int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) { int32_t code = -1; SReadHandle handle = {.pMsgCb = &pQnode->msgCb}; qTrace("message in qnode queue is processing"); switch (pMsg->msgType) { case TDMT_VND_QUERY: - code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_QUERY_CONTINUE: - code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg); + code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH: - code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_FETCH_RSP: - code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_CANCEL_TASK: - code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_DROP_TASK: - code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts); break; case TDMT_VND_CONSUME: // code = tqProcessConsumeReq(pQnode->pTq, pMsg); // break; case TDMT_VND_QUERY_HEARTBEAT: - code = 
qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg); + code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts); break; default: qError("unknown msg type:%d in qnode queue", pMsg->msgType); diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 2b713ff980beb6c5ea7aff502b0128f422256d58..e4343e3bbf63a9dd847cc1bd2f79e2ef35721cd3 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -105,13 +105,15 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab void *pMemRef); int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo); bool isTsdbCacheLastRow(tsdbReaderT *pReader); -int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list); +int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list); +void * tsdbGetIdx(SMeta *pMeta); int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle); -bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); -void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); + +bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle); +void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo); int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave); SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList); -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond); +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx); void tsdbCleanupReadHandle(tsdbReaderT queryHandle); // tq @@ -174,7 +176,7 @@ struct SMetaEntry { int64_t version; int8_t type; tb_uid_t uid; - char *name; + char * name; union { struct { SSchemaWrapper schemaRow; @@ -202,17 +204,17 @@ struct SMetaEntry { struct SMetaReader { int32_t flags; - SMeta *pMeta; + SMeta * pMeta; SDecoder coder; SMetaEntry me; - void *pBuf; + void * pBuf; int32_t szBuf; }; struct SMTbCursor { - TBC *pDbc; - void *pKey; - void *pVal; + TBC * pDbc; + void * pKey; + void * pVal; int32_t kLen; int32_t vLen; SMetaReader mr; diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index ba25c5e2866995d71c1c7cdee2473a87b609d2fe..0e67d9e426f1b708e927d986f7c9d797acc8759d 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -103,6 +103,7 @@ SArray* metaGetSmaTbUids(SMeta* pMeta); int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever); int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader); int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData); +void* metaGetIdx(SMeta* pMeta); int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg); int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 184b640bddb82122b95abdcf9b3934d27a1d860c..605e8049331839816b6d072388c088f581a8ffb5 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -155,44 +155,52 @@ int metaTbCursorNext(SMTbCursor *pTbCur) { } SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) { - void *pKey = NULL; - void *pVal = NULL; - int kLen = 0; - int vLen = 0; - int ret; - SSkmDbKey skmDbKey; - SSchemaWrapper *pSW = NULL; - SSchema *pSchema = NULL; - void *pBuf; - SDecoder coder = {0}; 
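The sdb hunks above replace the old one-shot snapshot reader with a start/do/stop iterator API. The sketch below shows how a caller might drive it; it assumes the `sdbStartRead(SSdb *, SSdbIter **)` signature implied by the symmetric `sdbStartWrite`, and the sender function name is hypothetical, not part of this patch. Note the end-of-snapshot convention visible in `sdbDoRead`: return 0 with a NULL buffer and zero length.

// Hypothetical sender loop over the new sdb snapshot read API (sketch only).
static int32_t mndSendSdbSnapshot(SSdb *pSdb) {
  SSdbIter *pIter = NULL;
  if (sdbStartRead(pSdb, &pIter) != 0) return -1;  // copies sdb.data aside and opens it

  for (;;) {
    void    *pBuf = NULL;
    int32_t  len = 0;
    if (sdbDoRead(pSdb, pIter, &pBuf, &len) != 0) return -1;  // read error (iterator cleanup elided)
    if (pBuf == NULL && len == 0) break;                      // end of snapshot
    // transport the (pBuf, len) chunk to the receiver here
    taosMemoryFree(pBuf);
  }

  return sdbStopRead(pSdb, pIter);  // closes the file and frees the iterator
}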
- - // fetch - skmDbKey.uid = uid; - skmDbKey.sver = sver; - pKey = &skmDbKey; - kLen = sizeof(skmDbKey); + void *pData = NULL; + int nData = 0; + int64_t version; + SSchemaWrapper schema = {0}; + SSchemaWrapper *pSchema = NULL; + SDecoder dc = {0}; + metaRLock(pMeta); - ret = tdbTbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen); - metaULock(pMeta); - if (ret < 0) { - return NULL; - } + if (sver < 0) { + if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { + goto _err; + } - // decode - pBuf = pVal; - pSW = taosMemoryMalloc(sizeof(SSchemaWrapper)); + version = *(int64_t *)pData; - tDecoderInit(&coder, pVal, vLen); - tDecodeSSchemaWrapper(&coder, pSW); - pSchema = taosMemoryMalloc(sizeof(SSchema) * pSW->nCols); - memcpy(pSchema, pSW->pSchema, sizeof(SSchema) * pSW->nCols); - tDecoderClear(&coder); + tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData); + + SMetaEntry me = {0}; + tDecoderInit(&dc, pData, nData); + metaDecodeEntry(&dc, &me); + if (me.type == TSDB_SUPER_TABLE) { + pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow); + } else if (me.type == TSDB_NORMAL_TABLE) { + } else { + ASSERT(0); + } + tDecoderClear(&dc); + } else { + if (tdbTbGet(pMeta->pSkmDb, &(SSkmDbKey){.uid = uid, .sver = sver}, sizeof(SSkmDbKey), &pData, &nData) < 0) { + goto _err; + } - pSW->pSchema = pSchema; + tDecoderInit(&dc, pData, nData); + tDecodeSSchemaWrapper(&dc, &schema); + pSchema = tCloneSSchemaWrapper(&schema); + tDecoderClear(&dc); + } - tdbFree(pVal); + metaULock(pMeta); + tdbFree(pData); + return pSchema; - return pSW; +_err: + metaULock(pMeta); + tdbFree(pData); + return NULL; } struct SMCtbCursor { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 7182f496c4d6410a705a82dba1c92ff6561a5faf..f610f18126ef86a268801f73f5a951c97a380867 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -31,9 +31,9 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { int vLen = 0; const void *pKey = NULL; const void *pVal = NULL; - void *pBuf = NULL; + void * pBuf = NULL; int32_t szBuf = 0; - void *p = NULL; + void * p = NULL; SMetaReader mr = {0}; // validate req @@ -87,7 +87,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) { } // drop all child tables - TBC *pCtbIdxc = NULL; + TBC * pCtbIdxc = NULL; SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t)); tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn); @@ -142,8 +142,8 @@ _exit: int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { SMetaEntry oStbEntry = {0}; SMetaEntry nStbEntry = {0}; - TBC *pUidIdxc = NULL; - TBC *pTbDbc = NULL; + TBC * pUidIdxc = NULL; + TBC * pTbDbc = NULL; const void *pData; int nData; int64_t oversion; @@ -262,7 +262,7 @@ _err: } int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) { - void *pData = NULL; + void * pData = NULL; int nData = 0; int rc = 0; tb_uid_t uid; @@ -288,7 +288,7 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi } static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { - void *pData = NULL; + void * pData = NULL; int nData = 0; int rc = 0; int64_t version; @@ -324,14 +324,14 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) { } static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; - const void 
*pData = NULL; + const void * pData = NULL; int nData = 0; int ret = 0; tb_uid_t uid; int64_t oversion; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; SMetaEntry entry = {0}; SSchemaWrapper *pSchema; int c; @@ -479,7 +479,7 @@ _err: static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) { SMetaEntry ctbEntry = {0}; SMetaEntry stbEntry = {0}; - void *pVal = NULL; + void * pVal = NULL; int nVal = 0; int ret; int c; @@ -510,7 +510,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA oversion = *(int64_t *)pData; // search table.db - TBC *pTbDbc = NULL; + TBC * pTbDbc = NULL; SDecoder dc1 = {0}; SDecoder dc2 = {0}; @@ -534,7 +534,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA metaDecodeEntry(&dc2, &stbEntry); SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag; - SSchema *pColumn = NULL; + SSchema * pColumn = NULL; int32_t iCol = 0; for (;;) { pColumn = NULL; @@ -639,8 +639,8 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) { static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) { STbDbKey tbDbKey; - void *pKey = NULL; - void *pVal = NULL; + void * pKey = NULL; + void * pVal = NULL; int kLen = 0; int vLen = 0; SEncoder coder = {0}; @@ -755,14 +755,14 @@ static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey) { } static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { - void *pData = NULL; + void * pData = NULL; int nData = 0; STbDbKey tbDbKey = {0}; SMetaEntry stbEntry = {0}; - STagIdxKey *pTagIdxKey = NULL; + STagIdxKey * pTagIdxKey = NULL; int32_t nTagIdxKey; const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0]; - const void *pTagData = NULL; // + const void * pTagData = NULL; // SDecoder dc = {0}; // get super table @@ -804,7 +804,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) { SEncoder coder = {0}; - void *pVal = NULL; + void * pVal = NULL; int vLen = 0; int rcode = 0; SSkmDbKey skmDbKey = {0}; @@ -880,3 +880,11 @@ _err: metaULock(pMeta); return -1; } +// refactor later +void *metaGetIdx(SMeta *pMeta) { +#ifdef USE_INVERTED_INDEX + return pMeta->pTagIvtIdx; +#else + return pMeta->pTagIdx; +#endif +} diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index df10d9d53361b013b267fdeb5cb445f4c3575bfa..731ef2e36061494ec2440cda9f4818edbc0291cd 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -18,7 +18,7 @@ static FORCE_INLINE int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid); static FORCE_INLINE int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids); static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo, - STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level); + STSchema *pTSchema, tb_uid_t suid, int8_t level); struct SRSmaInfo { void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t @@ -364,7 +364,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) { } static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, qTaskInfo_t *taskInfo, - STSchema *pTSchema, tb_uid_t suid, tb_uid_t uid, int8_t level) { + STSchema *pTSchema, tb_uid_t suid, int8_t level) { SArray *pResult = NULL; if (!taskInfo) { @@ -399,7 +399,7 @@ static FORCE_INLINE int32_t 
tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 blockDebugShowData(pResult); STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb1 : pSma->pRSmaTsdb2); SSubmitReq *pReq = NULL; - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), uid, suid) != 0) { + if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) != 0) { taosArrayDestroy(pResult); return TSDB_CODE_FAILED; } @@ -418,15 +418,13 @@ static FORCE_INLINE int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int3 return TSDB_CODE_SUCCESS; } -static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid, tb_uid_t uid) { +static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); if (!pEnv) { // only applicable when rsma env exists return TSDB_CODE_SUCCESS; } - ASSERT(uid != 0); // TODO: remove later - SSmaStat *pStat = SMA_ENV_STAT(pEnv); SRSmaInfo *pRSmaInfo = NULL; @@ -448,8 +446,8 @@ static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; return TSDB_CODE_FAILED; } - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, uid, TSDB_RETENTION_L1); - tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, uid, TSDB_RETENTION_L2); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[0], pTSchema, suid, TSDB_RETENTION_L1); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo->taskInfo[1], pTSchema, suid, TSDB_RETENTION_L2); taosMemoryFree(pTSchema); } @@ -468,12 +466,12 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) { tdFetchSubmitReqSuids(pMsg, &uidStore); if (uidStore.suid != 0) { - tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid, uidStore.uid); + tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid); void *pIter = taosHashIterate(uidStore.uidHash, NULL); while (pIter) { tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); - tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid, 0); + tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid); pIter = taosHashIterate(uidStore.uidHash, pIter); } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 192016166a4d386aa6873955d9411efe32df2412..96ce6e8eeeeaf17243d8e29baa733c369437c931 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -235,6 +235,15 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { } } } + while (1) { + pIter = taosHashIterate(pTq->pStreamTasks, pIter); + if (pIter == NULL) break; + SStreamTask* pTask = (SStreamTask*)pIter; + if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) { + int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd); + ASSERT(code == 0); + } + } return 0; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index c0b97f7536963d28045d8391273ffcf0a1b15876..fbfa70c1176a163ef3a3995ab48fe6010762dc77 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -13,6 +13,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include "vnode.h" #include "tsdb.h" #define EXTRA_BYTES 2 @@ -140,12 +141,6 @@ typedef struct STsdbReadHandle { STSchema* pSchema; } STsdbReadHandle; -typedef struct STableGroupSupporter { - int32_t numOfCols; - SColIndex* pCols; - SSchema* pTagSchema; -} STableGroupSupporter; - static STimeWindow updateLastrowForEachGroup(STableListInfo* pList); static int32_t checkForCachedLastRow(STsdbReadHandle* pTsdbReadHandle, STableListInfo* pList); static int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle); @@ -211,12 +206,6 @@ int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT* pHandle) { return rows; } - // STableData* pMem = NULL; - // STableData* pIMem = NULL; - - // SMemTable* pMemT = pMemRef->snapshot.mem; - // SMemTable* pIMemT = pMemRef->snapshot.imem; - size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i); @@ -259,8 +248,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, S } taosArrayPush(pTableCheckInfo, &info); - tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, - info.lastKey, pTsdbReadHandle->idStr); + tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey, + pTsdbReadHandle->idStr); } // TODO group table according to the tag value. @@ -317,28 +306,28 @@ static int64_t getEarliestValidTimestamp(STsdb* pTsdb) { return now - (tsTickPerMin[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick } -static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond) { - pTsdbReadHandle->window = pCond->twindow; +static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; bool updateTs = false; int64_t startTs = getEarliestValidTimestamp(pTsdbReadHandle->pTsdb); if (ASCENDING_TRAVERSE(pTsdbReadHandle->order)) { if (startTs > pTsdbReadHandle->window.skey) { pTsdbReadHandle->window.skey = startTs; - pCond->twindow.skey = startTs; + pCond->twindows[tWinIdx].skey = startTs; updateTs = true; } } else { if (startTs > pTsdbReadHandle->window.ekey) { pTsdbReadHandle->window.ekey = startTs; - pCond->twindow.ekey = startTs; + pCond->twindows[tWinIdx].ekey = startTs; updateTs = true; } } if (updateTs) { tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64 ", %s", - pTsdbReadHandle, pCond->twindow.skey, pCond->twindow.ekey, pTsdbReadHandle->window.skey, + pTsdbReadHandle, pCond->twindows[tWinIdx].skey, pCond->twindows[tWinIdx].ekey, pTsdbReadHandle->window.skey, pTsdbReadHandle->window.ekey, pTsdbReadHandle->idStr); } } @@ -363,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle, } if (level == TSDB_RETENTION_L0) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L0); return VND_RSMA0(pVnode); } else if (level == TSDB_RETENTION_L1) { - tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L1); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L1); return VND_RSMA1(pVnode); } else { 
- tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2); + tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, + TSDB_RETENTION_L2); return VND_RSMA2(pVnode); } } @@ -382,7 +374,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* goto _end; } - STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindow.skey, pVnode->config.tsdbCfg.retentions); + STsdb* pTsdb = getTsdbByRetentions(pVnode, pReadHandle, pCond->twindows[0].skey, pVnode->config.tsdbCfg.retentions); pReadHandle->order = pCond->order; pReadHandle->pTsdb = pTsdb; @@ -408,11 +400,11 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } assert(pCond != NULL); - setQueryTimewindow(pReadHandle, pCond); + setQueryTimewindow(pReadHandle, pCond, 0); if (pCond->numOfCols > 0) { int32_t rowLen = 0; - for(int32_t i = 0; i < pCond->numOfCols; ++i) { + for (int32_t i = 0; i < pCond->numOfCols; ++i) { rowLen += pCond->colList[i].bytes; } @@ -447,10 +439,10 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond* } pReadHandle->suppInfo.defaultLoadColumn = getDefaultLoadColumns(pReadHandle, true); - pReadHandle->suppInfo.slotIds = - taosMemoryMalloc(sizeof(int32_t) * taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn)); - pReadHandle->suppInfo.plist = - taosMemoryCalloc(taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn), POINTER_BYTES); + + size_t size = taosArrayGetSize(pReadHandle->suppInfo.defaultLoadColumn); + pReadHandle->suppInfo.slotIds = taosMemoryCalloc(size, sizeof(int32_t)); + pReadHandle->suppInfo.plist = taosMemoryCalloc(size, POINTER_BYTES); } pReadHandle->pDataCols = tdNewDataCols(1000, pVnode->config.tsdbCfg.maxRows); @@ -471,6 +463,39 @@ _end: return NULL; } +static int32_t setCurrentSchema(SVnode* pVnode, STsdbReadHandle* pTsdbReadHandle) { + STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + + int32_t sversion = 1; + + SMetaReader mr = {0}; + metaReaderInit(&mr, pVnode->pMeta, 0); + int32_t code = metaGetTableEntryByUid(&mr, pCheckInfo->tableId); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + + if (mr.me.type == TSDB_CHILD_TABLE) { + tb_uid_t suid = mr.me.ctbEntry.suid; + code = metaGetTableEntryByUid(&mr, suid); + if (code != TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_TDB_INVALID_TABLE_ID; + metaReaderClear(&mr); + return terrno; + } + sversion = mr.me.stbEntry.schemaRow.version; + } else { + ASSERT(mr.me.type == TSDB_NORMAL_TABLE); + sversion = mr.me.ntbEntry.schemaRow.version; + } + + metaReaderClear(&mr); + pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, sversion); + return TSDB_CODE_SUCCESS; +} + tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, uint64_t taskId) { STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(pVnode, pCond, qId, taskId); @@ -490,9 +515,12 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return NULL; } - STableCheckInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, 0); + int32_t code = setCurrentSchema(pVnode, pTsdbReadHandle); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } - pTsdbReadHandle->pSchema = metaGetTbTSchema(pVnode->pMeta, pCheckInfo->tableId, 1); int32_t numOfCols = 
taosArrayGetSize(pTsdbReadHandle->suppInfo.defaultLoadColumn); int16_t* ids = pTsdbReadHandle->suppInfo.defaultLoadColumn->pData; @@ -520,7 +548,7 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return (tsdbReaderT)pTsdbReadHandle; } -void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { +void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; if (emptyQueryTimewindow(pTsdbReadHandle)) { @@ -533,7 +561,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { } pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + setQueryTimewindow(pTsdbReadHandle, pCond, tWinIdx); pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -558,11 +586,11 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond* pCond) { resetCheckInfo(pTsdbReadHandle); } -void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList) { +void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList, int32_t tWinIdx) { STsdbReadHandle* pTsdbReadHandle = queryHandle; pTsdbReadHandle->order = pCond->order; - pTsdbReadHandle->window = pCond->twindow; + pTsdbReadHandle->window = pCond->twindows[tWinIdx]; pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL; pTsdbReadHandle->cur.fid = -1; pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER; @@ -602,7 +630,7 @@ void tsdbResetQueryHandleForNewTable(tsdbReaderT queryHandle, SQueryTableDataCon tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableListInfo* pList, uint64_t qId, uint64_t taskId) { - pCond->twindow = updateLastrowForEachGroup(pList); + pCond->twindows[0] = updateLastrowForEachGroup(pList); // no qualified table if (taosArrayGetSize(pList->pTableList) == 0) { @@ -620,7 +648,7 @@ tsdbReaderT tsdbQueryLastRow(SVnode* pVnode, SQueryTableDataCond* pCond, STableL return NULL; } - assert(pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey <= pCond->twindow.ekey); + assert(pCond->order == TSDB_ORDER_ASC && pCond->twindows[0].skey <= pCond->twindows[0].ekey); if (pTsdbReadHandle->cachelastrow) { pTsdbReadHandle->type = TSDB_QUERY_TYPE_LAST; } @@ -660,7 +688,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { } // leave only one table for each group -//static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { +// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) { // assert(pGroupList); // size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); // @@ -692,7 +720,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) { // return pNew; //} -//tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, +// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList, // uint64_t qId, uint64_t taskId) { // STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); // @@ -1299,7 +1327,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { - bool cacheDataInFileBlockHole = (ascScan && (key != 
TSKEY_INITIAL_VAL && key < binfo.window.skey)) || (!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey)); if (cacheDataInFileBlockHole) { @@ -1342,7 +1369,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* pTsdbReadHandle->realNumOfRows = binfo.rows; cur->rows = binfo.rows; - cur->win = binfo.window; + cur->win = binfo.window; cur->mixBlock = false; cur->blockCompleted = true; @@ -1353,9 +1380,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock* cur->lastKey = binfo.window.skey - 1; cur->pos = -1; } - } else { // partially copy to dest buffer + } else { // partially copy to dest buffer // make sure to only load once - bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan))); + bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan))); if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) { code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot); if (code != TSDB_CODE_SUCCESS) { @@ -1864,7 +1891,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t step = ascScan? 1 : -1; + int32_t step = ascScan ? 1 : -1; int32_t start = cur->pos; int32_t end = endPos; @@ -1879,8 +1906,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa // the time window should always be ascending order: skey <= ekey cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]}; cur->mixBlock = (numOfRows != pBlockInfo->rows); - cur->lastKey = tsArray[endPos] + step; - cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0)); + cur->lastKey = tsArray[endPos] + step; + cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0)); // The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases. int32_t pos = endPos + step; @@ -1896,7 +1923,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p // NOTE: reverse the order to find the end position in data block int32_t endPos = -1; bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); - int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC; + int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC; SQueryFilePos* cur = &pTsdbReadHandle->cur; SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0]; @@ -1956,7 +1983,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows - 1] == pBlock->keyLast); - bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); + bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order); int32_t step = ascScan ? 
1 : -1; // for search the endPos, so the order needs to reverse @@ -1967,8 +1994,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf STimeWindow* pWin = &blockInfo.window; tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64 - " rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, - cur->pos, endPos, pTsdbReadHandle->idStr); + " rows:%d, start:%d, end:%d, %s", + pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos, + pTsdbReadHandle->idStr); // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; @@ -2087,8 +2115,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } // still assign data into current row - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; @@ -2153,8 +2182,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf * if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT * copy them all to result buffer, since it may be overlapped with file data block. */ - if (node == NULL || - ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || + if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -2175,7 +2203,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) || - ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); + ((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan)); if (!ascScan) { TSWAP(cur->win.skey, cur->win.ekey); @@ -2794,6 +2822,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int return numOfRows; } +void* tsdbGetIdx(SMeta* pMeta) { + if (pMeta == NULL) { + return NULL; + } + return metaGetIdx(pMeta); +} int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) { SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid); @@ -3382,65 +3416,65 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) { STimeWindow updateLastrowForEachGroup(STableListInfo* pList) { STimeWindow window = {INT64_MAX, INT64_MIN}; -// int32_t totalNumOfTable = 0; -// SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); -// -// // NOTE: starts from the buffer in case of descending timestamp order check data blocks -// size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); -// for (int32_t j = 0; j < numOfGroups; ++j) { -// SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); -// TSKEY key = TSKEY_INITIAL_VAL; -// -// STableKeyInfo keyInfo = {0}; -// -// size_t numOfTables = taosArrayGetSize(pGroup); -// 
for (int32_t i = 0; i < numOfTables; ++i) { -// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); -// -// // if the lastKey equals to INT64_MIN, there is no data in this table -// TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; -// if (key < lastKey) { -// key = lastKey; -// -// // keyInfo.pTable = pInfo->pTable; -// keyInfo.lastKey = key; -// pInfo->lastKey = key; -// -// if (key < window.skey) { -// window.skey = key; -// } -// -// if (key > window.ekey) { -// window.ekey = key; -// } -// } -// } -// -// // more than one table in each group, only one table left for each group -// // if (keyInfo.pTable != NULL) { -// // totalNumOfTable++; -// // if (taosArrayGetSize(pGroup) == 1) { -// // // do nothing -// // } else { -// // taosArrayClear(pGroup); -// // taosArrayPush(pGroup, &keyInfo); -// // } -// // } else { // mark all the empty groups, and remove it later -// // taosArrayDestroy(pGroup); -// // taosArrayPush(emptyGroup, &j); -// // } -// } -// -// // window does not being updated, so set the original -// if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { -// window = TSWINDOW_INITIALIZER; -// assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); -// } -// -// taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); -// taosArrayDestroy(emptyGroup); -// -// groupList->numOfTables = totalNumOfTable; + // int32_t totalNumOfTable = 0; + // SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); + // + // // NOTE: starts from the buffer in case of descending timestamp order check data blocks + // size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); + // for (int32_t j = 0; j < numOfGroups; ++j) { + // SArray* pGroup = taosArrayGetP(groupList->pGroupList, j); + // TSKEY key = TSKEY_INITIAL_VAL; + // + // STableKeyInfo keyInfo = {0}; + // + // size_t numOfTables = taosArrayGetSize(pGroup); + // for (int32_t i = 0; i < numOfTables; ++i) { + // STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i); + // + // // if the lastKey equals to INT64_MIN, there is no data in this table + // TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey; + // if (key < lastKey) { + // key = lastKey; + // + // // keyInfo.pTable = pInfo->pTable; + // keyInfo.lastKey = key; + // pInfo->lastKey = key; + // + // if (key < window.skey) { + // window.skey = key; + // } + // + // if (key > window.ekey) { + // window.ekey = key; + // } + // } + // } + // + // // more than one table in each group, only one table left for each group + // // if (keyInfo.pTable != NULL) { + // // totalNumOfTable++; + // // if (taosArrayGetSize(pGroup) == 1) { + // // // do nothing + // // } else { + // // taosArrayClear(pGroup); + // // taosArrayPush(pGroup, &keyInfo); + // // } + // // } else { // mark all the empty groups, and remove it later + // // taosArrayDestroy(pGroup); + // // taosArrayPush(emptyGroup, &j); + // // } + // } + // + // // window does not being updated, so set the original + // if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { + // window = TSWINDOW_INITIALIZER; + // assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); + // } + // + // taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup)); + // taosArrayDestroy(emptyGroup); + // + // groupList->numOfTables = totalNumOfTable; return window; } @@ -3471,7 +3505,6 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* 
pTsdbReadHandle, SDataBlockInfo* pDa pDataBlockInfo->rows = cur->rows; pDataBlockInfo->window = cur->win; - // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); } /* @@ -3537,9 +3570,9 @@ int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT* pTsdbReadHandle, SColumnDat if (IS_BSMA_ON(&(pHandle->pSchema->columns[slotIds[i]]))) { if (pHandle->suppInfo.pstatis[i].numOfNull == -1) { // set the column data are all NULL pHandle->suppInfo.pstatis[i].numOfNull = pBlockInfo->compBlock->numOfRows; - } else { - pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } + + pHandle->suppInfo.plist[i] = &pHandle->suppInfo.pstatis[i]; } else { *allHave = false; } @@ -3588,108 +3621,6 @@ SArray* tsdbRetrieveDataBlock(tsdbReaderT* pTsdbReadHandle, SArray* pIdList) { } } } -#if 0 -void filterPrepare(void* expr, void* param) { - tExprNode* pExpr = (tExprNode*)expr; - if (pExpr->_node.info != NULL) { - return; - } - - pExpr->_node.info = taosMemoryCalloc(1, sizeof(tQueryInfo)); - - STSchema* pTSSchema = (STSchema*) param; - tQueryInfo* pInfo = pExpr->_node.info; - tVariant* pCond = pExpr->_node.pRight->pVal; - SSchema* pSchema = pExpr->_node.pLeft->pSchema; - - pInfo->sch = *pSchema; - pInfo->optr = pExpr->_node.optr; - pInfo->compare = getComparFunc(pInfo->sch.type, pInfo->optr); - pInfo->indexed = pTSSchema->columns->colId == pInfo->sch.colId; - - if (pInfo->optr == TSDB_RELATION_IN) { - int dummy = -1; - SHashObj *pObj = NULL; - if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false); - SArray *arr = (SArray *)(pCond->arr); - for (size_t i = 0; i < taosArrayGetSize(arr); i++) { - char* p = taosArrayGetP(arr, i); - strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p)); - taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy)); - } - } else { - buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen); - } - pInfo->q = (char *)pObj; - } else if (pCond != NULL) { - uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; - if (size < (uint32_t)pSchema->bytes) { - size = pSchema->bytes; - } - // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(TdUcs4) space. 
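The tsdbRead.c hunks above generalize `SQueryTableDataCond` from a single `twindow` to a `twindows[]` array selected through the new `tWinIdx` argument of `setQueryTimewindow` and `tsdbResetReadHandle`. Below is a minimal sketch of a caller scanning several windows with one reader; the driver name and the window-count parameter `nWin` are assumptions, since this diff does not show how many windows a condition carries.

// Hypothetical multi-window scan driver (sketch only).
static void scanAllWindows(tsdbReaderT reader, SQueryTableDataCond *pCond, int32_t nWin) {
  for (int32_t i = 0; i < nWin; ++i) {
    tsdbResetReadHandle(reader, pCond, i);  // rebinds the reader to pCond->twindows[i]
    while (tsdbNextDataBlock(reader)) {
      SDataBlockInfo info = {0};
      tsdbRetrieveDataBlockInfo(reader, &info);
      // consume the rows that fall inside twindows[i] here
    }
  }
}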
- pInfo->q = taosMemoryCalloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); - tVariantDump(pCond, pInfo->q, pSchema->type, true); - } -} - -#endif - -static int32_t tableGroupComparFn(const void* p1, const void* p2, const void* param) { -#if 0 - STableGroupSupporter* pTableGroupSupp = (STableGroupSupporter*) param; - STable* pTable1 = ((STableKeyInfo*) p1)->uid; - STable* pTable2 = ((STableKeyInfo*) p2)->uid; - - for (int32_t i = 0; i < pTableGroupSupp->numOfCols; ++i) { - SColIndex* pColIndex = &pTableGroupSupp->pCols[i]; - int32_t colIndex = pColIndex->colIndex; - - assert(colIndex >= TSDB_TBNAME_COLUMN_INDEX); - - char * f1 = NULL; - char * f2 = NULL; - int32_t type = 0; - int32_t bytes = 0; - - if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - f1 = (char*) TABLE_NAME(pTable1); - f2 = (char*) TABLE_NAME(pTable2); - type = TSDB_DATA_TYPE_BINARY; - bytes = tGetTbnameColumnSchema()->bytes; - } else { - if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) { - STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); - bytes = pCol->bytes; - type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); - } - } - - // this tags value may be NULL - if (f1 == NULL && f2 == NULL) { - continue; - } - - if (f1 == NULL) { - return -1; - } - - if (f2 == NULL) { - return 1; - } - - int32_t ret = doCompare(f1, f2, type, bytes); - if (ret == 0) { - continue; - } else { - return ret; - } - } -#endif - return 0; -} static int tsdbCheckInfoCompar(const void* key1, const void* key2) { if (((STableCheckInfo*)key1)->tableId < ((STableCheckInfo*)key2)->tableId) { @@ -3702,170 +3633,6 @@ static int tsdbCheckInfoCompar(const void* key1, const void* key2) { } } -void createTableGroupImpl(SArray* pGroups, SArray* pTableList, size_t numOfTables, TSKEY skey, - STableGroupSupporter* pSupp, __ext_compar_fn_t compareFn) { - STable* pTable = taosArrayGetP(pTableList, 0); - SArray* g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info = {.lastKey = skey}; - taosArrayPush(g, &info); - - for (int32_t i = 1; i < numOfTables; ++i) { - STable** prev = taosArrayGet(pTableList, i - 1); - STable** p = taosArrayGet(pTableList, i); - - int32_t ret = compareFn(prev, p, pSupp); - assert(ret == 0 || ret == -1); - - if (ret == 0) { - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } else { - taosArrayPush(pGroups, &g); // current group is ended, start a new group - g = taosArrayInit(16, sizeof(STableKeyInfo)); - - STableKeyInfo info1 = {.lastKey = skey}; - taosArrayPush(g, &info1); - } - } - - taosArrayPush(pGroups, &g); -} - -SArray* createTableGroup(SArray* pTableList, SSchemaWrapper* pTagSchema, SColIndex* pCols, int32_t numOfOrderCols, - TSKEY skey) { - assert(pTableList != NULL); - SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES); - - size_t size = taosArrayGetSize(pTableList); - if (size == 0) { - tsdbDebug("no qualified tables"); - return pTableGroup; - } - - if (numOfOrderCols == 0 || size == 1) { // no group by tags clause or only one table - SArray* sa = taosArrayDup(pTableList); - if (sa == NULL) { - taosArrayDestroy(pTableGroup); - return NULL; - } - - taosArrayPush(pTableGroup, &sa); - tsdbDebug("all %" PRIzu " tables belong to one group", size); - } else { - STableGroupSupporter sup = {0}; - sup.numOfCols = numOfOrderCols; - sup.pTagSchema = pTagSchema->pSchema; - sup.pCols = pCols; - - taosqsort(pTableList->pData, size, sizeof(STableKeyInfo), &sup, 
tableGroupComparFn); - createTableGroupImpl(pTableGroup, pTableList, size, skey, &sup, tableGroupComparFn); - } - - return pTableGroup; -} - -// static bool tableFilterFp(const void* pNode, void* param) { -// tQueryInfo* pInfo = (tQueryInfo*) param; -// -// STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); -// -// char* val = NULL; -// if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { -// val = (char*) TABLE_NAME(pTable); -// } else { -// val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); -// } -// -// if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) { -// if (pInfo->optr == TSDB_RELATION_ISNULL) { -// return (val == NULL) || isNull(val, pInfo->sch.type); -// } else if (pInfo->optr == TSDB_RELATION_NOTNULL) { -// return (val != NULL) && (!isNull(val, pInfo->sch.type)); -// } -// } else if (pInfo->optr == TSDB_RELATION_IN) { -// int type = pInfo->sch.type; -// if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { -// int64_t v; -// GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { -// uint64_t v; -// GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } -// else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { -// double v; -// GET_TYPED_DATA(v, double, pInfo->sch.type, val); -// return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); -// } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){ -// return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val)); -// } -// -// } -// -// int32_t ret = 0; -// if (val == NULL) { //the val is possible to be null, so check it out carefully -// ret = -1; // val is missing in table tags value pairs -// } else { -// ret = pInfo->compare(val, pInfo->q); -// } -// -// switch (pInfo->optr) { -// case TSDB_RELATION_EQUAL: { -// return ret == 0; -// } -// case TSDB_RELATION_NOT_EQUAL: { -// return ret != 0; -// } -// case TSDB_RELATION_GREATER_EQUAL: { -// return ret >= 0; -// } -// case TSDB_RELATION_GREATER: { -// return ret > 0; -// } -// case TSDB_RELATION_LESS_EQUAL: { -// return ret <= 0; -// } -// case TSDB_RELATION_LESS: { -// return ret < 0; -// } -// case TSDB_RELATION_LIKE: { -// return ret == 0; -// } -// case TSDB_RELATION_MATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_NMATCH: { -// return ret == 0; -// } -// case TSDB_RELATION_IN: { -// return ret == 1; -// } -// -// default: -// assert(false); -// } -// -// return true; -//} - -// static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp -// *param); - -// static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) { -// // // query according to the expression tree -// SExprTraverseSupp supp = { -// .nodeFilterFn = (__result_filter_fn_t)tableFilterFp, -// .setupInfoFn = filterPrepare, -// .pExtInfo = pSTable->tagSchema, -// }; -// -// getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp); -// tExprTreeDestroy(pExpr, destroyHelper); -// return TSDB_CODE_SUCCESS; -//} - static void* doFreeColumnInfoData(SArray* pColumnInfoData) { if (pColumnInfoData == NULL) { return NULL; @@ -3934,263 +3701,3 @@ void tsdbCleanupReadHandle(tsdbReaderT queryHandle) { taosMemoryFreeClear(pTsdbReadHandle); } - -#if 0 - -static void 
applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - // Scan each node in the skiplist by using iterator - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - if (exprTreeApplyFilter(pExpr, pNode, param)) { - taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); - } - } - - tSkipListDestroyIter(iter); -} - -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { - SEndPoint* start; - SEndPoint* end; -} SQueryCond; - -// todo check for malloc failure -static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { - int32_t optr = queryColInfo->optr; - - if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL || - optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - pCond->end = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->end->optr = queryColInfo->optr; - pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - pCond->start = taosMemoryCalloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LIKE) { - assert(0); - } else if (optr == TSDB_RELATION_MATCH) { - assert(0); - } else if (optr == TSDB_RELATION_NMATCH) { - assert(0); - } - - return TSDB_CODE_SUCCESS; -} - -static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) { - SSkipListIterator* iter = NULL; - - SQueryCond cond = {0}; - if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) { - //todo handle error - } - - if (cond.start != NULL) { - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC); - } else { - iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? 
cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC); - } - - if (cond.start != NULL) { - int32_t optr = cond.start->optr; - - if (optr == TSDB_RELATION_EQUAL) { // equals - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal - bool comp = true; - int32_t ret = 0; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - assert(ret >= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_GREATER) { - continue; - } else { - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; - } - } - } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal - bool comp = true; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - tSkipListDestroyIter(iter); - - comp = true; - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC); - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else if (optr == TSDB_RELATION_IN) { - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - - } else { - assert(0); - } - } else { - int32_t optr = cond.end ? 
cond.end->optr : TSDB_RELATION_INVALID; - if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - bool comp = true; - int32_t ret = 0; - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v); - assert(ret <= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_LESS) { - continue; - } else { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; // no need to compare anymore - } - } - } else { - assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL); - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type); - if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) || - (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } - } - } - - taosMemoryFree(cond.start); - taosMemoryFree(cond.end); - tSkipListDestroyIter(iter); -} - -static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - - while (tSkipListIterNext(iter)) { - bool addToResult = false; - - SSkipListNode *pNode = tSkipListIterGet(iter); - - char *pData = SL_GET_NODE_DATA(pNode); - tstr *name = (tstr*) tsdbGetTableName((void*) pData); - - // todo speed up by using hash - if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - if (pQueryInfo->optr == TSDB_RELATION_IN) { - addToResult = pQueryInfo->compare(name, pQueryInfo->q); - } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || - pQueryInfo->optr == TSDB_RELATION_MATCH || - pQueryInfo->optr == TSDB_RELATION_NMATCH) { - addToResult = !pQueryInfo->compare(name, pQueryInfo->q); - } - } else { - addToResult = filterFp(pNode, pQueryInfo); - } - - if (addToResult) { - STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(res, &info); - } - } - - tSkipListDestroyIter(iter); -} - -// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list -//void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) { -// if (pExpr == NULL) { -// return; -// } -// -// tExprNode *pLeft = pExpr->_node.pLeft; -// tExprNode *pRight = pExpr->_node.pRight; -// -// // column project -// if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) { -// assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY)); -// -// param->setupInfoFn(pExpr, param->pExtInfo); -// -// tQueryInfo *pQueryInfo = pExpr->_node.info; -// if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE -// && pQueryInfo->optr != TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_NMATCH -// && pQueryInfo->optr != TSDB_RELATION_IN)) { -// queryIndexedColumn(pSkipList, pQueryInfo, result); -// } else { -// queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); -// } -// -// return; -// } -// -// // The value of hasPK is always 0. 
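The large commented-out block being deleted here is the old skiplist-based tag filtering. With it gone, tag-condition lookups are expected to start from the meta tag index exposed through the `tsdbGetIdx`/`metaGetIdx` accessors added earlier in this diff. A minimal sketch follows; the helper name `collectChildTables` is hypothetical, and the index query itself is outside the scope of this patch.

// Hypothetical helper around the accessors added in this patch (sketch only).
static int32_t collectChildTables(SMeta *pMeta, uint64_t suid, SArray *pUidList) {
  // no tag condition: list every child table of the super table
  if (tsdbGetAllTableList(pMeta, suid, pUidList) < 0) return -1;

  // with a tag condition, an index query would start from this handle, which is
  // pTagIvtIdx or pTagIdx depending on USE_INVERTED_INDEX
  void *pTagIdx = tsdbGetIdx(pMeta);  // NULL-safe: returns NULL when pMeta is NULL
  (void)pTagIdx;
  return 0;
}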
-// uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK; -// assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0); -// -// //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes -// applyFilterToSkipListNode(pSkipList, pExpr, result, param); -//} -#endif diff --git a/source/dnode/vnode/src/tsdb/tsdbSma.c b/source/dnode/vnode/src/tsdb/tsdbSma.c index ea23858f3e592a9d675a8efc6f6db59c373ca025..45b17a0180e4dabd411b01757c35e40910d62579 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSma.c +++ b/source/dnode/vnode/src/tsdb/tsdbSma.c @@ -2040,7 +2040,7 @@ static FORCE_INLINE int32_t tsdbExecuteRSmaImpl(STsdb *pTsdb, const void *pMsg, blockDebugShowData(pResult); STsdb *sinkTsdb = (level == TSDB_RETENTION_L1 ? pTsdb->pVnode->pRSma1 : pTsdb->pVnode->pRSma2); SSubmitReq *pReq = NULL; - if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, TD_VID(pTsdb->pVnode), uid, suid) != 0) { + if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, TD_VID(pTsdb->pVnode), suid) != 0) { taosArrayDestroy(pResult); return TSDB_CODE_FAILED; } @@ -2083,7 +2083,7 @@ static int32_t tsdbExecuteRSma(STsdb *pTsdb, const void *pMsg, int32_t inputType } if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) { - // TODO: use the proper schema instead of 0, and cache STSchema in cache + // TODO: use the proper schema instead of 1, and cache STSchema in cache STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, suid, 1); if (!pTSchema) { terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 40f75804dd36e23c06f4bcc189f355aea6b71a56..c7e4a848eba50146ae342f79a24d7a8e039e3626 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -191,9 +191,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb}; switch (pMsg->msgType) { case TDMT_VND_QUERY: - return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0); case TDMT_VND_QUERY_CONTINUE: - return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg); + return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0); default: vError("unknown msg type:%d in query queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -206,13 +206,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); switch (pMsg->msgType) { case TDMT_VND_FETCH: - return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_FETCH_RSP: - return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_CANCEL_TASK: - return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0); case TDMT_VND_DROP_TASK: - return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg); + return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_QUERY_HEARTBEAT: + return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0); + case TDMT_VND_TABLE_META: return vnodeGetTableMeta(pVnode, pMsg); case TDMT_VND_CONSUME: @@ -231,9 +234,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { return 
tqProcessTaskDispatchRsp(pVnode->pTq, pMsg); case TDMT_VND_TASK_RECOVER_RSP: return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg); - - case TDMT_VND_QUERY_HEARTBEAT: - return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg); default: vError("unknown msg type:%d in fetch queue", pMsg->msgType); return TSDB_CODE_VND_APP_ERROR; @@ -678,6 +678,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in int32_t nRows; int32_t tsize, ret; SEncoder encoder = {0}; + SArray *newTbUids = NULL; terrno = TSDB_CODE_SUCCESS; pRsp->code = 0; @@ -698,6 +699,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in } submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp)); + newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t)); if (!submitRsp.pArray) { pRsp->code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; @@ -727,6 +729,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in goto _exit; } } + taosArrayPush(newTbUids, &createTbReq.uid); submitBlkRsp.uid = createTbReq.uid; submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2); @@ -754,8 +757,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in submitRsp.affectedRows += submitBlkRsp.affectedRows; taosArrayPush(submitRsp.pArray, &submitBlkRsp); } + tqUpdateTbUidList(pVnode->pTq, newTbUids, true); _exit: + taosArrayDestroy(newTbUids); tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret); pRsp->pCont = rpcMallocCont(tsize); pRsp->contLen = tsize; diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 40bd3659a300b58b707774cce0a45728c9755ee3..230949ab7fbf696b050d3e10d7e76c858612e52b 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -49,19 +49,21 @@ enum { }; enum { - CTG_ACT_UPDATE_VG = 0, - CTG_ACT_UPDATE_TBL, - CTG_ACT_REMOVE_DB, - CTG_ACT_REMOVE_STB, - CTG_ACT_REMOVE_TBL, - CTG_ACT_UPDATE_USER, - CTG_ACT_MAX + CTG_OP_UPDATE_VGROUP = 0, + CTG_OP_UPDATE_TB_META, + CTG_OP_DROP_DB_CACHE, + CTG_OP_DROP_STB_META, + CTG_OP_DROP_TB_META, + CTG_OP_UPDATE_USER, + CTG_OP_UPDATE_VG_EPSET, + CTG_OP_MAX }; typedef enum { CTG_TASK_GET_QNODE = 0, CTG_TASK_GET_DB_VGROUP, CTG_TASK_GET_DB_CFG, + CTG_TASK_GET_DB_INFO, CTG_TASK_GET_TB_META, CTG_TASK_GET_TB_HASH, CTG_TASK_GET_INDEX, @@ -98,6 +100,10 @@ typedef struct SCtgDbCfgCtx { char dbFName[TSDB_DB_FNAME_LEN]; } SCtgDbCfgCtx; +typedef struct SCtgDbInfoCtx { + char dbFName[TSDB_DB_FNAME_LEN]; +} SCtgDbInfoCtx; + typedef struct SCtgTbHashCtx { char dbFName[TSDB_DB_FNAME_LEN]; SName* pName; @@ -182,6 +188,7 @@ typedef struct SCtgJob { int32_t dbCfgNum; int32_t indexNum; int32_t userNum; + int32_t dbInfoNum; } SCtgJob; typedef struct SCtgMsgCtx { @@ -285,16 +292,22 @@ typedef struct SCtgUpdateUserMsg { SGetUserAuthRsp userAuth; } SCtgUpdateUserMsg; +typedef struct SCtgUpdateEpsetMsg { + SCatalog* pCtg; + char dbFName[TSDB_DB_FNAME_LEN]; + int32_t vgId; + SEpSet epSet; +} SCtgUpdateEpsetMsg; -typedef struct SCtgMetaAction { - int32_t act; +typedef struct SCtgCacheOperation { + int32_t opId; void *data; bool syncReq; uint64_t seqId; -} SCtgMetaAction; +} SCtgCacheOperation; typedef struct SCtgQNode { - SCtgMetaAction action; + SCtgCacheOperation op; struct SCtgQNode *next; } SCtgQNode; @@ -321,13 +334,13 @@ typedef struct SCatalogMgmt { } SCatalogMgmt; typedef uint32_t (*tableNameHashFp)(const char *, uint32_t); -typedef int32_t 
(*ctgActFunc)(SCtgMetaAction *); +typedef int32_t (*ctgOpFunc)(SCtgCacheOperation *); -typedef struct SCtgAction { - int32_t actId; +typedef struct SCtgOperation { + int32_t opId; char name[32]; - ctgActFunc func; -} SCtgAction; + ctgOpFunc func; +} SCtgOperation; #define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) #define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1) @@ -435,12 +448,13 @@ int32_t ctgdShowCacheInfo(void); int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq); int32_t ctgGetTbMetaFromCache(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); -int32_t ctgActUpdateVg(SCtgMetaAction *action); -int32_t ctgActUpdateTb(SCtgMetaAction *action); -int32_t ctgActRemoveDB(SCtgMetaAction *action); -int32_t ctgActRemoveStb(SCtgMetaAction *action); -int32_t ctgActRemoveTb(SCtgMetaAction *action); -int32_t ctgActUpdateUser(SCtgMetaAction *action); +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *action); +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropDbCache(SCtgCacheOperation *action); +int32_t ctgOpDropStbMeta(SCtgCacheOperation *action); +int32_t ctgOpDropTbMeta(SCtgCacheOperation *action); +int32_t ctgOpUpdateUser(SCtgCacheOperation *action); +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation); int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache); void ctgReleaseVgInfo(SCtgDBCache *dbCache); @@ -449,12 +463,13 @@ int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32 int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName); int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass); -int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); -int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); -int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); -int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); -int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); -int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId); +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq); +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq); +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq); +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq); +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet); int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type); int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size); int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t 
*num, int32_t size); diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 1cfc498c25e8352171b7c859c7fea7ebc6028bc5..6519440dad3c7711057f3fd1e203b328ed263a52 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -41,9 +41,9 @@ int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq tNameGetFullDbName(pTableName, dbFName); if (TSDB_SUPER_TABLE == tblMeta->tableType) { - CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, tblMeta->suid, syncReq)); } else { - CTG_ERR_JRET(ctgPutRmTbToQueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); + CTG_ERR_JRET(ctgDropTbMetaEnqueue(pCtg, dbFName, tbCtx.tbInfo.dbId, pTableName->tname, syncReq)); } _return: @@ -72,7 +72,7 @@ int32_t ctgGetDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, con CTG_ERR_JRET(ctgCloneVgInfo(DbOut.dbVgroup, pInfo)); - CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, false)); return TSDB_CODE_SUCCESS; @@ -108,13 +108,13 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, if (code) { if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) { ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId); - ctgPutRmDBToQueue(pCtg, input.db, input.dbId); + ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId); } CTG_ERR_RET(code); } - CTG_ERR_RET(ctgPutUpdateVgToQueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); + CTG_ERR_RET(ctgUpdateVgroupEnqueue(pCtg, dbFName, DbOut.dbId, DbOut.dbVgroup, true)); return TSDB_CODE_SUCCESS; } @@ -201,7 +201,7 @@ int32_t ctgRefreshTbMeta(CTG_PARAMS, SCtgTbMetaCtx* ctx, STableMetaOutput **pOut CTG_ERR_JRET(ctgCloneMetaOutput(output, pOutput)); } - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, syncReq)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncReq)); return TSDB_CODE_SUCCESS; @@ -298,9 +298,9 @@ _return: } if (TSDB_SUPER_TABLE == ctx->tbInfo.tbType) { - ctgPutRmStbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); + ctgDropStbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, ctx->tbInfo.suid, false); } else { - ctgPutRmTbToQueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); + ctgDropTbMetaEnqueue(pCtg, dbFName, ctx->tbInfo.dbId, ctx->pName->tname, false); } } @@ -348,7 +348,7 @@ int32_t ctgChkAuth(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const c _return: - ctgPutUpdateUserToQueue(pCtg, &authRsp, false); + ctgUpdateUserEnqueue(pCtg, &authRsp, false); return TSDB_CODE_SUCCESS; } @@ -670,7 +670,7 @@ int32_t catalogUpdateDBVgInfo(SCatalog* pCtg, const char* dbFName, uint64_t dbId CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT); } - code = ctgPutUpdateVgToQueue(pCtg, dbFName, dbId, dbInfo, false); + code = ctgUpdateVgroupEnqueue(pCtg, dbFName, dbId, dbInfo, false); _return: @@ -691,7 +691,7 @@ int32_t catalogRemoveDB(SCatalog* pCtg, const char* dbFName, uint64_t dbId) { CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPutRmDBToQueue(pCtg, dbFName, dbId)); + CTG_ERR_JRET(ctgDropDbCacheEnqueue(pCtg, dbFName, dbId)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); @@ -701,7 +701,19 @@ _return: } int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet) { - return 0; + 
CTG_API_ENTER(); + + int32_t code = 0; + + if (NULL == pCtg || NULL == dbFName || NULL == epSet) { + CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); + } + + CTG_ERR_JRET(ctgUpdateVgEpsetEnqueue(pCtg, (char*)dbFName, vgId, epSet)); + +_return: + + CTG_API_LEAVE(code); } int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) { @@ -738,7 +750,7 @@ int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, CTG_API_LEAVE(TSDB_CODE_SUCCESS); } - CTG_ERR_JRET(ctgPutRmStbToQueue(pCtg, dbFName, dbId, stbName, suid, true)); + CTG_ERR_JRET(ctgDropStbMetaEnqueue(pCtg, dbFName, dbId, stbName, suid, true)); CTG_API_LEAVE(TSDB_CODE_SUCCESS); @@ -791,7 +803,7 @@ int32_t catalogUpdateSTableMeta(SCatalog* pCtg, STableMetaRsp *rspMsg) { CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, true, &output->tbMeta)); - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, output, false)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, false)); CTG_API_LEAVE(code); @@ -1152,7 +1164,7 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) { CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); } - CTG_API_LEAVE(ctgPutUpdateUserToQueue(pCtg, pAuth, false)); + CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false)); } diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 0341c3638bfeb6018326d1cbad86ca1363024ad9..eb84bf00a444fb6bc57652ee32abdf44035a0426 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -95,6 +95,30 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { return TSDB_CODE_SUCCESS; } +int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, char *dbFName) { + SCtgTask task = {0}; + + task.type = CTG_TASK_GET_DB_INFO; + task.taskId = taskIdx; + task.pJob = pJob; + + task.taskCtx = taosMemoryCalloc(1, sizeof(SCtgDbInfoCtx)); + if (NULL == task.taskCtx) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SCtgDbInfoCtx* ctx = task.taskCtx; + + memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName)); + + taosArrayPush(pJob->pTasks, &task); + + qDebug("QID:%" PRIx64 " task %d type %d initialized, dbFName:%s", pJob->queryId, taskIdx, task.type, dbFName); + + return TSDB_CODE_SUCCESS; +} + + int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, SName *name) { SCtgTask task = {0}; @@ -219,8 +243,9 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg); int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex); int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser); + int32_t dbInfoNum = (int32_t)taosArrayGetSize(pReq->pDbInfo); - int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum; + int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum; if (taskNum <= 0) { ctgError("empty input for job, taskNum:%d", taskNum); CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); @@ -249,6 +274,7 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* pJob->dbCfgNum = dbCfgNum; pJob->indexNum = indexNum; pJob->userNum = userNum; + pJob->dbInfoNum = dbInfoNum; pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask)); @@ -268,6 +294,11 @@ int32_t ctgInitJob(CTG_PARAMS, SCtgJob** job, uint64_t reqId, const SCatalogReq* CTG_ERR_JRET(ctgInitGetDbCfgTask(pJob, taskIdx++, dbFName)); } + for (int32_t i = 0; i < dbInfoNum; ++i) { + char *dbFName = taosArrayGet(pReq->pDbInfo, i); + 
CTG_ERR_JRET(ctgInitGetDbInfoTask(pJob, taskIdx++, dbFName)); + } + for (int32_t i = 0; i < tbMetaNum; ++i) { SName *name = taosArrayGet(pReq->pTableMeta, i); CTG_ERR_JRET(ctgInitGetTbMetaTask(pJob, taskIdx++, name)); @@ -395,6 +426,20 @@ int32_t ctgDumpDbCfgRes(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } +int32_t ctgDumpDbInfoRes(SCtgTask* pTask) { + SCtgJob* pJob = pTask->pJob; + if (NULL == pJob->jobRes.pDbInfo) { + pJob->jobRes.pDbInfo = taosArrayInit(pJob->dbInfoNum, sizeof(SDbInfo)); + if (NULL == pJob->jobRes.pDbInfo) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + } + + taosArrayPush(pJob->jobRes.pDbInfo, pTask->res); + + return TSDB_CODE_SUCCESS; +} + int32_t ctgDumpUdfRes(SCtgTask* pTask) { SCtgJob* pJob = pTask->pJob; if (NULL == pJob->jobRes.pUdfList) { @@ -620,7 +665,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pM CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res)); - CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; break; @@ -659,7 +704,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf * CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res)); - CTG_ERR_JRET(ctgPutUpdateVgToQueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); + CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false)); pOut->dbVgroup = NULL; break; @@ -691,6 +736,11 @@ _return: CTG_RET(code); } +int32_t ctgHandleGetDbInfoRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { + CTG_RET(TSDB_CODE_APP_ERROR); +} + + int32_t ctgHandleGetQnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) { int32_t code = 0; CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target)); @@ -769,7 +819,7 @@ _return: } } - ctgPutUpdateUserToQueue(pCtg, pOut, false); + ctgUpdateUserEnqueue(pCtg, pOut, false); taosMemoryFreeClear(pTask->msgCtx.out); ctgHandleTaskEnd(pTask, code); @@ -933,6 +983,41 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) { return TSDB_CODE_SUCCESS; } +int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) { + int32_t code = 0; + SCatalog* pCtg = pTask->pJob->pCtg; + void *pTrans = pTask->pJob->pTrans; + const SEpSet* pMgmtEps = &pTask->pJob->pMgmtEps; + SCtgDBCache *dbCache = NULL; + SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx; + + pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo)); + if (NULL == pTask->res) { + CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + SDbInfo* pInfo = (SDbInfo*)pTask->res; + CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); + if (NULL != dbCache) { + pInfo->vgVer = dbCache->vgInfo->vgVersion; + pInfo->dbId = dbCache->dbId; + pInfo->tbNum = dbCache->vgInfo->numOfTable; + } else { + pInfo->vgVer = CTG_DEFAULT_INVALID_VERSION; + } + + CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0)); + +_return: + + if (dbCache) { + ctgReleaseVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + CTG_RET(code); +} + int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; void *pTrans = pTask->pJob->pTrans; @@ -992,6 +1077,7 @@ SCtgAsyncFps gCtgAsyncFps[] = { {ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes}, {ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes}, {ctgLaunchGetDbCfgTask, 
ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes}, + {ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes}, {ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes}, {ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes}, {ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes}, diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 9161c7cb3237f0272ce9ffee71a8c6718c7e1dc9..d1e2056becc86c1ad8f36f4d8ea3bfffe9acb97a 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -19,37 +19,43 @@ #include "catalogInt.h" #include "systable.h" -SCtgAction gCtgAction[CTG_ACT_MAX] = { +SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = { { - CTG_ACT_UPDATE_VG, + CTG_OP_UPDATE_VGROUP, "update vgInfo", - ctgActUpdateVg + ctgOpUpdateVgroup }, { - CTG_ACT_UPDATE_TBL, + CTG_OP_UPDATE_TB_META, "update tbMeta", - ctgActUpdateTb + ctgOpUpdateTbMeta }, { - CTG_ACT_REMOVE_DB, - "remove DB", - ctgActRemoveDB + CTG_OP_DROP_DB_CACHE, + "drop DB", + ctgOpDropDbCache }, { - CTG_ACT_REMOVE_STB, - "remove stbMeta", - ctgActRemoveStb + CTG_OP_DROP_STB_META, + "drop stbMeta", + ctgOpDropStbMeta }, { - CTG_ACT_REMOVE_TBL, - "remove tbMeta", - ctgActRemoveTb + CTG_OP_DROP_TB_META, + "drop tbMeta", + ctgOpDropTbMeta }, { - CTG_ACT_UPDATE_USER, + CTG_OP_UPDATE_USER, "update user", - ctgActUpdateUser + ctgOpUpdateUser + }, + { + CTG_OP_UPDATE_VG_EPSET, + "update epset", + ctgOpUpdateEpset } + }; @@ -405,7 +411,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, const SName *pTableName, int32_t * } -int32_t ctgGetTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) { +int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, const char* dbFName, const char *tableName, int32_t *tbType) { if (NULL == pCtg->dbCache) { ctgWarn("empty db cache, dbFName:%s, tbName:%s", dbFName, tableName); return TSDB_CODE_SUCCESS; @@ -491,7 +497,7 @@ _return: } -void ctgWaitAction(SCtgMetaAction *action) { +void ctgWaitOpDone(SCtgCacheOperation *action) { while (true) { tsem_wait(&gCtgMgmt.queue.rspSem); @@ -509,7 +515,7 @@ void ctgWaitAction(SCtgMetaAction *action) { } } -void ctgPopAction(SCtgMetaAction **action) { +void ctgDequeue(SCtgCacheOperation **op) { SCtgQNode *orig = gCtgMgmt.queue.head; SCtgQNode *node = gCtgMgmt.queue.head->next; @@ -519,20 +525,20 @@ void ctgPopAction(SCtgMetaAction **action) { taosMemoryFreeClear(orig); - *action = &node->action; + *op = &node->op; } -int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) { +int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) { SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode)); if (NULL == node) { qError("calloc %d failed", (int32_t)sizeof(SCtgQNode)); CTG_RET(TSDB_CODE_CTG_MEM_ERROR); } - action->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1); + operation->seqId = atomic_add_fetch_64(&gCtgMgmt.queue.seqId, 1); - node->action = *action; + node->op = *operation; CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock); gCtgMgmt.queue.tail->next = node; @@ -544,19 +550,19 @@ int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) { tsem_post(&gCtgMgmt.queue.reqSem); - ctgDebug("action [%s] added into queue", gCtgAction[action->act].name); + ctgDebug("operation [%s] added into queue", gCtgCacheOperation[operation->opId].name); - if (action->syncReq) { - ctgWaitAction(action); + if (operation->syncReq) { + ctgWaitOpDone(operation); } return TSDB_CODE_SUCCESS; } -int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
+int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_REMOVE_DB}; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_DB_CACHE}; SCtgRemoveDBMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveDBMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveDBMsg)); @@ -574,7 +580,7 @@ int32_t ctgPutRmDBToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) { action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -585,9 +591,9 @@ _return: } -int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) { +int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_REMOVE_STB, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_STB_META, .syncReq = syncReq}; SCtgRemoveStbMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveStbMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveStbMsg)); @@ -602,7 +608,7 @@ int32_t ctgPutRmStbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, co action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -614,9 +620,9 @@ _return: -int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) { +int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_REMOVE_TBL, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_DROP_TB_META, .syncReq = syncReq}; SCtgRemoveTblMsg *msg = taosMemoryMalloc(sizeof(SCtgRemoveTblMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgRemoveTblMsg)); @@ -630,7 +636,7 @@ int32_t ctgPutRmTbToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, con action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -640,9 +646,9 @@ _return: CTG_RET(code); } -int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) { +int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_UPDATE_VG, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_VGROUP, .syncReq = syncReq}; SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg)); @@ -662,7 +668,7 @@ int32_t ctgPutUpdateVgToQueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -673,9 +679,9 @@ _return: CTG_RET(code); } -int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) { +int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_UPDATE_TBL, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_TB_META, .syncReq = syncReq}; SCtgUpdateTblMsg *msg = 
taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTblMsg)); @@ -692,7 +698,34 @@ int32_t ctgPutUpdateTbToQueue(SCatalog* pCtg, STableMetaOutput *output, bool syn action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); + + return TSDB_CODE_SUCCESS; + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + +int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet) { + int32_t code = 0; + SCtgCacheOperation operation= {.opId = CTG_OP_UPDATE_VG_EPSET}; + SCtgUpdateEpsetMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateEpsetMsg)); + if (NULL == msg) { + ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateEpsetMsg)); + CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); + } + + msg->pCtg = pCtg; + strcpy(msg->dbFName, dbFName); + msg->vgId = vgId; + msg->epSet = *pEpSet; + + operation.data = msg; + + CTG_ERR_JRET(ctgEnqueue(pCtg, &operation)); return TSDB_CODE_SUCCESS; @@ -703,9 +736,11 @@ _return: CTG_RET(code); } -int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) { + + +int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) { int32_t code = 0; - SCtgMetaAction action= {.act = CTG_ACT_UPDATE_USER, .syncReq = syncReq}; + SCtgCacheOperation action= {.opId = CTG_OP_UPDATE_USER, .syncReq = syncReq}; SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg)); if (NULL == msg) { ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg)); @@ -717,7 +752,7 @@ int32_t ctgPutUpdateUserToQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syn action.data = msg; - CTG_ERR_JRET(ctgPushAction(pCtg, &action)); + CTG_ERR_JRET(ctgEnqueue(pCtg, &action)); return TSDB_CODE_SUCCESS; @@ -1219,7 +1254,7 @@ int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool sync int32_t code = 0; CTG_ERR_RET(ctgCloneMetaOutput(pOut, &pOutput)); - CTG_ERR_JRET(ctgPutUpdateTbToQueue(pCtg, pOutput, syncReq)); + CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, pOutput, syncReq)); return TSDB_CODE_SUCCESS; @@ -1230,9 +1265,9 @@ _return: } -int32_t ctgActUpdateVg(SCtgMetaAction *action) { +int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgUpdateVgMsg *msg = action->data; + SCtgUpdateVgMsg *msg = operation->data; CTG_ERR_JRET(ctgWriteDBVgInfoToCache(msg->pCtg, msg->dbFName, msg->dbId, &msg->dbInfo)); @@ -1244,9 +1279,9 @@ _return: CTG_RET(code); } -int32_t ctgActRemoveDB(SCtgMetaAction *action) { +int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgRemoveDBMsg *msg = action->data; + SCtgRemoveDBMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; SCtgDBCache *dbCache = NULL; @@ -1270,9 +1305,9 @@ _return: } -int32_t ctgActUpdateTb(SCtgMetaAction *action) { +int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgUpdateTblMsg *msg = action->data; + SCtgUpdateTblMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; STableMetaOutput* output = msg->output; SCtgDBCache *dbCache = NULL; @@ -1316,9 +1351,9 @@ _return: } -int32_t ctgActRemoveStb(SCtgMetaAction *action) { +int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgRemoveStbMsg *msg = action->data; + SCtgRemoveStbMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; SCtgDBCache *dbCache = NULL; @@ -1362,9 +1397,9 @@ _return: CTG_RET(code); } -int32_t ctgActRemoveTb(SCtgMetaAction *action) { +int32_t 
ctgOpDropTbMeta(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgRemoveTblMsg *msg = action->data; + SCtgRemoveTblMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; SCtgDBCache *dbCache = NULL; @@ -1397,9 +1432,9 @@ _return: CTG_RET(code); } -int32_t ctgActUpdateUser(SCtgMetaAction *action) { +int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) { int32_t code = 0; - SCtgUpdateUserMsg *msg = action->data; + SCtgUpdateUserMsg *msg = operation->data; SCatalog* pCtg = msg->pCtg; if (NULL == pCtg->userCache) { @@ -1460,14 +1495,60 @@ _return: CTG_RET(code); } -void ctgUpdateThreadFuncUnexpectedStopped(void) { +int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgUpdateEpsetMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + SCtgDBCache *dbCache = NULL; + CTG_ERR_RET(ctgAcquireDBCache(pCtg, msg->dbFName, &dbCache)); + if (NULL == dbCache) { + ctgDebug("db %s not exist, ignore epset update", msg->dbFName); + goto _return; + } + + SDBVgInfo *vgInfo = NULL; + CTG_ERR_RET(ctgWAcquireVgInfo(pCtg, dbCache)); + + if (NULL == dbCache->vgInfo) { + ctgWReleaseVgInfo(dbCache); + ctgDebug("vgroup in db %s not cached, ignore epset update", msg->dbFName); + goto _return; + } + + SVgroupInfo* pInfo = taosHashGet(dbCache->vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId)); + if (NULL == pInfo) { + ctgWReleaseVgInfo(dbCache); + ctgDebug("no vgroup %d in db %s, ignore epset update", msg->vgId, msg->dbFName); + goto _return; + } + + pInfo->epSet = msg->epSet; + + ctgDebug("epset in vgroup %d updated, dbFName:%s", pInfo->vgId, msg->dbFName); + + ctgWReleaseVgInfo(dbCache); + +_return: + + if (dbCache) { + ctgReleaseDBCache(msg->pCtg, dbCache); + } + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + +void ctgUpdateThreadUnexpectedStopped(void) { if (CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); } void* ctgUpdateThreadFunc(void* param) { setThreadName("catalog"); #ifdef WINDOWS - atexit(ctgUpdateThreadFuncUnexpectedStopped); + atexit(ctgUpdateThreadUnexpectedStopped); #endif qInfo("catalog update thread started"); @@ -1483,17 +1564,17 @@ void* ctgUpdateThreadFunc(void* param) { break; } - SCtgMetaAction *action = NULL; - ctgPopAction(&action); - SCatalog *pCtg = ((SCtgUpdateMsgHeader *)action->data)->pCtg; + SCtgCacheOperation *operation = NULL; + ctgDequeue(&operation); + SCatalog *pCtg = ((SCtgUpdateMsgHeader *)operation->data)->pCtg; - ctgDebug("process [%s] action", gCtgAction[action->act].name); + ctgDebug("process [%s] operation", gCtgCacheOperation[operation->opId].name); - (*gCtgAction[action->act].func)(action); + (*gCtgCacheOperation[operation->opId].func)(operation); - gCtgMgmt.queue.seqDone = action->seqId; + gCtgMgmt.queue.seqDone = operation->seqId; - if (action->syncReq) { + if (operation->syncReq) { tsem_post(&gCtgMgmt.queue.rspSem); } diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c index 849c66fd126dcbb0b0bdee1de1ec54ea8bd3697c..fdab50db0f65fd67d16d6f5b134f847dc0f882bc 100644 --- a/source/libs/catalog/src/ctgDbg.c +++ b/source/libs/catalog/src/ctgDbg.c @@ -71,6 +71,16 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) { qDebug("empty db vgroup"); } + if (pResult->pDbInfo && taosArrayGetSize(pResult->pDbInfo) > 0) { + num = taosArrayGetSize(pResult->pDbInfo); + for (int32_t i = 0; i < num; ++i) { + SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i); + qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId); + } + } else { + 
qDebug("empty db info"); + } + if (pResult->pTableHash && taosArrayGetSize(pResult->pTableHash) > 0) { num = taosArrayGetSize(pResult->pTableHash); for (int32_t i = 0; i < num; ++i) { @@ -127,6 +137,7 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps SCatalogReq req = {0}; req.pTableMeta = taosArrayInit(2, sizeof(SName)); req.pDbVgroup = taosArrayInit(2, TSDB_DB_FNAME_LEN); + req.pDbInfo = taosArrayInit(2, TSDB_DB_FNAME_LEN); req.pTableHash = taosArrayInit(2, sizeof(SName)); req.pUdf = taosArrayInit(2, TSDB_FUNC_NAME_LEN); req.pDbCfg = taosArrayInit(2, TSDB_DB_FNAME_LEN); @@ -149,9 +160,11 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps strcpy(dbFName, "1.db1"); taosArrayPush(req.pDbVgroup, dbFName); taosArrayPush(req.pDbCfg, dbFName); + taosArrayPush(req.pDbInfo, dbFName); strcpy(dbFName, "1.db2"); taosArrayPush(req.pDbVgroup, dbFName); taosArrayPush(req.pDbCfg, dbFName); + taosArrayPush(req.pDbInfo, dbFName); strcpy(funcName, "udf1"); taosArrayPush(req.pUdf, funcName); diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 1f78a97733614fcf7cbbf48a1a90be62dfa61ce9..4fbf1463d8f0191a26c99399f26e66d32b319ca5 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -42,6 +42,9 @@ void ctgFreeSMetaData(SMetaData* pData) { } taosArrayDestroy(pData->pDbCfg); pData->pDbCfg = NULL; + + taosArrayDestroy(pData->pDbInfo); + pData->pDbInfo = NULL; taosArrayDestroy(pData->pIndex); pData->pIndex = NULL; @@ -293,9 +296,12 @@ void ctgFreeTask(SCtgTask* pTask) { } case CTG_TASK_GET_DB_CFG: { taosMemoryFreeClear(pTask->taskCtx); - if (pTask->res) { - taosMemoryFreeClear(pTask->res); - } + taosMemoryFreeClear(pTask->res); + break; + } + case CTG_TASK_GET_DB_INFO: { + taosMemoryFreeClear(pTask->taskCtx); + taosMemoryFreeClear(pTask->res); break; } case CTG_TASK_GET_TB_HASH: { diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp index 6c7d1ac4ca554e69b92bcde3e4c64f20a46d0dcb..81d206a0f3fee7f33f24b9740c973ab8d89b10d1 100644 --- a/source/libs/catalog/test/catalogTests.cpp +++ b/source/libs/catalog/test/catalogTests.cpp @@ -41,7 +41,6 @@ namespace { extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type); -extern "C" int32_t ctgActUpdateTb(SCtgMetaAction *action); extern "C" int32_t ctgdEnableDebug(char *option); extern "C" int32_t ctgdGetStatNum(char *option, void *res); @@ -888,9 +887,9 @@ void *ctgTestSetCtableMetaThread(void *param) { int32_t n = 0; STableMetaOutput *output = NULL; - SCtgMetaAction action = {0}; + SCtgCacheOperation operation = {0}; - action.act = CTG_ACT_UPDATE_TBL; + operation.opId = CTG_OP_UPDATE_TB_META; while (!ctgTestStop) { output = (STableMetaOutput *)taosMemoryMalloc(sizeof(STableMetaOutput)); @@ -899,9 +898,9 @@ void *ctgTestSetCtableMetaThread(void *param) { SCtgUpdateTblMsg *msg = (SCtgUpdateTblMsg *)taosMemoryMalloc(sizeof(SCtgUpdateTblMsg)); msg->pCtg = pCtg; msg->output = output; - action.data = msg; + operation.data = msg; - code = ctgActUpdateTb(&action); + code = ctgOpUpdateTbMeta(&operation); if (code) { assert(0); } diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 26a0f3bf6cf85bfe4d81a0ab5d8913d7e1767eeb..831b7017b2632a3e52e3050c08b2c29ffa463eeb 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -560,8 +560,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx 
*ctx, i QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length); - EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + if (pAggNode->pAggFuncs) { + EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pAggNode->pAggFuncs->length); + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); + } EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->totalRowSize); if (pAggNode->pGroupKeys) { EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 6e47ec42bd03f1e4cfb48530d41f65f7255a481b..88f4bdbd3db6aef21f8df64744128a9e89743466 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -332,6 +332,8 @@ typedef struct STableScanInfo { int32_t dataBlockLoadFlag; double sampleRatio; // data block sample ratio, 1 by default SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded. + + int32_t curTWinIdx; } STableScanInfo; typedef struct STagScanInfo { @@ -371,30 +373,33 @@ typedef struct SessionWindowSupporter { } SessionWindowSupporter; typedef struct SStreamBlockScanInfo { - SArray* pBlockLists; // multiple SSDatablock. - SSDataBlock* pRes; // result SSDataBlock - SSDataBlock* pUpdateRes; // update SSDataBlock - int32_t updateResIndex; - int32_t blockType; // current block type - int32_t validBlockIndex; // Is current data has returned? - SColumnInfo* pCols; // the output column info - uint64_t numOfExec; // execution times - void* streamBlockReader;// stream block reader handle - SArray* pColMatchInfo; // - SNode* pCondition; - SArray* tsArray; - SUpdateInfo* pUpdateInfo; - int32_t primaryTsIndex; // primary time stamp slot id - void* pDataReader; - SReadHandle readHandle; - uint64_t tableUid; // queried super table uid + SArray* pBlockLists; // multiple SSDataBlock. + SSDataBlock* pRes; // result SSDataBlock + SSDataBlock* pUpdateRes; // update SSDataBlock + int32_t updateResIndex; + int32_t blockType; // current block type + int32_t validBlockIndex; // has current data been returned? + SColumnInfo* pCols; // the output column info + uint64_t numOfExec; // execution times + void* streamBlockReader;// stream block reader handle + SArray* pColMatchInfo; // + SNode* pCondition; + SArray* tsArray; + SUpdateInfo* pUpdateInfo; + + SExprInfo* pPseudoExpr; + int32_t numOfPseudoExpr; + + int32_t primaryTsIndex; // primary time stamp slot id + void* pDataReader; + SReadHandle readHandle; + uint64_t tableUid; // queried super table uid EStreamScanMode scanMode; SOperatorInfo* pOperatorDumy; SInterval interval; // if the upstream is an interval operator, the interval info is also kept here. - SCatchSupporter childAggSup; - SArray* childIds; + SArray* childIds; SessionWindowSupporter sessionSup; - bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. + bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA. } SStreamBlockScanInfo; typedef struct SSysTableScanInfo { @@ -433,6 +438,7 @@ typedef struct SAggSupporter { typedef struct STimeWindowSupp { int8_t calTrigger; int64_t waterMark; + TSKEY maxTs; SColumnInfoData timeWindowData; // query time window info for scalar function execution.
} STimeWindowAggSupp; @@ -576,6 +582,7 @@ typedef struct SResultWindowInfo { SResultRowPosition pos; STimeWindow win; bool isOutput; + bool isClosed; } SResultWindowInfo; typedef struct SStreamSessionAggOperatorInfo { @@ -748,10 +755,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition, SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo); SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy); + +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup, int16_t tsColId); + SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal, @@ -822,8 +830,6 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order); int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order); -int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, - const char* pDir); int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey); SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize); SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap, @@ -831,6 +837,8 @@ SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted); bool functionNeedToExecute(SqlFunctionCtx* pCtx); + +int32_t compareTimeWindow(const void* p1, const void* p2, const void* param); #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 5a02547f58aa4cf73c5297dda771ba0900bce141..aea9d70f316806286fbd69f0d13dc72ccae54dfe 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -233,7 +233,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { if (pGroupResInfo->pRows != NULL) { - taosArrayDestroy(pGroupResInfo->pRows); + taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree); } pGroupResInfo->pRows = pArrayList; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index c1bcd271091f9b8e3d8ee19f293361e7da4f6d7e..6ce6589dfe22e346926ac9a2041473372f62650d 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -28,13 +28,13 @@ #include "ttime.h" #include "executorimpl.h" +#include "index.h" #include "query.h" #include "tcompare.h" #include "tcompression.h" #include "thash.h" #include "ttypes.h" #include "vnode.h" -#include "index.h" #define IS_MAIN_SCAN(runtime) ((runtime)->scanFlag == MAIN_SCAN) #define IS_REVERSE_SCAN(runtime) 
((runtime)->scanFlag == REVERSE_SCAN) @@ -87,7 +87,7 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define realloc u_realloc #endif -#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) +#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) //#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) @@ -2601,6 +2601,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI pStart += sizeof(int32_t) * numOfRows; if (colLen[i] > 0) { + taosMemoryFreeClear(pColInfoData->pData); pColInfoData->pData = taosMemoryMalloc(colLen[i]); } } else { @@ -2758,6 +2759,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pExchangeInfo->loadInfo.totalRows); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; completed += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -2765,6 +2767,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx code = setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data, pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL); if (code != 0) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } @@ -2785,10 +2788,12 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx pDataInfo->status = EX_SOURCE_DATA_NOT_READY; code = doSendFetchDataRequest(pExchangeInfo, pTaskInfo, i); if (code != TSDB_CODE_SUCCESS) { + taosMemoryFreeClear(pDataInfo->pRsp); goto _error; } } + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } @@ -2890,7 +2895,8 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { pDataInfo->totalRows, pLoadInfo->totalRows); pDataInfo->status = EX_SOURCE_DATA_EXHAUSTED; - pExchangeInfo->current += 1; + pExchangeInfo->current += 1; + taosMemoryFreeClear(pDataInfo->pRsp); continue; } @@ -2916,6 +2922,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) { } pOperator->resultInfo.totalRows += pRes->info.rows; + taosMemoryFreeClear(pDataInfo->pRsp); return pExchangeInfo->pResult; } } @@ -3967,11 +3974,11 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n return TSDB_CODE_OUT_OF_MEMORY; } - uint32_t defaultPgsz = 0; + uint32_t defaultPgsz = 0; uint32_t defaultBufsz = 0; getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz); - int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); + int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, TD_TMP_DIR_PATH); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4008,7 +4015,7 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) { } } -//static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { +// static STableQueryInfo* initTableQueryInfo(const STableListInfo* pTableListInfo) { // int32_t size = taosArrayGetSize(pTableListInfo->pTableList); // if (size == 0) { // return NULL; @@ -4441,9 +4448,11 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT } static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, - STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond); + STableListInfo* pTableGroupInfo, uint64_t queryId, uint64_t taskId, + SNode* pTagCond); -static int32_t getTableList(void* metaHandle, 
int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, SNode* pTagCond); +static int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, + SNode* pTagCond); static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo); static SArray* extractColumnInfo(SNodeList* pNodeList); @@ -4480,7 +4489,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); + tsdbReaderT pDataReader = + doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); if (pDataReader == NULL && terrno != 0) { return NULL; } @@ -4499,9 +4509,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) { SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table. STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; - - int32_t numOfCols = 0; - + STimeWindowAggSupp twSup = {.waterMark = pTableScanNode->watermark, + .calTrigger = pTableScanNode->triggerType, .maxTs = INT64_MIN}; tsdbReaderT pDataReader = NULL; if (pHandle->vnode) { pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond); @@ -4510,24 +4519,15 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } if (pDataReader == NULL && terrno != 0) { - qDebug("pDataReader is NULL"); + qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo)); // return NULL; } else { - qDebug("pDataReader is not NULL"); + qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo)); } - - SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; - SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); - SArray* tableIdList = extractTableIdList(pTableListInfo); + SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, + tableIdList, pTableScanNode, pTaskInfo, &twSup, pTableScanNode->tsColId); - SSDataBlock* pResBlock = createResDataBlock(pDescNode); - SArray* pCols = - extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); - - SOperatorInfo* pOperator = - createStreamScanOperatorInfo(pHandle->reader, pDataReader, pHandle, pScanPhyNode->uid, pResBlock, pCols, - tableIdList, pTaskInfo, pScanPhyNode->node.pConditions, pOperatorDumy); taosArrayDestroy(tableIdList); return pOperator; } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) { @@ -4609,8 +4609,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo pOptr = createGroupOperatorInfo(ops[0], pExprInfo, num, pResBlock, pColList, pAggNode->node.pConditions, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } else { - pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, - pTaskInfo); + pOptr = + createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pScalarExprInfo, numOfScalarExpr, pTaskInfo); } } else if (QUERY_NODE_PHYSICAL_PLAN_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) { SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode; @@ -4626,7 +4626,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* 
pPhyNode, SExecTaskInfo* pTaskInfo .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark, - .calTrigger = pIntervalPhyNode->window.triggerType}; + .calTrigger = pIntervalPhyNode->window.triggerType, + .maxTs = INT64_MIN}; int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId; pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo); @@ -4713,6 +4714,18 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo return pOptr; } +int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) { + const SQueryTableDataCond* pCond = param; + const STimeWindow* pWin1 = p1; + const STimeWindow* pWin2 = p2; + if (pCond->order == TSDB_ORDER_ASC) { + return pWin1->skey - pWin2->skey; + } else if (pCond->order == TSDB_ORDER_DESC) { + return pWin2->skey - pWin1->skey; + } + return 0; +} + int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) { pCond->loadExternalRows = false; @@ -4724,16 +4737,30 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi return terrno; } - pCond->twindow = pTableScanNode->scanRange; + // pCond->twindow = pTableScanNode->scanRange; + // TODO: get it from stable scan node + pCond->numOfTWindows = 1; + pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow)); + pCond->twindows[0] = pTableScanNode->scanRange; #if 1 // todo work around a problem, remove it later - if ((pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey > pCond->twindow.ekey) || - (pCond->order == TSDB_ORDER_DESC && pCond->twindow.skey < pCond->twindow.ekey)) { - TSWAP(pCond->twindow.skey, pCond->twindow.ekey); + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } } #endif + for (int32_t i = 0; i < pCond->numOfTWindows; ++i) { + if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) || + (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) { + TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey); + } + } + taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow); + pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER; // pCond->type = pTableScanNode->scanFlag; @@ -4892,27 +4919,31 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod return pList; } -int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, - STableListInfo* pListInfo, SNode* pTagCond) { +int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo, + SNode* pTagCond) { int32_t code = TSDB_CODE_SUCCESS; pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo)); if (tableType == TSDB_SUPER_TABLE) { - if(pTagCond){ + if (pTagCond) { + SIndexMetaArg metaArg = {.metaHandle = tsdbGetIdx(metaHandle), .suid = tableUid}; + SArray* res = taosArrayInit(8, sizeof(uint64_t)); - code = doFilterTag(pTagCond, res); + code = doFilterTag(pTagCond, &metaArg, res); if (code != TSDB_CODE_SUCCESS) { - qError("doFilterTag error:%d", code); + qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", 
tstrerror(code), tableUid); taosArrayDestroy(res); terrno = code; return code; + } else { + qDebug("success to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid); } - for(int i = 0; i < taosArrayGetSize(res); i++){ + for (int i = 0; i < taosArrayGetSize(res); i++) { STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)}; taosArrayPush(pListInfo->pTableList, &info); } taosArrayDestroy(res); - }else{ + } else { code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList); } } else { // Create one table group. @@ -4937,7 +4968,8 @@ SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) { tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) { - int32_t code = getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); + int32_t code = + getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -5057,8 +5089,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead goto _complete; } - (*pTaskInfo)->pRoot = - createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); + (*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, + &(*pTaskInfo)->tableqinfoList, pPlan->pTagCond); if (NULL == (*pTaskInfo)->pRoot) { code = terrno; goto _complete; @@ -5243,20 +5275,6 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo return TSDB_CODE_SUCCESS; } -int32_t initCacheSupporter(SCatchSupporter* pCatchSup, size_t rowSize, const char* pKey, const char* pDir) { - pCatchSup->keySize = sizeof(int64_t) + sizeof(int64_t) + sizeof(TSKEY); - pCatchSup->pKeyBuf = taosMemoryCalloc(1, pCatchSup->keySize); - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - pCatchSup->pWindowHashTable = taosHashInit(10000, hashFn, true, HASH_NO_LOCK); - if (pCatchSup->pKeyBuf == NULL || pCatchSup->pWindowHashTable == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - int32_t pageSize = rowSize * 32; - int32_t bufSize = pageSize * 4096; - return createDiskbasedBuf(&pCatchSup->pDataBuf, pageSize, bufSize, pKey, pDir); -} - int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) { pSup->keySize = sizeof(int64_t) + sizeof(TSKEY); pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize); @@ -5274,5 +5292,5 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) { if (bufSize <= pageSize) { bufSize = pageSize * 4; } - return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, "/tmp/"); + return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH); } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 613fa26c2d0b170be359de59bfc4fef357c5d563..b954eb3a221a187bc4fe96a3088125e149304ece 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn return false; } -static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock); +static void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t
numOfPseudoExpr, SSDataBlock* pBlock); static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, uint32_t* status) { @@ -250,7 +250,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca // currently only the tbname pseudo column if (pTableScanInfo->numOfPseudoExpr > 0) { - addTagPseudoColumnData(pTableScanInfo, pBlock); + addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pPseudoExpr, pTableScanInfo->numOfPseudoExpr, pBlock); } int64_t st = taosGetTimestampMs(); @@ -274,23 +274,31 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction switchCtxOrder(pCtx, numOfOutput); // setupQueryRangeForReverseScan(pTableScanInfo); - STimeWindow* pTWindow = &pTableScanInfo->cond.twindow; - TSWAP(pTWindow->skey, pTWindow->ekey); pTableScanInfo->cond.order = TSDB_ORDER_DESC; + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pTWindow = &pTableScanInfo->cond.twindows[i]; + TSWAP(pTWindow->skey, pTWindow->ekey); + } + SQueryTableDataCond *pCond = &pTableScanInfo->cond; + taosqsort(pCond->twindows, + pCond->numOfTWindows, + sizeof(STimeWindow), + pCond, + compareTimeWindow); } -void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) { +void addTagPseudoColumnData(SReadHandle *pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr, SSDataBlock* pBlock) { // currently only the tbname pseudo column - if (pTableScanInfo->numOfPseudoExpr == 0) { + if (numOfPseudoExpr == 0) { return; } SMetaReader mr = {0}; - metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0); + metaReaderInit(&mr, pHandle->meta, 0); metaGetTableEntryByUid(&mr, pBlock->info.uid); - for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) { - SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j]; + for (int32_t j = 0; j < numOfPseudoExpr; ++j) { + SExprInfo* pExpr = &pPseudoExpr[j]; int32_t dstSlotId = pExpr->base.resSchema.slotId; @@ -301,7 +309,7 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) // this is to handle the tbname if (fmIsScanPseudoColumnFunc(functionId)) { - setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId); + setTbNameColData(pHandle->meta, pBlock, pColInfoData, functionId); } else { // these are tags const char* p = NULL; if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) { @@ -380,7 +388,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime; return pBlock; } - return NULL; } @@ -395,9 +402,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { // do the ascending order traverse in the first place. 
while (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; @@ -405,14 +418,14 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to repeat ascending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to repeat ascending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } // do prepare for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } @@ -420,31 +433,40 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < total) { if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) { prepareForDescendingScan(pTableScanInfo, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput); - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } - STimeWindow* pWin = &pTableScanInfo->cond.twindow; - qDebug("%s start to descending order scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, - GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); - + qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } while (pTableScanInfo->scanTimes < total) { - SSDataBlock* p = doTableScanImpl(pOperator); - if (p != NULL) { - return p; + while (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + SSDataBlock* p = doTableScanImpl(pOperator); + if (p != NULL) { + return p; + } + pTableScanInfo->curTWinIdx += 1; + if (pTableScanInfo->curTWinIdx < pTableScanInfo->cond.numOfTWindows) { + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, pTableScanInfo->curTWinIdx); + } } pTableScanInfo->scanTimes += 1; - if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { + if (pTableScanInfo->scanTimes < total) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTableScanInfo->scanFlag = REPEAT_SCAN; - qDebug("%s start to repeat descending order scan data blocks due to query func required, qrange:%" PRId64 - "-%" PRId64, - GET_TASKID(pTaskInfo), pTaskInfo->window.skey, pTaskInfo->window.ekey); - - // do prepare 
for the next round table scan operation - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + qDebug("%s start to repeat descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo)); + for (int32_t i = 0; i < pTableScanInfo->cond.numOfTWindows; ++i) { + STimeWindow* pWin = &pTableScanInfo->cond.twindows[i]; + qDebug("%s\t qrange:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey); + } + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); + pTableScanInfo->curTWinIdx = 0; } } } @@ -524,6 +546,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, pInfo->dataReader = pDataReader; pInfo->scanFlag = MAIN_SCAN; pInfo->pColMatchInfo = pColList; + pInfo->curTWinIdx = 0; pOperator->name = "TableScanOperator"; // for debug purpose pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN; @@ -678,8 +701,9 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) { binarySearchForKey, NULL, TSDB_ORDER_ASC); } STableScanInfo* pTableScanInfo = pInfo->pOperatorDumy->info; - pTableScanInfo->cond.twindow = win; - tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond); + pTableScanInfo->cond.twindows[0] = win; + pTableScanInfo->curTWinIdx = 0; + tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0); pTableScanInfo->scanTimes = 0; return true; } else { @@ -731,91 +755,6 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool inverti return NULL; } -void static setSupKeyBuf(SCatchSupporter* pSup, int64_t groupId, int64_t childId, TSKEY ts) { - int64_t* pKey = (int64_t*)pSup->pKeyBuf; - pKey[0] = groupId; - pKey[1] = childId; - pKey[2] = ts; -} - -static int32_t catchWidonwInfo(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t pageId, int32_t tsIndex, - int64_t childId) { - SColumnInfoData* pColDataInfo = taosArrayGet(pDataBlock->pDataBlock, tsIndex); - TSKEY* tsCols = (int64_t*)pColDataInfo->pData; - for (int32_t i = 0; i < pDataBlock->info.rows; i++) { - setSupKeyBuf(pSup, pDataBlock->info.groupId, childId, tsCols[i]); - SWindowPosition* p1 = (SWindowPosition*)taosHashGet(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize); - if (p1 == NULL) { - SWindowPosition pos = {.pageId = pageId, .rowId = i}; - int32_t code = taosHashPut(pSup->pWindowHashTable, pSup->pKeyBuf, pSup->keySize, &pos, sizeof(SWindowPosition)); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } else { - p1->pageId = pageId; - p1->rowId = i; - } - } - return TSDB_CODE_SUCCESS; -} - -static int32_t catchDatablock(SSDataBlock* pDataBlock, SCatchSupporter* pSup, int32_t tsIndex, int64_t childId) { - int32_t start = 0; - int32_t stop = 0; - int32_t pageSize = getBufPageSize(pSup->pDataBuf); - while (start < pDataBlock->info.rows) { - blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize); - SSDataBlock* pDB = blockDataExtractBlock(pDataBlock, start, stop - start + 1); - if (pDB == NULL) { - return terrno; - } - int32_t pageId = -1; - void* pPage = getNewBufPage(pSup->pDataBuf, pDataBlock->info.groupId, &pageId); - if (pPage == NULL) { - blockDataDestroy(pDB); - return terrno; - } - int32_t size = blockDataGetSize(pDB) + sizeof(int32_t) + pDB->info.numOfCols * sizeof(int32_t); - assert(size <= pageSize); - blockDataToBuf(pPage, pDB); - setBufPageDirty(pPage, true); - releaseBufPage(pSup->pDataBuf, pPage); - blockDataDestroy(pDB); - start = stop + 1; - int32_t code = catchWidonwInfo(pDB, pSup, pageId, tsIndex, childId); - if 
(code != TSDB_CODE_SUCCESS) { - return code; - } - } - return TSDB_CODE_SUCCESS; -} - -static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) { - SSDataBlock* pBlock = pInfo->pUpdateRes; - if (pInfo->updateResIndex < pBlock->info.rows) { - blockDataCleanup(pInfo->pRes); - SCatchSupporter* pCSup = &pInfo->childAggSup; - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, 0); - TSKEY* tsCols = (TSKEY*)pColDataInfo->pData; - int32_t size = taosArrayGetSize(pInfo->childIds); - for (int32_t i = 0; i < size; i++) { - int64_t id = *(int64_t*)taosArrayGet(pInfo->childIds, i); - setSupKeyBuf(pCSup, pBlock->info.groupId, id, tsCols[pInfo->updateResIndex]); - SWindowPosition* pos = (SWindowPosition*)taosHashGet(pCSup->pWindowHashTable, pCSup->pKeyBuf, pCSup->keySize); - void* buf = getBufPage(pCSup->pDataBuf, pos->pageId); - SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false); - blockDataFromBuf(pDB, buf); - SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1); - blockDataMerge(pInfo->pRes, pSub); - blockDataDestroy(pDB); - blockDataDestroy(pSub); - } - pInfo->updateResIndex++; - return pInfo->pRes; - } - return NULL; -} - static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -886,8 +825,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pInfo->pRes->info.groupId = groupId; } - int32_t numOfCols = pInfo->pRes->info.numOfCols; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i); if (!pColMatchInfo->output) { continue; @@ -917,10 +855,16 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { pTaskInfo->code = terrno; return NULL; } + rows = pBlockInfo->rows; + + // currently only the tbname pseudo column + if (pInfo->numOfPseudoExpr > 0) { + addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes); + } + doFilter(pInfo->pCondition, pInfo->pRes, NULL); blockDataUpdateTsWindow(pInfo->pRes, 0); - break; } @@ -948,10 +892,9 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { } } -SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SReadHandle* pHandle, - uint64_t uid, SSDataBlock* pResBlock, SArray* pColList, - SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition, - SOperatorInfo* pOperatorDumy) { +SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle, + SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, + STimeWindowAggSupp* pTwSup, int16_t tsColId) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -959,22 +902,28 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info; + SScanPhysiNode* pScanPhyNode = &pTableScanNode->scan; + + SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; + SOperatorInfo* pTableScanDummy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); + + STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info; - int32_t numOfOutput = taosArrayGetSize(pColList); + int32_t 
numOfCols = 0; + pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID); - SArray* pColIds = taosArrayInit(4, sizeof(int16_t)); + int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo); + SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t)); for (int32_t i = 0; i < numOfOutput; ++i) { - SColMatchInfo* id = taosArrayGet(pColList, i); - int16_t colId = id->colId; + SColMatchInfo* id = taosArrayGet(pInfo->pColMatchInfo, i); + + int16_t colId = id->colId; taosArrayPush(pColIds, &colId); } - pInfo->pColMatchInfo = pColList; - // set the extract column id to streamHandle - tqReadHandleSetColIdList((STqReadHandle*)streamReadHandle, pColIds); - int32_t code = tqReadHandleSetTbUidList(streamReadHandle, pTableIdList); + tqReadHandleSetColIdList((STqReadHandle*)pHandle->reader, pColIds); + int32_t code = tqReadHandleSetTbUidList(pHandle->reader, pTableIdList); if (code != 0) { goto _error; } @@ -990,37 +939,39 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } - pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan + pInfo->primaryTsIndex = tsColId; if (pSTInfo->interval.interval > 0) { - pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan + pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark); } else { pInfo->pUpdateInfo = NULL; } - pInfo->readHandle = *pHandle; - pInfo->tableUid = uid; - pInfo->streamBlockReader = streamReadHandle; - pInfo->pRes = pResBlock; - pInfo->pCondition = pCondition; - pInfo->pDataReader = pDataReader; - pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; - pInfo->pOperatorDumy = pOperatorDumy; - pInfo->interval = pSTInfo->interval; - pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; - - initCacheSupporter(&pInfo->childAggSup, 1024, "StreamFinalInterval", - "/tmp/"); // TODO(liuyao) get row size from phy plan + // create the pseudo columns info + if (pTableScanNode->scan.pScanPseudoCols != NULL) { + pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr); + } - pOperator->name = "StreamBlockScanOperator"; + pInfo->readHandle = *pHandle; + pInfo->tableUid = pScanPhyNode->uid; + pInfo->streamBlockReader = pHandle->reader; + pInfo->pRes = createResDataBlock(pDescNode); + pInfo->pCondition = pScanPhyNode->node.pConditions; + pInfo->pDataReader = pDataReader; + pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; + pInfo->pOperatorDumy = pTableScanDummy; + pInfo->interval = pSTInfo->interval; + pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1}; + + pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->info = pInfo; - pOperator->numOfExprs = pResBlock->info.numOfCols; - pOperator->pTaskInfo = pTaskInfo; - - pOperator->fpSet = - createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, NULL, operatorDummyCloseFn, NULL, NULL, NULL); + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->info = pInfo; + pOperator->numOfExprs = pInfo->pRes->info.numOfCols; + pOperator->pTaskInfo = pTaskInfo; + + pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamBlockScan, NULL, + NULL, operatorDummyCloseFn, NULL, NULL, NULL); return pOperator;
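Under the new signature, createStreamScanOperatorInfo derives the result block, the column-match list, and the dummy table-scan operator from the STableScanPhysiNode itself, so a caller now supplies only the reader, read handle, table id list, plan node, task info, time-window supporter, and primary timestamp column id. A hypothetical call site under that signature (variable names are assumed, not from this patch):

  SOperatorInfo* pStreamScan = createStreamScanOperatorInfo(pDataReader, pHandle, pTableIdList, pTableScanNode, pTaskInfo, &twAggSupp, tsColId);

diff --git 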
a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 5d0a0a0270da9a54508be98f11a146d18201849b..829968d37f9a8a97cf1f256b493035ad0129f71a 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -622,18 +622,103 @@ static void saveDataBlockLastRow(char** pRow, SArray* pDataBlock, int32_t rowInd } } -static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, - uint64_t tableGroupId) { +typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); + +int32_t binarySearch(void* keyList, int num, TSKEY key, int order, + __get_value_fn_t getValuefn) { + int firstPos = 0, lastPos = num - 1, midPos = -1; + int numOfRows = 0; + + if (num <= 0) return -1; + if (order == TSDB_ORDER_DESC) { + // find the last position whose key is smaller than or equal to the given key + while (1) { + if (key >= getValuefn(keyList, lastPos)) return lastPos; + if (key == getValuefn(keyList, firstPos)) return firstPos; + if (key < getValuefn(keyList, firstPos)) return firstPos - 1; + + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } + } + + } else { + // find the first position whose key is greater than or equal to the given key + while (1) { + if (key <= getValuefn(keyList, firstPos)) return firstPos; + if (key == getValuefn(keyList, lastPos)) return lastPos; + + if (key > getValuefn(keyList, lastPos)) { + lastPos = lastPos + 1; + if (lastPos >= num) + return -1; + else + return lastPos; + } + + numOfRows = lastPos - firstPos + 1; + midPos = (numOfRows >> 1) + firstPos; + + if (key < getValuefn(keyList, midPos)) { + lastPos = midPos - 1; + } else if (key > getValuefn(keyList, midPos)) { + firstPos = midPos + 1; + } else { + break; + } + } + } + + return midPos; +} + +int64_t getReskey(void* data, int32_t index) { + SArray* res = (SArray*) data; + SResKeyPos* pos = taosArrayGetP(res, index); + return *(int64_t*)pos->key; +} + +static int32_t saveResult(SResultRow* result, uint64_t groupId, SArray* pUpdated) { + int32_t size = taosArrayGetSize(pUpdated); + int32_t index = binarySearch(pUpdated, size, result->win.skey, TSDB_ORDER_DESC, getReskey); + if (index == -1) { + index = 0; + } else { + TSKEY resTs = getReskey(pUpdated, index); + if (resTs < result->win.skey) { + index++; + } else { + return TSDB_CODE_SUCCESS; + } + } + + SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (newPos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + newPos->groupId = groupId; + newPos->pos = (SResultRowPosition){.pageId = result->pageId, .offset = result->offset}; + *(int64_t*)newPos->key = result->win.skey; + if (taosArrayInsert(pUpdated, index, &newPos) == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + return TSDB_CODE_SUCCESS; +} + +static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, + uint64_t tableGroupId, SArray* pUpdated) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; int32_t numOfOutput = pOperatorInfo->numOfExprs; - SArray* pUpdated = NULL; - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - pUpdated = taosArrayInit(4, POINTER_BYTES); - } - int32_t step = 1; bool ascScan = (pInfo->order == TSDB_ORDER_ASC);
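saveResult keeps pUpdated sorted by window start key: the descending-order binarySearch returns the last slot whose key is less than or equal to result->win.skey, an equal key means the window is already recorded, and a strictly smaller key makes room at index + 1. A worked example with hypothetical keys:

  // pUpdated keys: [10, 20, 30]
  // skey = 20 -> binarySearch gives index 1, resTs == 20, already recorded, nothing inserted
  // skey = 25 -> binarySearch gives index 1, resTs 20 < 25, taosArrayInsert at 2 -> [10, 20, 25, 30]
  // skey = 5  -> binarySearch gives -1, index forced to 0 -> [5, 10, 20, 30]

@@ -663,13 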
+748,10 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && + (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == 0)) { + saveResult(pResult, tableGroupId, pUpdated); } int32_t forwardStep = 0; @@ -742,13 +824,10 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) { - SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); - pos->groupId = tableGroupId; - pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset}; - *(int64_t*)pos->key = pResult->win.skey; - - taosArrayPush(pUpdated, &pos); + if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && + (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE || + pInfo->twAggSup.calTrigger == 0)) { + saveResult(pResult, tableGroupId, pUpdated); } ekey = ascScan? nextWin.ekey:nextWin.skey; @@ -769,7 +848,6 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe saveDataBlockLastRow(pInfo->pRow, pBlock->pDataBlock, rowIndex, pBlock->info.numOfCols); } - return pUpdated; // updateResultRowInfoActiveIndex(pResultRowInfo, &pInfo->win, pRuntimeEnv->current->lastKey, true, false); } @@ -799,7 +877,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { STableQueryInfo* pTableQueryInfo = pInfo->pCurrent; setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window); - hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, NULL); #if 0 // test for encode/decode result info if(pOperator->encodeResultRow){ @@ -1067,7 +1145,7 @@ void doClearWindow(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, char* pData, } static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, - SInterval* pIntrerval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, + SInterval* pInterval, int32_t tsIndex, int32_t numOfOutput, SSDataBlock* pBlock, SArray* pUpWins) { SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex); TSKEY *tsCols = (TSKEY*)pColDataInfo->pData; @@ -1075,8 +1153,8 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, for (int32_t i = 0; i < pBlock->info.rows; i += step) { SResultRowInfo dumyInfo; dumyInfo.cur.pageId = -1; - STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pIntrerval, - pIntrerval->precision, NULL); + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, + pInterval->precision, NULL); step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC); doClearWindow(pSup, pBinfo, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput); @@ -1086,6 +1164,39 @@ static void doClearWindows(SAggSupporter* pSup, SOptrBasicInfo* pBinfo, } }
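The new closeIntervalWindow below iterates the result-row hash map and retires every interval window whose end key has fallen behind the close line maxTs - waterMark, removing the window from the map and collecting its position into closeWins. A small worked example of the closing test:

  // pSup->maxTs = 1000, pSup->waterMark = 200 -> close line = 800
  // window with ekey = 750: 750 < 800, closed and pushed onto closeWins
  // window with ekey = 900: 900 >= 800, stays open to absorb late rows

+static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup, + SInterval* pInterval, SArray* 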
closeWins) { + void *pIte = NULL; + size_t keyLen = 0; + while((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { + void* key = taosHashGetKey(pIte, &keyLen); + uint64_t groupId = *(uint64_t*) key; + ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); + TSKEY ts = *(uint64_t*) ((char*)key + sizeof(uint64_t)); + SResultRowInfo dumyInfo; + dumyInfo.cur.pageId = -1; + STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, + pInterval->precision, NULL); + if (win.ekey < pSup->maxTs - pSup->waterMark) { + char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))]; + SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId); + taosHashRemove(pHashMap, keyBuf, keyLen); + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = groupId; + pos->pos = *(SResultRowPosition*) pIte; + *(int64_t*)pos->key = ts; + if (!taosArrayPush(closeWins, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + } + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SIntervalAggOperatorInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -1106,7 +1217,9 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; - SArray* pUpdated = NULL; + SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); + SArray* pClosed = taosArrayInit(4, POINTER_BYTES); + while (1) { SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); if (pBlock == NULL) { @@ -1128,10 +1241,19 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { continue; } - pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId); + hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId, pUpdated); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } - - finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); + closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, + &pInfo->interval, pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed, + pInfo->binfo.rowCellInfoOffset); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + taosArrayDestroy(pClosed); + finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, + pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); @@ -1935,63 +2057,6 @@ _error: return NULL; } -typedef int64_t (*__get_value_fn_t)(void* data, int32_t index); - -int32_t binarySearch(void* keyList, int num, TSKEY key, int order, - __get_value_fn_t getValuefn) { - int firstPos = 0, lastPos = num - 1, midPos = -1; - int numOfRows = 0; - - if (num <= 0) return -1; - if (order == TSDB_ORDER_DESC) { - // find the first position which is smaller than the key - while (1) { - if (key >= getValuefn(keyList, lastPos)) return lastPos; - if (key == getValuefn(keyList, firstPos)) return firstPos; - if (key < getValuefn(keyList, firstPos)) return firstPos - 1; - - numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; - - if (key < getValuefn(keyList, midPos)) { - lastPos = midPos - 1; - } else if (key > getValuefn(keyList, midPos)) { - firstPos = midPos + 
1; - } else { - break; - } - } - - } else { - // find the first position which is bigger than the key - while (1) { - if (key <= getValuefn(keyList, firstPos)) return firstPos; - if (key == getValuefn(keyList, lastPos)) return lastPos; - - if (key > getValuefn(keyList, lastPos)) { - lastPos = lastPos + 1; - if (lastPos >= num) - return -1; - else - return lastPos; - } - - numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; - - if (key < getValuefn(keyList, midPos)) { - lastPos = midPos - 1; - } else if (key > getValuefn(keyList, midPos)) { - firstPos = midPos + 1; - } else { - break; - } - } - } - - return midPos; -} - int64_t getSessionWindowEndkey(void* data, int32_t index) { SArray* pWinInfos = (SArray*) data; SResultWindowInfo* pWin = taosArrayGet(pWinInfos, index); @@ -2223,12 +2288,14 @@ static void doStreamSessionWindowAggImpl(SOperatorInfo* pOperator, if (winNum > 0) { compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pTaskInfo, pStUpdated, pStDeleted); } - - code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY)); - if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + pCurWin->isClosed = false; + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { + code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &(pCurWin->win.skey), sizeof(TSKEY)); + if (code != TSDB_CODE_SUCCESS) { + longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } + pCurWin->isOutput = true; } - pCurWin->isOutput = true; i += winRows; } } @@ -2325,6 +2392,37 @@ bool isFinalSession(SStreamSessionAggOperatorInfo* pInfo) { return pInfo->pChildren != NULL; } +int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pClosed, + int8_t calTrigger) { + // Todo(liuyao) save window to tdb + int32_t size = taosArrayGetSize(pWins); + for (int32_t i = 0; i < size; i++) { + SResultWindowInfo *pSeWin = taosArrayGet(pWins, i); + if (pSeWin->win.ekey < pTwSup->maxTs - pTwSup->waterMark) { + if (!pSeWin->isClosed) { + SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t)); + if (pos == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + pos->groupId = 0; + pos->pos = pSeWin->pos; + *(int64_t*)pos->key = pSeWin->win.ekey; + if (!taosArrayPush(pClosed, &pos)) { + taosMemoryFree(pos); + return TSDB_CODE_OUT_OF_MEMORY; + } + pSeWin->isClosed = true; + if (calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + pSeWin->isOutput = true; + } + } + continue; + } + break; + } + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2377,13 +2475,21 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) { doStreamSessionWindowAggImpl(pOperator, pBlock, NULL, NULL); } doStreamSessionWindowAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); } - // restore the value pOperator->status = OP_RES_TO_RETURN; + + SArray* pClosed = taosArrayInit(16, POINTER_BYTES); + closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pClosed, + pInfo->twAggSup.calTrigger); SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId); taosHashCleanup(pStUpdated); + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) { + taosArrayAddAll(pUpdated, pClosed); + } + 
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 7a4cd8092205786065015252432dcb4de0a1db41..ea401e56e5c6585b93344af99280bb450137f98f 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -14,7 +14,7 @@ target_include_directories( target_link_libraries( function - PRIVATE os util common nodes scalar catalog qcom transport + PRIVATE os util common nodes scalar qcom transport PUBLIC uv_a ) diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h index 21d277665872fc520ecea0fe6157b8338789499b..29dd0bcd90d6297ca539bad8a5c5cd78ff151d1d 100644 --- a/source/libs/function/inc/functionMgtInt.h +++ b/source/libs/function/inc/functionMgtInt.h @@ -44,9 +44,7 @@ extern "C" { #define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0) -#define FUNC_UDF_ID_START 5000 -#define FUNC_AGGREGATE_UDF_ID 5001 -#define FUNC_SCALAR_UDF_ID 5002 +#define FUNC_UDF_ID_START 5000 extern const int funcMgtUdfNum; diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 9563a81b7b22494993b93adaa02addccb188d397..24a781855ac2337863a381e7d01d22159ee78937 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -156,6 +156,14 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of PERCENTILE function can only be a column"); + } + + //param1 SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); if (pValue->datum.i < 0 || pValue->datum.i > 100) { @@ -170,6 +178,7 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + //set result type pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } @@ -188,30 +197,47 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + //param0 + SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "The first parameter of APERCENTILE function can only be a column"); } - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode) != QUERY_NODE_VALUE) { + //param1 + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); + if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); }
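Both translators now reject a non-column first argument before any value or type checks, as the QUERY_NODE_COLUMN tests added above show. Illustrative outcomes for PERCENTILE (hypothetical SQL, shown as comments):

  // PERCENTILE(voltage, 50)     -> accepted: param0 is a column and param1 lies in [0, 100]
  // PERCENTILE(voltage + 1, 50) -> rejected: the first parameter can only be a column
  // PERCENTILE(voltage, 101)    -> rejected: invalid parameter value, outside [0, 100]

- SValueNode* pValue = (SValueNode*)pParamNode; + SValueNode* pValue = 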
(SValueNode*)pParamNode1; if (pValue->datum.i < 0 || pValue->datum.i > 100) { return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); } pValue->notReserved = true; + uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + //param2 if (3 == numOfParams) { - SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) { + uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type; + if (!IS_VAR_DATA_TYPE(para3Type)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); + if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validAperventileAlgo((SValueNode*)pParamNode2)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); } + + pValue = (SValueNode*)pParamNode2; + pValue->notReserved = true; } pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; @@ -700,6 +726,11 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { //param1 if (numOfParams == 2) { + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; + if (!IS_INTEGER_TYPE(paraType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); @@ -714,7 +745,13 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { pValue->notReserved = true; } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; + uint8_t resType; + if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) { + resType = TSDB_DATA_TYPE_BIGINT; + } else { + resType = TSDB_DATA_TYPE_DOUBLE; + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 154ba43d51388077cf271474e86ba319328fb408..068d06fc31a9e73c05e84308a0b137386c69b060 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -834,12 +834,14 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { if (IS_INTEGER_TYPE(type)) { pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count); } else { - if (isinf(pAvgRes->sum.dsum) || isnan(pAvgRes->sum.dsum)) { - GET_RES_INFO(pCtx)->isNullRes = 1; - } pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count); } + //check for overflow + if (isinf(pAvgRes->result) || isnan(pAvgRes->result)) { + GET_RES_INFO(pCtx)->isNullRes = 1; + } + return functionFinalize(pCtx, pBlock); } @@ -1963,7 +1965,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult if (pCtx->numOfParams == 2) { pInfo->algo = APERCT_ALGO_DEFAULT; } else if (pCtx->numOfParams == 3) { - pInfo->algo = getApercentileAlgo(pCtx->param[2].param.pz); + pInfo->algo = 
getApercentileAlgo(varDataVal(pCtx->param[2].param.pz)); if (pInfo->algo == APERCT_ALGO_UNKNOWN) { return false; } @@ -2299,15 +2301,15 @@ static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) { } static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SColumnInfoData* pOutput, int32_t pos, int32_t order) { - int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1; + int32_t factor = (order == TSDB_ORDER_ASC)? 1:-1; switch (type) { case TSDB_DATA_TYPE_INT: { int32_t v = *(int32_t*)pv; - int32_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt32(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; @@ -2315,22 +2317,22 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { int8_t v = *(int8_t*)pv; - int8_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt8(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; } case TSDB_DATA_TYPE_SMALLINT: { int16_t v = *(int16_t*)pv; - int16_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null + int64_t delta = factor*(v - pDiffInfo->prev.i64); // direct previous may be null if (delta < 0 && pDiffInfo->ignoreNegative) { colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendInt16(pOutput, pos, &delta); + colDataAppendInt64(pOutput, pos, &delta); } pDiffInfo->prev.i64 = v; break; @@ -2348,11 +2350,11 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo } case TSDB_DATA_TYPE_FLOAT: { float v = *(float*)pv; - float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null + double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow colDataSetNull_f(pOutput->nullbitmap, pos); } else { - colDataAppendFloat(pOutput, pos, &delta); + colDataAppendDouble(pOutput, pos, &delta); } pDiffInfo->prev.d64 = v; break; diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 506b0eb8da98444491b2f86f0e9951b71193de75..c2b325bc928be50ac908c103bb6a14a907156b39 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -16,7 +16,6 @@ #include "functionMgt.h" #include "builtins.h" -#include "catalog.h" #include "functionMgtInt.h" #include "taos.h" #include "taoserror.h" @@ -65,35 +64,19 @@ static bool isSpecificClassifyFunc(int32_t funcId, uint64_t classification) { return FUNC_MGT_TEST_MASK(funcMgtBuiltins[funcId].classification, classification); } -static int32_t getUdfInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) { - SFuncInfo funcInfo = {0}; - int32_t code = catalogGetUdfInfo(pParam->pCtg, pParam->pRpc, pParam->pMgmtEps, pFunc->functionName, &funcInfo); - if (TSDB_CODE_SUCCESS != code) { - return code; - } - - pFunc->funcType = FUNCTION_TYPE_UDF; - pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? 
FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID; - pFunc->node.resType.type = funcInfo.outputType; - pFunc->node.resType.bytes = funcInfo.outputLen; - pFunc->udfBufSize = funcInfo.bufSize; - tFreeSFuncInfo(&funcInfo); - return TSDB_CODE_SUCCESS; -} - int32_t fmFuncMgtInit() { taosThreadOnce(&functionHashTableInit, doInitFunctionTable); return initFunctionCode; } -int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc) { +int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) { void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc->functionName, strlen(pFunc->functionName)); if (NULL != pVal) { pFunc->funcId = *(int32_t*)pVal; pFunc->funcType = funcMgtBuiltins[pFunc->funcId].type; - return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pParam->pErrBuf, pParam->errBufLen); + return funcMgtBuiltins[pFunc->funcId].translateFunc(pFunc, pMsg, msgLen); } - return getUdfInfo(pParam, pFunc); + return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION; } bool fmIsBuiltinFunc(const char* pFunc) { diff --git a/source/libs/index/inc/indexCache.h b/source/libs/index/inc/indexCache.h index 6cbe2532cc5b7532e011f14f76dea49437087006..1046a04db34062367fb84bef2c6b292da6b147d5 100644 --- a/source/libs/index/inc/indexCache.h +++ b/source/libs/index/inc/indexCache.h @@ -74,7 +74,7 @@ void indexCacheIteratorDestroy(Iterate* iiter); int indexCachePut(void* cache, SIndexTerm* term, uint64_t uid); // int indexCacheGet(void *cache, uint64_t *rst); -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s); +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s); void indexCacheRef(IndexCache* cache); void indexCacheUnRef(IndexCache* cache); diff --git a/source/libs/index/inc/indexTfile.h b/source/libs/index/inc/indexTfile.h index af32caa8219016cd6562423466d5f8a44eeb0229..ca55aa93da5a47bcefa26bf880d115abeb46b8c8 100644 --- a/source/libs/index/inc/indexTfile.h +++ b/source/libs/index/inc/indexTfile.h @@ -105,7 +105,7 @@ TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName); TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName); TFileReader* tfileReaderCreate(WriterCtx* ctx); void tfileReaderDestroy(TFileReader* reader); -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr); +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr); void tfileReaderRef(TFileReader* reader); void tfileReaderUnRef(TFileReader* reader); @@ -120,7 +120,7 @@ int tfileWriterFinish(TFileWriter* tw); IndexTFile* indexTFileCreate(const char* path); void indexTFileDestroy(IndexTFile* tfile); int indexTFilePut(void* tfile, SIndexTerm* term, uint64_t uid); -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* tr); +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* tr); Iterate* tfileIteratorCreate(TFileReader* reader); void tfileIteratorDestroy(Iterate* iterator); diff --git a/source/libs/index/inc/indexUtil.h b/source/libs/index/inc/indexUtil.h index f1676ed411a5e2074667816d1746dc607dc0f44d..dbaecaa9630b04b8b50f108c1a59e499f04899dc 100644 --- a/source/libs/index/inc/indexUtil.h +++ b/source/libs/index/inc/indexUtil.h @@ -66,7 +66,7 @@ extern "C" { * [1, 4, 5] * output:[4, 5] */ -void iIntersection(SArray *interResults, SArray *finalResult); +void iIntersection(SArray *in, SArray *out); /* multi sorted result union * input: [1, 2, 4, 5] @@ -74,7 +74,7 @@ void iIntersection(SArray 
*interResults, SArray *finalResult); * [1, 4, 5] * output:[1, 2, 3, 4, 5] */ -void iUnion(SArray *interResults, SArray *finalResult); +void iUnion(SArray *in, SArray *out); /* see example * total: [1, 2, 4, 5, 7, 8] @@ -92,19 +92,24 @@ typedef struct { uint64_t data; } SIdxVerdata; +/* + * index temp result + * + */ typedef struct { SArray *total; - SArray *added; - SArray *deled; -} SIdxTempResult; + SArray *add; + SArray *del; +} SIdxTRslt; + +SIdxTRslt *idxTRsltCreate(); -SIdxTempResult *sIdxTempResultCreate(); +void idxTRsltClear(SIdxTRslt *tr); -void sIdxTempResultClear(SIdxTempResult *tr); +void idxTRsltDestroy(SIdxTRslt *tr); -void sIdxTempResultDestroy(SIdxTempResult *tr); +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *out); -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr); #ifdef __cplusplus } #endif diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index 500f5706491b61e05deea65d567b68ecc8cb1694..8584d95bf26bc2a586e0e5842ab8c4e5b5572bbd 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -29,7 +29,7 @@ #include "lucene++/Lucene_c.h" #endif -#define INDEX_NUM_OF_THREADS 1 +#define INDEX_NUM_OF_THREADS 5 #define INDEX_QUEUE_SIZE 200 #define INDEX_DATA_BOOL_NULL 0x02 @@ -85,7 +85,7 @@ static int indexMergeFinalResults(SArray* interResults, EIndexOperatorType oTyp static int indexGenTFile(SIndex* index, IndexCache* cache, SArray* batch); // merge cache and tfile by opera type -static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTempResult* helper); +static void indexMergeCacheAndTFile(SArray* result, IterateValue* icache, IterateValue* iTfv, SIdxTRslt* helper); // static int32_t indexSerialTermKey(SIndexTerm* itm, char* buf); // int32_t indexSerialKey(ICacheKey* key, char* buf); @@ -201,6 +201,7 @@ int indexPut(SIndex* index, SIndexMultiTerm* fVals, uint64_t uid) { char buf[128] = {0}; ICacheKey key = {.suid = p->suid, .colName = p->colName, .nColName = strlen(p->colName), .colType = p->colType}; int32_t sz = indexSerialCacheKey(&key, buf); + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); IndexCache** cache = taosHashGet(index->colObj, buf, sz); assert(*cache != NULL); @@ -328,6 +329,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result char buf[128] = {0}; ICacheKey key = { .suid = term->suid, .colName = term->colName, .nColName = strlen(term->colName), .colType = term->colType}; + indexDebug("suid: %" PRIu64 ", colName: %s, colType: %d", key.suid, key.colName, key.colType); int32_t sz = indexSerialCacheKey(&key, buf); taosThreadMutexLock(&sIdx->mtx); @@ -341,7 +343,7 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t st = taosGetTimestampUs(); - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); if (0 == indexCacheSearch(cache, query, tr, &s)) { if (s == kTypeDeletion) { indexInfo("col: %s already drop by", term->colName); @@ -363,12 +365,12 @@ static int indexTermSearch(SIndex* sIdx, SIndexTermQuery* query, SArray** result int64_t cost = taosGetTimestampUs() - st; indexInfo("search cost: %" PRIu64 "us", cost); - sIdxTempResultMergeTo(*result, tr); + idxTRsltMergeTo(tr, *result); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return 0; END: - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); return -1; } static void indexInterResultsDestroy(SArray* results) { @@ -404,18 +406,18 @@ static int 
indexMergeFinalResults(SArray* interResults, EIndexOperatorType oType return 0; } -static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTempResult* tr) { +static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdxTRslt* tr) { int32_t sz = taosArrayGetSize(result); if (sz > 0) { TFileValue* lv = taosArrayGetP(result, sz - 1); if (tfv != NULL && strcmp(lv->colVal, tfv->colVal) != 0) { - sIdxTempResultMergeTo(lv->tableId, tr); - sIdxTempResultClear(tr); + idxTRsltMergeTo(tr, lv->tableId); + idxTRsltClear(tr); taosArrayPush(result, &tfv); } else if (tfv == NULL) { // handle last iterator - sIdxTempResultMergeTo(lv->tableId, tr); + idxTRsltMergeTo(tr, lv->tableId); } else { // temp result saved in help tfileValueDestroy(tfv); @@ -424,7 +426,7 @@ static void indexMayMergeTempToFinalResult(SArray* result, TFileValue* tfv, SIdx taosArrayPush(result, &tfv); } } -static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTempResult* tr) { +static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateValue* tv, SIdxTRslt* tr) { char* colVal = (cv != NULL) ? cv->colVal : tv->colVal; TFileValue* tfv = tfileValueCreate(colVal); @@ -434,9 +436,9 @@ static void indexMergeCacheAndTFile(SArray* result, IterateValue* cv, IterateVal uint64_t id = *(uint64_t*)taosArrayGet(cv->val, 0); uint32_t ver = cv->ver; if (cv->type == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, id) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, id) } else if (cv->type == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, id) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, id) } } if (tv != NULL) { @@ -489,7 +491,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { bool cn = cacheIter ? cacheIter->next(cacheIter) : false; bool tn = tfileIter ? tfileIter->next(tfileIter) : false; - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); while (cn == true || tn == true) { IterateValue* cv = (cn == true) ? cacheIter->getValue(cacheIter) : NULL; IterateValue* tv = (tn == true) ? 
tfileIter->getValue(tfileIter) : NULL; @@ -515,7 +517,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) { } } indexMayMergeTempToFinalResult(result, NULL, tr); - sIdxTempResultDestroy(tr); + idxTRsltDestroy(tr); int ret = indexGenTFile(sIdx, pCache, result); indexDestroyFinalResult(result); diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c index 6e52c4b1ba03ecd77cc4476022d61d160ae34890..3b33006452989fbe8f69155f30041d6345b1d1e0 100644 --- a/source/libs/index/src/indexCache.c +++ b/source/libs/index/src/indexCache.c @@ -36,32 +36,31 @@ static char* indexCacheTermGet(const void* pData); static MemTable* indexInternalCacheCreate(int8_t type); -static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); +static int32_t cacheSearchTerm(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchPrefix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchSuffix(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRegex(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRange(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); /*comm func of compare, used in (LE/LT/GE/GT compare)*/ -static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s, - RangeType type); -static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, 
SIdxTempResult* tr, STermValueType* s); -static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s); - -static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, +static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s, RangeType type); +static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); +static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s); + +static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type); -static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTempResult* tr, STermValueType* s) = { +static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTRslt* tr, STermValueType* s) = { {cacheSearchTerm, cacheSearchPrefix, cacheSearchSuffix, cacheSearchRegex, cacheSearchLessThan, cacheSearchLessEqual, cacheSearchGreaterThan, cacheSearchGreaterEqual, cacheSearchRange}, {cacheSearchTerm_JSON, cacheSearchPrefix_JSON, cacheSearchSuffix_JSON, cacheSearchRegex_JSON, @@ -71,7 +70,7 @@ static int32_t (*cacheSearch[][QUERY_MAX])(void* cache, SIndexTerm* ct, SIdxTemp static void doMergeWork(SSchedMsg* msg); static bool indexCacheIteratorNext(Iterate* itera); -static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { if (cache == NULL) { return 0; } @@ -93,11 +92,11 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr CacheTerm* c = (CacheTerm*)SL_GET_NODE_DATA(node); if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else { break; @@ -108,20 +107,19 @@ static int32_t cacheSearchTerm(void* cache, SIndexTerm* term, SIdxTempResult* tr tSkipListDestroyIter(iter); return 0; } -static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchPrefix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchSuffix(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t 
cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRegex(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } -static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, - RangeType type) { +static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) { if (cache == NULL) { return 0; } @@ -133,6 +131,7 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes CacheTerm* pCt = taosMemoryCalloc(1, sizeof(CacheTerm)); pCt->colVal = term->colVal; + pCt->colType = term->colType; pCt->version = atomic_load_64(&pCache->version); char* key = indexCacheTermGet(pCt); @@ -147,11 +146,11 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes TExeCond cond = cmpFn(c->colVal, pCt->colVal, pCt->colType); if (cond == MATCH) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else if (cond == CONTINUE) { continue; @@ -163,20 +162,20 @@ static int32_t cacheSearchCompareFunc(void* cache, SIndexTerm* term, SIdxTempRes tSkipListDestroyIter(iter); return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, LT); } -static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, LE); } -static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterThan(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, GT); } -static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterEqual(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc(cache, term, tr, s, GE); } -static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { if (cache == NULL) { return 0; } @@ -204,11 +203,11 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul if (0 == strcmp(c->colVal, pCt->colVal)) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else { break; @@ -222,32 +221,32 @@ static int32_t cacheSearchTerm_JSON(void* cache, SIndexTerm* term, SIdxTempResul return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* 
tr, STermValueType* s) { +static int32_t cacheSearchPrefix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchSuffix_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRegex_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, LT); } -static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchLessEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, LE); } -static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterThan_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, GT); } -static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchGreaterEqual_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return cacheSearchCompareFunc_JSON(cache, term, tr, s, GE); } -static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRange_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s, +static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s, RangeType type) { if (cache == NULL) { return 0; @@ -289,11 +288,11 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe TExeCond cond = cmpFn(p + skip, term->colVal, dType); if (cond == MATCH) { if (c->operaType == ADD_VALUE) { - INDEX_MERGE_ADD_DEL(tr->deled, tr->added, c->uid) + INDEX_MERGE_ADD_DEL(tr->del, tr->add, c->uid) // taosArrayPush(result, &c->uid); *s = kTypeValue; } else if (c->operaType == DEL_VALUE) { - INDEX_MERGE_ADD_DEL(tr->added, tr->deled, c->uid) + INDEX_MERGE_ADD_DEL(tr->add, tr->del, c->uid) } } else if (cond == CONTINUE) { continue; @@ -309,7 +308,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTe return TSDB_CODE_SUCCESS; } -static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTempResult* tr, STermValueType* s) { +static int32_t cacheSearchRange(void* cache, SIndexTerm* term, SIdxTRslt* tr, STermValueType* s) { // impl later return 0; } @@ -568,7 +567,7 @@ int indexCacheDel(void* cache, const char* fieldValue, int32_t fvlen, uint64_t u return 0; } -static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResult* tr, STermValueType* s) { +static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr, STermValueType* s) { if (mem == 
NULL) { return 0; } @@ -582,7 +581,7 @@ static int32_t indexQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTempResu return cacheSearch[0][qtype](mem, term, tr, s); } } -int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result, STermValueType* s) { +int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) { int64_t st = taosGetTimestampUs(); if (cache == NULL) { return 0; @@ -597,10 +596,10 @@ int indexCacheSearch(void* cache, SIndexTermQuery* query, SIdxTempResult* result indexMemRef(imm); taosThreadMutexUnlock(&pCache->mtx); - int ret = indexQueryMem(mem, query, result, s); + int ret = (mem && mem->mem) ? indexQueryMem(mem, query, result, s) : 0; if (ret == 0 && *s != kTypeDeletion) { // continue search in imm - ret = indexQueryMem(imm, query, result, s); + ret = (imm && imm->mem) ? indexQueryMem(imm, query, result, s) : 0; } indexMemUnRef(mem); @@ -709,7 +708,7 @@ static int32_t indexCacheJsonTermCompare(const void* l, const void* r) { return cmp; } static MemTable* indexInternalCacheCreate(int8_t type) { - int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : type; + int ttype = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? TSDB_DATA_TYPE_BINARY : TSDB_DATA_TYPE_BINARY; int32_t (*cmpFn)(const void* l, const void* r) = INDEX_TYPE_CONTAIN_EXTERN_TYPE(type, TSDB_DATA_TYPE_JSON) ? indexCacheJsonTermCompare : indexCacheTermCompare; diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 0273867ccf040f3d3344066270ef3b8aa6a3bae2..b882caa168a3b89dcd037ee34eefa2f8b82bd904 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -37,12 +37,15 @@ typedef struct SIFParam { int64_t suid; // add later char dbName[TSDB_DB_NAME_LEN]; char colName[TSDB_COL_NAME_LEN]; + + SIndexMetaArg arg; } SIFParam; typedef struct SIFCtx { - int32_t code; - SHashObj *pRes; /* element is SIFParam */ - bool noExec; // true: just iterate condition tree, and add hint to executor plan + int32_t code; + SHashObj * pRes; /* element is SIFParam */ + bool noExec; // true: just iterate condition tree, and add hint to executor plan + SIndexMetaArg arg; // SIdxFltStatus st; } SIFCtx; @@ -257,7 +260,9 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu return TSDB_CODE_QRY_INVALID_INPUT; } static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) { - SIndexTerm *tm = indexTermCreate(left->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName), +#ifdef USE_INVERTED_INDEX + SIndexMetaArg *arg = &output->arg; + SIndexTerm * tm = indexTermCreate(arg->suid, DEFAULT, left->colValType, left->colName, strlen(left->colName), right->condValue, strlen(right->condValue)); if (tm == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -268,9 +273,13 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP SIndexMultiTermQuery *mtm = indexMultiTermQueryCreate(MUST); indexMultiTermQueryAdd(mtm, tm, qtype); - int ret = indexSearch(NULL, mtm, output->result); + int ret = indexSearch(arg->metaHandle, mtm, output->result); + indexDebug("index filter data size: %d", (int)taosArrayGetSize(output->result)); indexMultiTermQueryDestroy(mtm); return ret; +#else + return 0; +#endif } static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) { @@ -372,6 +381,8 @@ static int32_t sifExecOper(SOperatorNode *node, SIFCtx *ctx, SIFParam 
*output) { SIFParam *params = NULL; SIF_ERR_RET(sifInitOperParams(&params, node, ctx)); + // ugly code, refactor later + output->arg = ctx->arg; sif_func_t operFn = sifGetOperFn(node->opType); if (ctx->noExec && operFn == NULL) { output->status = SFLT_NOT_INDEX; @@ -423,7 +434,7 @@ _return: static EDealRes sifWalkFunction(SNode *pNode, void *context) { SFunctionNode *node = (SFunctionNode *)pNode; - SIFParam output = {0}; + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecFunction(node, ctx, &output); @@ -439,7 +450,8 @@ static EDealRes sifWalkFunction(SNode *pNode, void *context) { } static EDealRes sifWalkLogic(SNode *pNode, void *context) { SLogicConditionNode *node = (SLogicConditionNode *)pNode; - SIFParam output = {0}; + + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecLogic(node, ctx, &output); @@ -455,7 +467,7 @@ static EDealRes sifWalkLogic(SNode *pNode, void *context) { } static EDealRes sifWalkOper(SNode *pNode, void *context) { SOperatorNode *node = (SOperatorNode *)pNode; - SIFParam output = {0}; + SIFParam output = {.result = taosArrayInit(8, sizeof(uint64_t))}; SIFCtx *ctx = context; ctx->code = sifExecOper(node, ctx, &output); @@ -507,8 +519,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) { return TSDB_CODE_QRY_INVALID_INPUT; } int32_t code = 0; - SIFCtx ctx = {.code = 0, .noExec = false}; + SIFCtx ctx = {.code = 0, .noExec = false, .arg = pDst->arg}; ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + if (NULL == ctx.pRes) { indexError("index-filter failed to taosHashInit"); return TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -523,7 +536,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) { indexError("no valid res in hash, node:(%p), type(%d)", (void *)&pNode, nodeType(pNode)); SIF_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } - taosArrayAddAll(pDst->result, res->result); + if (res->result != NULL) { + taosArrayAddAll(pDst->result, res->result); + } sifFreeParam(res); taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES); @@ -561,7 +576,7 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) { SIF_RET(code); } -int32_t doFilterTag(const SNode *pFilterNode, SArray *result) { +int32_t doFilterTag(const SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result) { if (pFilterNode == NULL) { return TSDB_CODE_SUCCESS; } @@ -570,10 +585,12 @@ int32_t doFilterTag(const SNode *pFilterNode, SArray *result) { // todo move to the initialization function // SIF_ERR_RET(filterInitFromNode((SNode *)pFilterNode, &filter, 0)); - SIFParam param = {0}; + SArray * output = taosArrayInit(8, sizeof(uint64_t)); + SIFParam param = {.arg = *metaArg, .result = output}; SIF_ERR_RET(sifCalculate((SNode *)pFilterNode, &param)); taosArrayAddAll(result, param.result); + // taosArrayAddAll(result, param.result); sifFreeParam(&param); SIF_RET(TSDB_CODE_SUCCESS); } diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index 3de556e8b50c27f11687ea6b45fcf5da9675fed3..53dd2923ac8c1f07b62098a3663c030016b46a72 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -60,31 +60,31 @@ static void tfileGenFileFullName(char* fullname, const char* path, uint64_t s /* * search from tfile */ -static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t
tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr); - -static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype); - -static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); -static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr); - -static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype); - -static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTempResult* tr) = { +static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr); + +static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype); + +static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); +static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr); + +static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype); + +static int32_t (*tfSearch[][QUERY_MAX])(void* reader, SIndexTerm* tem, SIdxTRslt* tr) = { {tfSearchTerm, tfSearchPrefix, tfSearchSuffix, tfSearchRegex, 
tfSearchLessThan, tfSearchLessEqual, tfSearchGreaterThan, tfSearchGreaterEqual, tfSearchRange}, {tfSearchTerm_JSON, tfSearchPrefix_JSON, tfSearchSuffix_JSON, tfSearchRegex_JSON, tfSearchLessThan_JSON, @@ -211,16 +211,16 @@ void tfileReaderDestroy(TFileReader* reader) { } // T_REF_INC(reader); fstDestroy(reader->fst); - writerCtxDestroy(reader->ctx, reader->remove); if (reader->remove) { indexInfo("%s is removed", reader->ctx->file.buf); } else { indexInfo("%s is not removed", reader->ctx->file.buf); } + writerCtxDestroy(reader->ctx, reader->remove); taosMemoryFree(reader); } -static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { int ret = 0; char* p = tem->colVal; uint64_t sz = tem->nColVal; @@ -243,7 +243,7 @@ static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { return 0; } -static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); char* p = tem->colVal; uint64_t sz = tem->nColVal; @@ -279,7 +279,7 @@ static int32_t tfSearchPrefix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) } return 0; } -static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; @@ -298,7 +298,7 @@ static int32_t tfSearchSuffix(void* reader, SIndexTerm* tem, SIdxTempResult* tr) fstSliceDestroy(&key); return 0; } -static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; @@ -319,7 +319,7 @@ static int32_t tfSearchRegex(void* reader, SIndexTerm* tem, SIdxTempResult* tr) return 0; } -static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType type) { +static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType type) { int ret = 0; char* p = tem->colVal; int skip = 0; @@ -358,19 +358,19 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTempResult fstStreamBuilderDestroy(sb); return TSDB_CODE_SUCCESS; } -static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, LT); } -static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, LE); } -static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterThan(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, GT); } -static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterEqual(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc(reader, tem, tr, GE); } -static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { 
bool hasJson = INDEX_TYPE_CONTAIN_EXTERN_TYPE(tem->colType, TSDB_DATA_TYPE_JSON); int ret = 0; char* p = tem->colVal; @@ -399,7 +399,7 @@ static int32_t tfSearchRange(void* reader, SIndexTerm* tem, SIdxTempResult* tr) fstSliceDestroy(&key); return 0; } -static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { int ret = 0; char* p = indexPackJsonData(tem); int sz = strlen(p); @@ -424,36 +424,36 @@ static int32_t tfSearchTerm_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* // deprecate api return TSDB_CODE_SUCCESS; } -static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchPrefix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchSuffix_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRegex_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, LT); } -static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchLessEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, LE); } -static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterThan_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, GT); } -static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchGreaterEqual_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { return tfSearchCompareFunc_JSON(reader, tem, tr, GE); } -static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr) { +static int32_t tfSearchRange_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr) { // impl later return TSDB_CODE_SUCCESS; } -static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempResult* tr, RangeType ctype) { +static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTRslt* tr, RangeType ctype) { int ret = 0; int skip = 0; @@ -501,7 +501,7 @@ static int32_t tfSearchCompareFunc_JSON(void* reader, SIndexTerm* tem, SIdxTempR fstStreamBuilderDestroy(sb); return TSDB_CODE_SUCCESS; } -int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTempResult* tr) { +int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr) { SIndexTerm* term = query->term; EIndexQueryType qtype = query->qType; int ret = 0; @@ -673,7 +673,7 @@ void indexTFileDestroy(IndexTFile* tfile) { taosMemoryFree(tfile); } -int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTempResult* result) { +int indexTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) { int ret = -1; if (tfile == NULL) { return ret; diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c index 
a618787fd49c96b729e782b4a01a5374c76639be..1d2027889572fcd809e378dcae13560b0bae51c1 100644 --- a/source/libs/index/src/indexUtil.c +++ b/source/libs/index/src/indexUtil.c @@ -36,24 +36,24 @@ static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) { return s; } -void iIntersection(SArray *inters, SArray *final) { - int32_t sz = (int32_t)taosArrayGetSize(inters); +void iIntersection(SArray *in, SArray *out) { + int32_t sz = (int32_t)taosArrayGetSize(in); if (sz <= 0) { return; } MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); for (int i = 0; i < sz; i++) { - SArray *t = taosArrayGetP(inters, i); + SArray *t = taosArrayGetP(in, i); mi[i].len = (int32_t)taosArrayGetSize(t); mi[i].idx = 0; } - SArray *base = taosArrayGetP(inters, 0); + SArray *base = taosArrayGetP(in, 0); for (int i = 0; i < taosArrayGetSize(base); i++) { uint64_t tgt = *(uint64_t *)taosArrayGet(base, i); bool has = true; - for (int j = 1; j < taosArrayGetSize(inters); j++) { - SArray *oth = taosArrayGetP(inters, j); + for (int j = 1; j < taosArrayGetSize(in); j++) { + SArray *oth = taosArrayGetP(in, j); int mid = iBinarySearch(oth, mi[j].idx, mi[j].len - 1, tgt); if (mid >= 0 && mid < mi[j].len) { uint64_t val = *(uint64_t *)taosArrayGet(oth, mid); @@ -64,33 +64,33 @@ void iIntersection(SArray *inters, SArray *final) { } } if (has == true) { - taosArrayPush(final, &tgt); + taosArrayPush(out, &tgt); } } taosMemoryFreeClear(mi); } -void iUnion(SArray *inters, SArray *final) { - int32_t sz = (int32_t)taosArrayGetSize(inters); +void iUnion(SArray *in, SArray *out) { + int32_t sz = (int32_t)taosArrayGetSize(in); if (sz <= 0) { return; } if (sz == 1) { - taosArrayAddAll(final, taosArrayGetP(inters, 0)); + taosArrayAddAll(out, taosArrayGetP(in, 0)); return; } MergeIndex *mi = taosMemoryCalloc(sz, sizeof(MergeIndex)); for (int i = 0; i < sz; i++) { - SArray *t = taosArrayGetP(inters, i); + SArray *t = taosArrayGetP(in, i); mi[i].len = (int32_t)taosArrayGetSize(t); mi[i].idx = 0; } while (1) { - uint64_t mVal = UINT_MAX; + uint64_t mVal = UINT64_MAX; int mIdx = -1; for (int j = 0; j < sz; j++) { - SArray *t = taosArrayGetP(inters, j); + SArray *t = taosArrayGetP(in, j); if (mi[j].idx >= mi[j].len) { continue; } @@ -102,13 +102,13 @@ void iUnion(SArray *inters, SArray *final) { } if (mIdx != -1) { mi[mIdx].idx++; - if (taosArrayGetSize(final) > 0) { - uint64_t lVal = *(uint64_t *)taosArrayGetLast(final); + if (taosArrayGetSize(out) > 0) { + uint64_t lVal = *(uint64_t *)taosArrayGetLast(out); if (lVal == mVal) { continue; } } - taosArrayPush(final, &mVal); + taosArrayPush(out, &mVal); } else { break; } @@ -158,41 +158,44 @@ int verdataCompare(const void *a, const void *b) { return cmp; } -SIdxTempResult *sIdxTempResultCreate() { - SIdxTempResult *tr = taosMemoryCalloc(1, sizeof(SIdxTempResult)); +SIdxTRslt *idxTRsltCreate() { + SIdxTRslt *tr = taosMemoryCalloc(1, sizeof(SIdxTRslt)); tr->total = taosArrayInit(4, sizeof(uint64_t)); - tr->added = taosArrayInit(4, sizeof(uint64_t)); - tr->deled = taosArrayInit(4, sizeof(uint64_t)); + tr->add = taosArrayInit(4, sizeof(uint64_t)); + tr->del = taosArrayInit(4, sizeof(uint64_t)); return tr; } -void sIdxTempResultClear(SIdxTempResult *tr) { +void idxTRsltClear(SIdxTRslt *tr) { if (tr == NULL) { return; } taosArrayClear(tr->total); - taosArrayClear(tr->added); - taosArrayClear(tr->deled); + taosArrayClear(tr->add); + taosArrayClear(tr->del); } -void sIdxTempResultDestroy(SIdxTempResult *tr) { +void idxTRsltDestroy(SIdxTRslt *tr) { if (tr == NULL) { return; } 
taosArrayDestroy(tr->total); - taosArrayDestroy(tr->added); - taosArrayDestroy(tr->deled); + taosArrayDestroy(tr->add); + taosArrayDestroy(tr->del); } -void sIdxTempResultMergeTo(SArray *result, SIdxTempResult *tr) { +void idxTRsltMergeTo(SIdxTRslt *tr, SArray *result) { taosArraySort(tr->total, uidCompare); - taosArraySort(tr->added, uidCompare); - taosArraySort(tr->deled, uidCompare); - - SArray *arrs = taosArrayInit(2, sizeof(void *)); - taosArrayPush(arrs, &tr->total); - taosArrayPush(arrs, &tr->added); - - iUnion(arrs, result); - taosArrayDestroy(arrs); - - iExcept(result, tr->deled); + taosArraySort(tr->add, uidCompare); + taosArraySort(tr->del, uidCompare); + + if (taosArrayGetSize(tr->total) == 0 || taosArrayGetSize(tr->add) == 0) { + SArray *t = taosArrayGetSize(tr->total) == 0 ? tr->add : tr->total; + taosArrayAddAll(result, t); + } else { + SArray *arrs = taosArrayInit(2, sizeof(void *)); + taosArrayPush(arrs, &tr->total); + taosArrayPush(arrs, &tr->add); + iUnion(arrs, result); + taosArrayDestroy(arrs); + } + iExcept(result, tr->del); } diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt index c0b47e74c6b0561141806dae8ce14ab4d632ec8e..2835084a81b87e358916c20ce0e6c70cf6884021 100644 --- a/source/libs/index/test/CMakeLists.txt +++ b/source/libs/index/test/CMakeLists.txt @@ -1,74 +1,74 @@ -add_executable(indexTest "") -add_executable(fstTest "") -add_executable(fstUT "") -add_executable(UtilUT "") -add_executable(jsonUT "") +add_executable(idxTest "") +add_executable(idxFstTest "") +add_executable(idxFstUT "") +add_executable(idxUtilUT "") +add_executable(idxJsonUT "") -target_sources(indexTest +target_sources(idxTest PRIVATE "indexTests.cc" ) -target_sources(fstTest +target_sources(idxFstTest PRIVATE "fstTest.cc" ) -target_sources(fstUT +target_sources(idxFstUT PRIVATE "fstUT.cc" ) -target_sources(UtilUT +target_sources(idxUtilUT PRIVATE "utilUT.cc" ) -target_sources(jsonUT +target_sources(idxJsonUT PRIVATE "jsonUT.cc" ) -target_include_directories ( indexTest +target_include_directories (idxTest PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( fstTest +target_include_directories (idxFstTest PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( fstUT +target_include_directories (idxFstUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories ( UtilUT +target_include_directories (idxUtilUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_include_directories (jsonUT +target_include_directories (idxJsonUT PUBLIC "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_link_libraries (indexTest +target_link_libraries (idxTest os util common gtest_main index ) -target_link_libraries (fstTest +target_link_libraries (idxFstTest os util common gtest_main index ) -target_link_libraries (fstUT +target_link_libraries (idxFstUT os util common @@ -76,7 +76,7 @@ target_link_libraries (fstUT index ) -target_link_libraries (UtilUT +target_link_libraries (idxUtilUT os util common @@ -84,7 +84,7 @@ target_link_libraries (UtilUT index ) -target_link_libraries (jsonUT +target_link_libraries (idxJsonUT os util common @@ -92,19 +92,21 @@ target_link_libraries (jsonUT index ) -add_test( - NAME idxtest - COMMAND indexTest -) -add_test( - NAME idxJsonUT - COMMAND jsonUT -) +if(NOT TD_WINDOWS) + add_test( + NAME 
idxtest + COMMAND idxTest + ) + add_test( + NAME idxJsonUT + COMMAND idxJsonUT + ) +endif(NOT TD_WINDOWS) add_test( NAME idxUtilUT - COMMAND UtilUT + COMMAND idxUtilUT ) add_test( NAME idxFstUT - COMMAND fstUT + COMMAND idxFstUT ) diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc index 2d06002af854b1860faf7985fd23e68275207c46..74a30c3387ea3c3133e4e4f82ffd3dd8dc38f540 100644 --- a/source/libs/index/test/indexTests.cc +++ b/source/libs/index/test/indexTests.cc @@ -272,7 +272,7 @@ void validateFst() { } delete m; } -static std::string logDir = "/tmp/log"; +static std::string logDir = TD_TMP_DIR_PATH "log"; static void initLog() { const char* defaultLogFileNamePrefix = "taoslog"; @@ -411,12 +411,12 @@ class TFileObj { // // } - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); int ret = tfileReaderSearch(reader_, query, tr); - sIdxTempResultMergeTo(result, tr); - sIdxTempResultDestroy(tr); + idxTRsltMergeTo(tr, result); + idxTRsltDestroy(tr); return ret; } ~TFileObj() { @@ -531,11 +531,11 @@ class CacheObj { indexCacheDebug(cache); } int Get(SIndexTermQuery* query, int16_t colId, int32_t version, SArray* result, STermValueType* s) { - SIdxTempResult* tr = sIdxTempResultCreate(); + SIdxTRslt* tr = idxTRsltCreate(); int ret = indexCacheSearch(cache, query, tr, s); - sIdxTempResultMergeTo(result, tr); - sIdxTempResultDestroy(tr); + idxTRsltMergeTo(tr, result); + idxTRsltDestroy(tr); if (ret != 0) { std::cout << "failed to get from cache:" << ret << std::endl; @@ -916,7 +916,7 @@ TEST_F(IndexEnv2, testIndexOpen) { } } TEST_F(IndexEnv2, testEmptyIndexOpen) { - std::string path = "/tmp/test"; + std::string path = TD_TMP_DIR_PATH "test"; if (index->Init(path) != 0) { std::cout << "failed to init index" << std::endl; exit(1); diff --git a/source/libs/index/test/utilUT.cc b/source/libs/index/test/utilUT.cc index 18a2b457c41c2cd66f20a01f3690d0af4fe69d3d..4a30160244d82b8c00b3e7b031d6fd492057ec21 100644 --- a/source/libs/index/test/utilUT.cc +++ b/source/libs/index/test/utilUT.cc @@ -226,6 +226,22 @@ TEST_F(UtilEnv, 04union) { iUnion(src, rslt); assert(taosArrayGetSize(rslt) == 12); } +TEST_F(UtilEnv, 05unionExcept) { + clearSourceArray(src); + clearFinalArray(rslt); + + uint64_t arr2[] = {7}; + SArray * f = (SArray *)taosArrayGetP(src, 1); + for (int i = 0; i < sizeof(arr2) / sizeof(arr2[0]); i++) { + taosArrayPush(f, &arr2[i]); + } + + iUnion(src, rslt); + + SArray *ept = taosArrayInit(0, sizeof(uint64_t)); + iExcept(rslt, ept); + EXPECT_EQ(taosArrayGetSize(rslt), 1); +} TEST_F(UtilEnv, 01Except) { SArray *total = taosArrayInit(4, sizeof(uint64_t)); { @@ -308,16 +324,36 @@ TEST_F(UtilEnv, 01Except) { ASSERT_EQ(*(uint64_t *)taosArrayGet(total, 1), 100); } TEST_F(UtilEnv, testFill) { - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = i; char buf[65] = {0}; indexInt2str(val, buf, 1); EXPECT_EQ(val, taosStr2int64(buf)); } - for (int i = 0; i < 10000000; i++) { + for (int i = 0; i < 1000000; i++) { int64_t val = 0 - i; char buf[65] = {0}; indexInt2str(val, buf, -1); EXPECT_EQ(val, taosStr2int64(buf)); } } +TEST_F(UtilEnv, TempResult) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val = UINT64_MAX - 1; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} +TEST_F(UtilEnv, TempResultExcept) { + SIdxTRslt *relt = idxTRsltCreate(); + + SArray *f = taosArrayInit(0, sizeof(uint64_t)); + + uint64_t val 
= UINT64_MAX; + taosArrayPush(relt->add, &val); + idxTRsltMergeTo(relt, f); + EXPECT_EQ(taosArrayGetSize(f), 1); +} diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 8887b9841ac8dc907d3a9a71360db20674278cfd..78710569cbe6718c6fa899448a1cab11edebaab3 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1130,6 +1130,9 @@ static const char* jkTableScanPhysiPlanOffset = "Offset"; static const char* jkTableScanPhysiPlanSliding = "Sliding"; static const char* jkTableScanPhysiPlanIntervalUnit = "intervalUnit"; static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit"; +static const char* jkTableScanPhysiPlanTriggerType = "triggerType"; +static const char* jkTableScanPhysiPlanWatermark = "watermark"; +static const char* jkTableScanPhysiPlanTsColId = "tsColId"; static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj; @@ -1171,6 +1174,15 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId); + } return code; } @@ -1221,6 +1233,15 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code); ; } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTriggerType, pNode->triggerType, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanWatermark, pNode->watermark, code); + } + if (TSDB_CODE_SUCCESS == code) { + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code); + } return code; } @@ -2534,7 +2555,7 @@ static const char* jkSessionWindowTsPrimaryKey = "TsPrimaryKey"; static const char* jkSessionWindowGap = "Gap"; static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) { - const SSessionWindowNode * pNode = (const SSessionWindowNode*)pObj; + const SSessionWindowNode* pNode = (const SSessionWindowNode*)pObj; int32_t code = tjsonAddObject(pJson, jkSessionWindowTsPrimaryKey, nodeToJson, pNode->pCol); if (TSDB_CODE_SUCCESS == code) { @@ -2546,9 +2567,9 @@ static int32_t sessionWindowNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToSessionWindowNode(const SJson* pJson, void* pObj) { SSessionWindowNode* pNode = (SSessionWindowNode*)pObj; - int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode **)&pNode->pCol); + int32_t code = jsonToNodeObject(pJson, jkSessionWindowTsPrimaryKey, (SNode**)&pNode->pCol); if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode **)&pNode->pGap); + code = jsonToNodeObject(pJson, jkSessionWindowGap, (SNode**)&pNode->pGap); } return code; } @@ -2775,6 +2796,150 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkDatabaseOptionsBuffer = "Buffer"; +static const char* jkDatabaseOptionsCachelast = "Cachelast"; +static const char* 
jkDatabaseOptionsCompressionLevel = "CompressionLevel"; +static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode"; +static const char* jkDatabaseOptionsDaysPerFile = "DaysPerFile"; +static const char* jkDatabaseOptionsFsyncPeriod = "FsyncPeriod"; +static const char* jkDatabaseOptionsMaxRowsPerBlock = "MaxRowsPerBlock"; +static const char* jkDatabaseOptionsMinRowsPerBlock = "MinRowsPerBlock"; +static const char* jkDatabaseOptionsKeep = "Keep"; +static const char* jkDatabaseOptionsPages = "Pages"; +static const char* jkDatabaseOptionsPagesize = "Pagesize"; +static const char* jkDatabaseOptionsPrecision = "Precision"; +static const char* jkDatabaseOptionsReplica = "Replica"; +static const char* jkDatabaseOptionsStrict = "Strict"; +static const char* jkDatabaseOptionsWalLevel = "WalLevel"; +static const char* jkDatabaseOptionsNumOfVgroups = "NumOfVgroups"; +static const char* jkDatabaseOptionsSingleStable = "SingleStable"; +static const char* jkDatabaseOptionsRetentions = "Retentions"; +static const char* jkDatabaseOptionsSchemaless = "Schemaless"; + +static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) { + const SDatabaseOptions* pNode = (const SDatabaseOptions*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cachelast); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkDatabaseOptionsDaysPerFileNode, nodeToJson, pNode->pDaysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsDaysPerFile, pNode->daysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsFsyncPeriod, pNode->fsyncPeriod); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMaxRowsPerBlock, pNode->maxRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsMinRowsPerBlock, pNode->minRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkDatabaseOptionsKeep, pNode->pKeep); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPages, pNode->pages); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsPagesize, pNode->pagesize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsReplica, pNode->replica); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsStrict, pNode->strict); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsWalLevel, pNode->walLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsNumOfVgroups, pNode->numOfVgroups); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsSingleStable, pNode->singleStable); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkDatabaseOptionsRetentions, pNode->pRetentions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, 
jkDatabaseOptionsSchemaless, pNode->schemaless); + } + + return code; +} + +static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) { + SDatabaseOptions* pNode = (SDatabaseOptions*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cachelast); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkDatabaseOptionsDaysPerFileNode, (SNode**)&pNode->pDaysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsDaysPerFile, &pNode->daysPerFile); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsFsyncPeriod, &pNode->fsyncPeriod); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsMaxRowsPerBlock, &pNode->maxRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsMinRowsPerBlock, &pNode->minRowsPerBlock); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkDatabaseOptionsKeep, &pNode->pKeep); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsPages, &pNode->pages); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsPagesize, &pNode->pagesize); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkDatabaseOptionsPrecision, pNode->precisionStr); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsReplica, &pNode->replica); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsStrict, &pNode->strict); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsWalLevel, &pNode->walLevel); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkDatabaseOptionsNumOfVgroups, &pNode->numOfVgroups); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSingleStable, &pNode->singleStable); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkDatabaseOptionsRetentions, &pNode->pRetentions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsSchemaless, &pNode->schemaless); + } + + return code; +} + static const char* jkDataBlockDescDataBlockId = "DataBlockId"; static const char* jkDataBlockDescSlots = "Slots"; static const char* jkDataBlockTotalRowSize = "TotalRowSize"; @@ -2977,6 +3142,130 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) { return code; } +static const char* jkAlterDatabaseStmtDbName = "DbName"; +static const char* jkAlterDatabaseStmtOptions = "Options"; + +static int32_t alterDatabaseStmtToJson(const void* pObj, SJson* pJson) { + const SAlterDatabaseStmt* pNode = (const SAlterDatabaseStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkAlterDatabaseStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterDatabaseStmtOptions, nodeToJson, pNode->pOptions); + } + + return code; +} + +static int32_t jsonToAlterDatabaseStmt(const SJson* pJson, void* pObj) { + SAlterDatabaseStmt* pNode = (SAlterDatabaseStmt*)pObj; + + int32_t code = tjsonGetStringValue(pJson, jkAlterDatabaseStmtDbName, pNode->dbName); + 
if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterDatabaseStmtOptions, (SNode**)&pNode->pOptions); + } + + return code; +} + +static const char* jkAlterTableStmtDbName = "DbName"; +static const char* jkAlterTableStmtTableName = "TableName"; +static const char* jkAlterTableStmtAlterType = "AlterType"; +static const char* jkAlterTableStmtColName = "ColName"; +static const char* jkAlterTableStmtNewColName = "NewColName"; +static const char* jkAlterTableStmtOptions = "Options"; +static const char* jkAlterTableStmtNewDataType = "NewDataType"; +static const char* jkAlterTableStmtNewTagVal = "NewTagVal"; + +static int32_t alterTableStmtToJson(const void* pObj, SJson* pJson) { + const SAlterTableStmt* pNode = (const SAlterTableStmt*)pObj; + + int32_t code = tjsonAddStringToObject(pJson, jkAlterTableStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtTableName, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkAlterTableStmtAlterType, pNode->alterType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtColName, pNode->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterTableStmtNewColName, pNode->newColName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pOptions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtNewDataType, dataTypeToJson, &pNode->dataType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkAlterTableStmtOptions, nodeToJson, pNode->pVal); + } + + return code; +} + +static int32_t jsonToAlterTableStmt(const SJson* pJson, void* pObj) { + SAlterTableStmt* pNode = (SAlterTableStmt*)pObj; + + int32_t code = tjsonGetStringValue(pJson, jkAlterTableStmtDbName, pNode->dbName); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtTableName, pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkAlterTableStmtAlterType, &pNode->alterType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtColName, pNode->colName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterTableStmtNewColName, pNode->newColName); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pOptions); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkAlterTableStmtNewDataType, jsonToDataType, &pNode->dataType); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkAlterTableStmtOptions, (SNode**)&pNode->pVal); + } + + return code; +} + +static const char* jkAlterDnodeStmtDnodeId = "DnodeId"; +static const char* jkAlterDnodeStmtConfig = "Config"; +static const char* jkAlterDnodeStmtValue = "Value"; + +static int32_t alterDnodeStmtToJson(const void* pObj, SJson* pJson) { + const SAlterDnodeStmt* pNode = (const SAlterDnodeStmt*)pObj; + + int32_t code = tjsonAddIntegerToObject(pJson, jkAlterDnodeStmtDnodeId, pNode->dnodeId); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtConfig, pNode->config); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkAlterDnodeStmtValue, pNode->value); + } + + return code; +} + +static int32_t jsonToAlterDnodeStmt(const SJson* pJson, void* 
pObj) { + SAlterDnodeStmt* pNode = (SAlterDnodeStmt*)pObj; + + int32_t code = tjsonGetIntValue(pJson, jkAlterDnodeStmtDnodeId, &pNode->dnodeId); + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterDnodeStmtConfig, pNode->config); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkAlterDnodeStmtValue, pNode->value); + } + + return code; +} + static const char* jkCreateTopicStmtTopicName = "TopicName"; static const char* jkCreateTopicStmtSubscribeDbName = "SubscribeDbName"; static const char* jkCreateTopicStmtIgnoreExists = "IgnoreExists"; @@ -3061,6 +3350,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { break; case QUERY_NODE_DOWNSTREAM_SOURCE: return downstreamSourceNodeToJson(pObj, pJson); + case QUERY_NODE_DATABASE_OPTIONS: + return databaseOptionsToJson(pObj, pJson); case QUERY_NODE_LEFT_VALUE: return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize. case QUERY_NODE_SET_OPERATOR: @@ -3069,8 +3360,17 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return selectStmtToJson(pObj, pJson); case QUERY_NODE_VNODE_MODIF_STMT: case QUERY_NODE_CREATE_DATABASE_STMT: + break; + case QUERY_NODE_ALTER_DATABASE_STMT: + return alterDatabaseStmtToJson(pObj, pJson); case QUERY_NODE_CREATE_TABLE_STMT: + break; + case QUERY_NODE_ALTER_TABLE_STMT: + return alterTableStmtToJson(pObj, pJson); case QUERY_NODE_USE_DATABASE_STMT: + break; + case QUERY_NODE_ALTER_DNODE_STMT: + return alterDnodeStmtToJson(pObj, pJson); case QUERY_NODE_SHOW_DATABASES_STMT: case QUERY_NODE_SHOW_TABLES_STMT: break; @@ -3177,12 +3477,20 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToSlotDescNode(pJson, pObj); case QUERY_NODE_DOWNSTREAM_SOURCE: return jsonToDownstreamSourceNode(pJson, pObj); + case QUERY_NODE_DATABASE_OPTIONS: + return jsonToDatabaseOptions(pJson, pObj); case QUERY_NODE_LEFT_VALUE: return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize. 
case QUERY_NODE_SET_OPERATOR: return jsonToSetOperator(pJson, pObj); case QUERY_NODE_SELECT_STMT: return jsonToSelectStmt(pJson, pObj); + case QUERY_NODE_ALTER_DATABASE_STMT: + return jsonToAlterDatabaseStmt(pJson, pObj); + case QUERY_NODE_ALTER_TABLE_STMT: + return jsonToAlterTableStmt(pJson, pObj); + case QUERY_NODE_ALTER_DNODE_STMT: + return jsonToAlterDnodeStmt(pJson, pObj); case QUERY_NODE_CREATE_TOPIC_STMT: return jsonToCreateTopicStmt(pJson, pObj); case QUERY_NODE_LOGIC_PLAN_SCAN: diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 50c05170b3937d37862b3ea7e8a2f47046be5c80..a1c304118bfcdc5078bf0a19b73a8bde17e3c0cf 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -27,14 +27,13 @@ extern "C" { #include "querynodes.h" typedef struct SAstCreateContext { - SParseContext* pQueryCxt; - SMsgBuf msgBuf; - bool notSupport; - SNode* pRootNode; - int16_t placeholderNo; - SArray* pPlaceholderValues; - int32_t errCode; - SParseMetaCache* pMetaCache; + SParseContext* pQueryCxt; + SMsgBuf msgBuf; + bool notSupport; + SNode* pRootNode; + int16_t placeholderNo; + SArray* pPlaceholderValues; + int32_t errCode; } SAstCreateContext; typedef enum EDatabaseOptionType { @@ -75,7 +74,7 @@ typedef struct SAlterOption { extern SToken nil_token; -int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt); +void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt); SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode); SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SToken* pEnd, SNode* pNode); diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h index 2ad1ebc1121d96f243fff9d55980b26bffdf6c04..184ed7d8b243ed2ec97e4c38b1f1e31de9e3f2c2 100644 --- a/source/libs/parser/inc/parInt.h +++ b/source/libs/parser/inc/parInt.h @@ -26,6 +26,7 @@ extern "C" { int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery); int32_t parse(SParseContext* pParseCxt, SQuery** pQuery); +int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery); int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery); int32_t translate(SParseContext* pParseCxt, SQuery* pQuery); int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema); diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h index 7ad5a7ecab20c99114cf8d05363037985c74d9bd..80288dbc448a0cd35212da5e672b6b59bc021313 100644 --- a/source/libs/parser/inc/parUtil.h +++ b/source/libs/parser/inc/parUtil.h @@ -24,12 +24,12 @@ extern "C" { #include "os.h" #include "query.h" -#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__) -#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__) -#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__) -#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__) -#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__) -#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__) +#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__) +#define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__) +#define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__) +#define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__) +#define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__) +#define parserTrace(param, ...) 
qTrace("PARSER: " param, ##__VA_ARGS__) #define PK_TS_COL_INTERNAL_NAME "_rowts" @@ -42,7 +42,10 @@ typedef struct SParseMetaCache { SHashObj* pTableMeta; // key is tbFName, element is STableMeta* SHashObj* pDbVgroup; // key is dbFName, element is SArray* SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo* - SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo + SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo* + SHashObj* pDbInfo; // key is tbFName, element is SDbInfo* + SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass + SHashObj* pUdf; // key is funcName, element is SFuncInfo* } SParseMetaCache; int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...); @@ -62,12 +65,22 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen); int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq); int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache); int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache); +int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache); +int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, + SParseMetaCache* pMetaCache); +int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache); int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta); -int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo); -int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup); -int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, +int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo); +int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup); +int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, int32_t* pTableNum); -int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo); +int32_t getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo); +int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDb, AUTH_TYPE type, + bool* pPass); +int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo); #ifdef __cplusplus } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 4aa28409b73e08e4ec445c22f55c02fbdd328b90..836a0cb520684e264cecb3cd6425ae3c7688de68 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -38,7 +38,7 @@ SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL}; -int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) { +void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) { memset(pCxt, 0, 
sizeof(SAstCreateContext)); pCxt->pQueryCxt = pParseCxt; pCxt->msgBuf.buf = pParseCxt->pMsg; @@ -48,13 +48,6 @@ int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) pCxt->placeholderNo = 0; pCxt->pPlaceholderValues = NULL; pCxt->errCode = TSDB_CODE_SUCCESS; - if (pParseCxt->async) { - pCxt->pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache)); - if (NULL == pCxt->pMetaCache) { - return TSDB_CODE_OUT_OF_MEMORY; - } - } - return TSDB_CODE_SUCCESS; } static void copyStringFormStringToken(SToken* pToken, char* pBuf, int32_t len) { @@ -472,13 +465,6 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTa strncpy(realTable->table.tableAlias, pTableName->z, pTableName->n); } strncpy(realTable->table.tableName, pTableName->z, pTableName->n); - if (NULL != pCxt->pMetaCache) { - if (TSDB_CODE_SUCCESS != reserveTableMetaInCache(pCxt->pQueryCxt->acctId, realTable->table.dbName, - realTable->table.tableName, pCxt->pMetaCache)) { - nodesDestroyNode(realTable); - CHECK_OUT_OF_MEM(NULL); - } - } return (SNode*)realTable; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 28c79a88f05f03885f0ace1dfe73d8e52dd498f9..5d65a0b80bebc98a02e21458ae558661c4e5439b 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -13,11 +13,12 @@ * along with this program. If not, see . */ +#include "functionMgt.h" #include "os.h" -#include "parInt.h" - #include "parAst.h" +#include "parInt.h" #include "parToken.h" +#include "systable.h" typedef void* (*FMalloc)(size_t); typedef void (*FFree)(void*); @@ -82,8 +83,386 @@ abort_parse: (*pQuery)->pRoot = cxt.pRootNode; (*pQuery)->placeholderNum = cxt.placeholderNo; TSWAP((*pQuery)->pPlaceholderValues, cxt.pPlaceholderValues); - TSWAP((*pQuery)->pMetaCache, cxt.pMetaCache); } taosArrayDestroy(cxt.pPlaceholderValues); return cxt.errCode; } + +typedef struct SCollectMetaKeyCxt { + SParseContext* pParseCxt; + SParseMetaCache* pMetaCache; +} SCollectMetaKeyCxt; + +static void destroyCollectMetaKeyCxt(SCollectMetaKeyCxt* pCxt) { + if (NULL != pCxt->pMetaCache) { + // TODO + } +} + +typedef struct SCollectMetaKeyFromExprCxt { + SCollectMetaKeyCxt* pComCxt; + int32_t errCode; +} SCollectMetaKeyFromExprCxt; + +static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt); + +static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) { + if (fmIsBuiltinFunc(pFunc->functionName)) { + return TSDB_CODE_SUCCESS; + } + return reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache); +} + +static EDealRes collectMetaKeyFromRealTable(SCollectMetaKeyFromExprCxt* pCxt, SRealTableNode* pRealTable) { + pCxt->errCode = reserveTableMetaInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName, + pRealTable->table.tableName, pCxt->pComCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + pCxt->errCode = reserveTableVgroupInCache(pCxt->pComCxt->pParseCxt->acctId, pRealTable->table.dbName, + pRealTable->table.tableName, pCxt->pComCxt->pMetaCache); + } + if (TSDB_CODE_SUCCESS == pCxt->errCode) { + pCxt->errCode = reserveUserAuthInCache(pCxt->pComCxt->pParseCxt->acctId, pCxt->pComCxt->pParseCxt->pUser, + pRealTable->table.dbName, AUTH_TYPE_READ, pCxt->pComCxt->pMetaCache); + } + return TSDB_CODE_SUCCESS == pCxt->errCode ? 
DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromTempTable(SCollectMetaKeyFromExprCxt* pCxt, STempTableNode* pTempTable) { + pCxt->errCode = collectMetaKeyFromQuery(pCxt->pComCxt, pTempTable->pSubquery); + return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR; +} + +static EDealRes collectMetaKeyFromExprImpl(SNode* pNode, void* pContext) { + SCollectMetaKeyFromExprCxt* pCxt = pContext; + switch (nodeType(pNode)) { + case QUERY_NODE_FUNCTION: + return collectMetaKeyFromFunction(pCxt, (SFunctionNode*)pNode); + case QUERY_NODE_REAL_TABLE: + return collectMetaKeyFromRealTable(pCxt, (SRealTableNode*)pNode); + case QUERY_NODE_TEMP_TABLE: + return collectMetaKeyFromTempTable(pCxt, (STempTableNode*)pNode); + default: + break; + } + return DEAL_RES_CONTINUE; +} + +static int32_t collectMetaKeyFromExprs(SCollectMetaKeyCxt* pCxt, SNodeList* pList) { + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + nodesWalkExprs(pList, collectMetaKeyFromExprImpl, &cxt); + return cxt.errCode; +} + +static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOperator* pStmt) { + int32_t code = collectMetaKeyFromQuery(pCxt, pStmt->pLeft); + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKeyFromQuery(pCxt, pStmt->pRight); + } + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKeyFromExprs(pCxt, pStmt->pOrderByList); + } + return code; +} + +static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) { + SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS}; + nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt); + return cxt.errCode; +} + +static int32_t collectMetaKeyFromCreateTable(SCollectMetaKeyCxt* pCxt, SCreateTableStmt* pStmt) { + if (NULL == pStmt->pTags) { + return reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } else { + return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); + } +} + +static int32_t collectMetaKeyFromCreateMultiTable(SCollectMetaKeyCxt* pCxt, SCreateMultiTableStmt* pStmt) { + int32_t code = TSDB_CODE_SUCCESS; + SNode* pNode = NULL; + FOREACH(pNode, pStmt->pSubTables) { + SCreateSubTableClause* pClause = (SCreateSubTableClause*)pNode; + code = + reserveTableMetaInCache(pCxt->pParseCxt->acctId, pClause->useDbName, pClause->useTableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, pCxt->pMetaCache); + } + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + return code; +} + +static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } + return code; +} + +static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) { + return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromCreateIndex(SCollectMetaKeyCxt* pCxt, SCreateIndexStmt* pStmt) { + int32_t code = TSDB_CODE_SUCCESS; + if (INDEX_TYPE_SMA == pStmt->indexType) { + code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, 
pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = + reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->tableName, pCxt->pMetaCache); + } + } + return code; +} + +static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, SCreateTopicStmt* pStmt) { + if (NULL != pStmt->pQuery) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t collectMetaKeyFromExplain(SCollectMetaKeyCxt* pCxt, SExplainStmt* pStmt) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); +} + +static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateStreamStmt* pStmt) { + return collectMetaKeyFromQuery(pCxt, pStmt->pQuery); +} + +static int32_t collectMetaKeyFromShowDnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowMnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowModules(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowQnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowSnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_BNODES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_DATABASES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowFunctions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_FUNCTIONS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_INDEXES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_STABLES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, + TSDB_INS_TABLE_USER_TABLES, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + if (NULL != 
pStmt->pDbName) { + code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache); + } else { + code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache); + } + } + return code; +} + +static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowLicence(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_LICENCES, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { + switch (nodeType(pStmt)) { + case QUERY_NODE_SET_OPERATOR: + return collectMetaKeyFromSetOperator(pCxt, (SSetOperator*)pStmt); + case QUERY_NODE_SELECT_STMT: + return collectMetaKeyFromSelect(pCxt, (SSelectStmt*)pStmt); + case QUERY_NODE_VNODE_MODIF_STMT: + case QUERY_NODE_CREATE_DATABASE_STMT: + case QUERY_NODE_DROP_DATABASE_STMT: + case QUERY_NODE_ALTER_DATABASE_STMT: + break; + case QUERY_NODE_CREATE_TABLE_STMT: + return collectMetaKeyFromCreateTable(pCxt, (SCreateTableStmt*)pStmt); + case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: + break; + case QUERY_NODE_CREATE_MULTI_TABLE_STMT: + return collectMetaKeyFromCreateMultiTable(pCxt, (SCreateMultiTableStmt*)pStmt); + case QUERY_NODE_DROP_TABLE_CLAUSE: + case QUERY_NODE_DROP_TABLE_STMT: + case QUERY_NODE_DROP_SUPER_TABLE_STMT: + break; + case QUERY_NODE_ALTER_TABLE_STMT: + return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt); + case QUERY_NODE_CREATE_USER_STMT: + case QUERY_NODE_ALTER_USER_STMT: + case QUERY_NODE_DROP_USER_STMT: + break; + case QUERY_NODE_USE_DATABASE_STMT: + return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt); + case QUERY_NODE_CREATE_DNODE_STMT: + case QUERY_NODE_DROP_DNODE_STMT: + case QUERY_NODE_ALTER_DNODE_STMT: + break; + case QUERY_NODE_CREATE_INDEX_STMT: + return collectMetaKeyFromCreateIndex(pCxt, (SCreateIndexStmt*)pStmt); + case QUERY_NODE_DROP_INDEX_STMT: + case QUERY_NODE_CREATE_QNODE_STMT: + case QUERY_NODE_DROP_QNODE_STMT: + case QUERY_NODE_CREATE_BNODE_STMT: + case QUERY_NODE_DROP_BNODE_STMT: + case QUERY_NODE_CREATE_SNODE_STMT: + case QUERY_NODE_DROP_SNODE_STMT: + case QUERY_NODE_CREATE_MNODE_STMT: + case QUERY_NODE_DROP_MNODE_STMT: + break; + case QUERY_NODE_CREATE_TOPIC_STMT: + return collectMetaKeyFromCreateTopic(pCxt, (SCreateTopicStmt*)pStmt); + case QUERY_NODE_DROP_TOPIC_STMT: + case QUERY_NODE_DROP_CGROUP_STMT: + case QUERY_NODE_ALTER_LOCAL_STMT: + break; + case QUERY_NODE_EXPLAIN_STMT: + return collectMetaKeyFromExplain(pCxt, (SExplainStmt*)pStmt); + case QUERY_NODE_DESCRIBE_STMT: + case 
QUERY_NODE_RESET_QUERY_CACHE_STMT:
+    case QUERY_NODE_COMPACT_STMT:
+    case QUERY_NODE_CREATE_FUNCTION_STMT:
+    case QUERY_NODE_DROP_FUNCTION_STMT:
+      break;
+    case QUERY_NODE_CREATE_STREAM_STMT:
+      return collectMetaKeyFromCreateStream(pCxt, (SCreateStreamStmt*)pStmt);
+    case QUERY_NODE_DROP_STREAM_STMT:
+    case QUERY_NODE_MERGE_VGROUP_STMT:
+    case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
+    case QUERY_NODE_SPLIT_VGROUP_STMT:
+    case QUERY_NODE_SYNCDB_STMT:
+    case QUERY_NODE_GRANT_STMT:
+    case QUERY_NODE_REVOKE_STMT:
+      break;
+    case QUERY_NODE_SHOW_DNODES_STMT:
+      return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_MNODES_STMT:
+      return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_MODULES_STMT:
+      return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_QNODES_STMT:
+      return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_SNODES_STMT:
+      return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_BNODES_STMT:
+      return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_CLUSTER_STMT:
+      break;
+    case QUERY_NODE_SHOW_DATABASES_STMT:
+      return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_FUNCTIONS_STMT:
+      return collectMetaKeyFromShowFunctions(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_INDEXES_STMT:
+      return collectMetaKeyFromShowIndexes(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_STABLES_STMT:
+      return collectMetaKeyFromShowStables(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_STREAMS_STMT:
+      return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_TABLES_STMT:
+      return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_USERS_STMT:
+      return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_LICENCE_STMT:
+      return collectMetaKeyFromShowLicence(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_VGROUPS_STMT:
+      return collectMetaKeyFromShowVgroups(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_TOPICS_STMT:
+      return collectMetaKeyFromShowTopics(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_SHOW_CONSUMERS_STMT:
+    case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
+    case QUERY_NODE_SHOW_SMAS_STMT:
+    case QUERY_NODE_SHOW_CONFIGS_STMT:
+    case QUERY_NODE_SHOW_CONNECTIONS_STMT:
+    case QUERY_NODE_SHOW_QUERIES_STMT:
+    case QUERY_NODE_SHOW_VNODES_STMT:
+    case QUERY_NODE_SHOW_APPS_STMT:
+    case QUERY_NODE_SHOW_SCORES_STMT:
+    case QUERY_NODE_SHOW_VARIABLE_STMT:
+    case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
+    case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
+    case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
+      break;
+    case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
+      return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt);
+    case QUERY_NODE_KILL_CONNECTION_STMT:
+    case QUERY_NODE_KILL_QUERY_STMT:
+    case QUERY_NODE_KILL_TRANSACTION_STMT:
+    default:
+      break;
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t collectMetaKey(SParseContext* pParseCxt, SQuery* pQuery) {
+  SCollectMetaKeyCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache))};
+  if (NULL == cxt.pMetaCache) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  int32_t code = collectMetaKeyFromQuery(&cxt, pQuery->pRoot);
+  if (TSDB_CODE_SUCCESS == code) {
+    TSWAP(pQuery->pMetaCache, cxt.pMetaCache);
+  }
+  destroyCollectMetaKeyCxt(&cxt);
+  return code;
+}
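With collectMetaKey in place, the async path splits parsing into a pure-AST phase and a cache-backed phase. A rough sketch of the intended call order, pieced together from syntaxParseSql in parser.c and the runAsync harness in parTestUtil.cpp below (error handling abbreviated; the catalog fetch between buildCatalogReq and putMetaDataToCache is left to the caller):

    SQuery* pQuery = NULL;
    int32_t code = parse(pCxt, &pQuery);                 // build the AST, no catalog access
    if (TSDB_CODE_SUCCESS == code) {
      code = collectMetaKey(pCxt, pQuery);               // walk the AST, reserve cache slots
    }
    SCatalogReq req = {0};
    if (TSDB_CODE_SUCCESS == code) {
      code = buildCatalogReq(pQuery->pMetaCache, &req);  // one batched catalog request
    }
    SMetaData data = {0};  // filled in by the catalog module, possibly on another thread
    if (TSDB_CODE_SUCCESS == code) {
      code = putMetaDataToCache(&req, &data, pQuery->pMetaCache);
    }
    if (TSDB_CODE_SUCCESS == code) {
      code = authenticate(pCxt, pQuery);                 // answered by getUserAuthFromCache
    }
    if (TSDB_CODE_SUCCESS == code) {
      code = translate(pCxt, pQuery);                    // get*FromCache instead of catalog RPCs
    }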
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index 250e7910d69847a130fa4f0b2132b3dcb99da8e7..2670e5710b9f5418c401e9799678c68d82c8f29d 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -18,23 +18,30 @@
 #include "parInt.h"
 
 typedef struct SAuthCxt {
-  SParseContext* pParseCxt;
-  int32_t        errCode;
+  SParseContext*   pParseCxt;
+  SParseMetaCache* pMetaCache;
+  int32_t          errCode;
 } SAuthCxt;
 
 static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt);
 
-static int32_t checkAuth(SParseContext* pCxt, const char* pDbName, AUTH_TYPE type) {
-  if (pCxt->isSuperUser) {
+static int32_t checkAuth(SAuthCxt* pCxt, const char* pDbName, AUTH_TYPE type) {
+  SParseContext* pParseCxt = pCxt->pParseCxt;
+  if (pParseCxt->isSuperUser) {
     return TSDB_CODE_SUCCESS;
   }
   SName name;
-  tNameSetDbName(&name, pCxt->acctId, pDbName, strlen(pDbName));
+  tNameSetDbName(&name, pParseCxt->acctId, pDbName, strlen(pDbName));
   char dbFname[TSDB_DB_FNAME_LEN] = {0};
   tNameGetFullDbName(&name, dbFname);
+  int32_t code = TSDB_CODE_SUCCESS;
   bool pass = false;
-  int32_t code =
-      catalogChkAuth(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, pCxt->pUser, dbFname, type, &pass);
+  if (NULL != pCxt->pMetaCache) {
+    code = getUserAuthFromCache(pCxt->pMetaCache, pParseCxt->pUser, dbFname, type, &pass);
+  } else {
+    code = catalogChkAuth(pParseCxt->pCatalog, pParseCxt->pTransporter, &pParseCxt->mgmtEpSet, pParseCxt->pUser,
+                          dbFname, type, &pass);
+  }
   return TSDB_CODE_SUCCESS == code ? (pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
 }
 
@@ -45,7 +52,7 @@ static EDealRes authSubquery(SAuthCxt* pCxt, SNode* pStmt) {
 static EDealRes authSelectImpl(SNode* pNode, void* pContext) {
   SAuthCxt* pCxt = pContext;
   if (QUERY_NODE_REAL_TABLE == nodeType(pNode)) {
-    pCxt->errCode = checkAuth(pCxt->pParseCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
+    pCxt->errCode = checkAuth(pCxt, ((SRealTableNode*)pNode)->table.dbName, AUTH_TYPE_READ);
     return TSDB_CODE_SUCCESS == pCxt->errCode ?
DEAL_RES_CONTINUE : DEAL_RES_ERROR; } else if (QUERY_NODE_TEMP_TABLE == nodeType(pNode)) { return authSubquery(pCxt, ((STempTableNode*)pNode)->pSubquery); @@ -79,87 +86,8 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authSetOperator(pCxt, (SSetOperator*)pStmt); case QUERY_NODE_SELECT_STMT: return authSelect(pCxt, (SSelectStmt*)pStmt); - case QUERY_NODE_CREATE_DATABASE_STMT: - case QUERY_NODE_DROP_DATABASE_STMT: - case QUERY_NODE_ALTER_DATABASE_STMT: - case QUERY_NODE_CREATE_TABLE_STMT: - case QUERY_NODE_CREATE_SUBTABLE_CLAUSE: - case QUERY_NODE_CREATE_MULTI_TABLE_STMT: - case QUERY_NODE_DROP_TABLE_CLAUSE: - case QUERY_NODE_DROP_TABLE_STMT: - case QUERY_NODE_DROP_SUPER_TABLE_STMT: - case QUERY_NODE_ALTER_TABLE_STMT: - case QUERY_NODE_CREATE_USER_STMT: - case QUERY_NODE_ALTER_USER_STMT: - break; - case QUERY_NODE_DROP_USER_STMT: { + case QUERY_NODE_DROP_USER_STMT: return authDropUser(pCxt, (SDropUserStmt*)pStmt); - } - case QUERY_NODE_USE_DATABASE_STMT: - case QUERY_NODE_CREATE_DNODE_STMT: - case QUERY_NODE_DROP_DNODE_STMT: - case QUERY_NODE_ALTER_DNODE_STMT: - case QUERY_NODE_CREATE_INDEX_STMT: - case QUERY_NODE_DROP_INDEX_STMT: - case QUERY_NODE_CREATE_QNODE_STMT: - case QUERY_NODE_DROP_QNODE_STMT: - case QUERY_NODE_CREATE_BNODE_STMT: - case QUERY_NODE_DROP_BNODE_STMT: - case QUERY_NODE_CREATE_SNODE_STMT: - case QUERY_NODE_DROP_SNODE_STMT: - case QUERY_NODE_CREATE_MNODE_STMT: - case QUERY_NODE_DROP_MNODE_STMT: - case QUERY_NODE_CREATE_TOPIC_STMT: - case QUERY_NODE_DROP_TOPIC_STMT: - case QUERY_NODE_ALTER_LOCAL_STMT: - case QUERY_NODE_EXPLAIN_STMT: - case QUERY_NODE_DESCRIBE_STMT: - case QUERY_NODE_RESET_QUERY_CACHE_STMT: - case QUERY_NODE_COMPACT_STMT: - case QUERY_NODE_CREATE_FUNCTION_STMT: - case QUERY_NODE_DROP_FUNCTION_STMT: - case QUERY_NODE_CREATE_STREAM_STMT: - case QUERY_NODE_DROP_STREAM_STMT: - case QUERY_NODE_MERGE_VGROUP_STMT: - case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT: - case QUERY_NODE_SPLIT_VGROUP_STMT: - case QUERY_NODE_SYNCDB_STMT: - case QUERY_NODE_GRANT_STMT: - case QUERY_NODE_REVOKE_STMT: - case QUERY_NODE_SHOW_DNODES_STMT: - case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: - case QUERY_NODE_SHOW_QNODES_STMT: - case QUERY_NODE_SHOW_SNODES_STMT: - case QUERY_NODE_SHOW_BNODES_STMT: - case QUERY_NODE_SHOW_CLUSTER_STMT: - case QUERY_NODE_SHOW_DATABASES_STMT: - case QUERY_NODE_SHOW_FUNCTIONS_STMT: - case QUERY_NODE_SHOW_INDEXES_STMT: - case QUERY_NODE_SHOW_STABLES_STMT: - case QUERY_NODE_SHOW_STREAMS_STMT: - case QUERY_NODE_SHOW_TABLES_STMT: - case QUERY_NODE_SHOW_USERS_STMT: - case QUERY_NODE_SHOW_LICENCE_STMT: - case QUERY_NODE_SHOW_VGROUPS_STMT: - case QUERY_NODE_SHOW_TOPICS_STMT: - case QUERY_NODE_SHOW_CONSUMERS_STMT: - case QUERY_NODE_SHOW_SUBSCRIBES_STMT: - case QUERY_NODE_SHOW_SMAS_STMT: - case QUERY_NODE_SHOW_CONFIGS_STMT: - case QUERY_NODE_SHOW_CONNECTIONS_STMT: - case QUERY_NODE_SHOW_QUERIES_STMT: - case QUERY_NODE_SHOW_VNODES_STMT: - case QUERY_NODE_SHOW_APPS_STMT: - case QUERY_NODE_SHOW_SCORES_STMT: - case QUERY_NODE_SHOW_VARIABLE_STMT: - case QUERY_NODE_SHOW_CREATE_DATABASE_STMT: - case QUERY_NODE_SHOW_CREATE_TABLE_STMT: - case QUERY_NODE_SHOW_CREATE_STABLE_STMT: - case QUERY_NODE_SHOW_TRANSACTIONS_STMT: - case QUERY_NODE_KILL_CONNECTION_STMT: - case QUERY_NODE_KILL_QUERY_STMT: - case QUERY_NODE_KILL_TRANSACTION_STMT: default: break; } @@ -168,6 +96,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { } int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery) { - SAuthCxt cxt = {.pParseCxt = pParseCxt, 
.errCode = TSDB_CODE_SUCCESS}; + SAuthCxt cxt = {.pParseCxt = pParseCxt, .pMetaCache = pQuery->pMetaCache, .errCode = TSDB_CODE_SUCCESS}; return authQuery(&cxt, pQuery->pRoot); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d142d89373065df859259aaa7d3d565f1216c9f7..d84b005f7f0cd8bd91a3f9bbd17e9a8e7fa81a78 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -152,7 +152,7 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr tNameGetFullDbName(pName, fullDbName); int32_t code = TSDB_CODE_SUCCESS; if (pParCxt->async) { - code = getDBVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo); + code = getDbVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo); } else { code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs); if (TSDB_CODE_SUCCESS == code) { @@ -177,7 +177,7 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam SParseContext* pParCxt = pCxt->pParseCxt; int32_t code = TSDB_CODE_SUCCESS; if (pParCxt->async) { - code = getTableHashVgroupFromCache(pCxt->pMetaCache, pName, pInfo); + code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); } else { code = collectUseDatabase(pName, pCxt->pDbs); if (TSDB_CODE_SUCCESS == code) { @@ -205,7 +205,7 @@ static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int SParseContext* pParCxt = pCxt->pParseCxt; int32_t code = TSDB_CODE_SUCCESS; if (pParCxt->async) { - code = getDBVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum); + code = getDbVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum); } else { code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs); if (TSDB_CODE_SUCCESS == code) { @@ -226,7 +226,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo tNameGetFullDbName(&name, dbFname); int32_t code = TSDB_CODE_SUCCESS; if (pParCxt->async) { - code = getDBCfgFromCache(pCxt->pMetaCache, dbFname, pInfo); + code = getDbCfgFromCache(pCxt->pMetaCache, dbFname, pInfo); } else { code = collectUseDatabaseImpl(dbFname, pCxt->pDbs); if (TSDB_CODE_SUCCESS == code) { @@ -239,6 +239,27 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo return code; } +static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { + SParseContext* pParCxt = pCxt->pParseCxt; + SFuncInfo funcInfo = {0}; + int32_t code = TSDB_CODE_SUCCESS; + if (pParCxt->async) { + code = getUdfInfoFromCache(pCxt->pMetaCache, pFunc->functionName, &funcInfo); + } else { + code = catalogGetUdfInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pFunc->functionName, + &funcInfo); + } + if (TSDB_CODE_SUCCESS == code) { + pFunc->funcType = FUNCTION_TYPE_UDF; + pFunc->funcId = TSDB_FUNC_TYPE_AGGREGATE == funcInfo.funcType ? 
FUNC_AGGREGATE_UDF_ID : FUNC_SCALAR_UDF_ID; + pFunc->node.resType.type = funcInfo.outputType; + pFunc->node.resType.bytes = funcInfo.outputLen; + pFunc->udfBufSize = funcInfo.bufSize; + tFreeSFuncInfo(&funcInfo); + } + return code; +} + static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) { pCxt->pParseCxt = pParseCxt; pCxt->errCode = TSDB_CODE_SUCCESS; @@ -873,12 +894,11 @@ static bool hasInvalidFuncNesting(SNodeList* pParameterList) { } static int32_t getFuncInfo(STranslateContext* pCxt, SFunctionNode* pFunc) { - SFmGetFuncInfoParam param = {.pCtg = pCxt->pParseCxt->pCatalog, - .pRpc = pCxt->pParseCxt->pTransporter, - .pMgmtEps = &pCxt->pParseCxt->mgmtEpSet, - .pErrBuf = pCxt->msgBuf.buf, - .errBufLen = pCxt->msgBuf.len}; - return fmGetFuncInfo(¶m, pFunc); + int32_t code = fmGetFuncInfo(pFunc, pCxt->msgBuf.buf, pCxt->msgBuf.len); + if (TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION == code) { + code = getUdfInfo(pCxt, pFunc); + } + return code; } static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { @@ -1212,7 +1232,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea int32_t code = TSDB_CODE_SUCCESS; SArray* vgroupList = NULL; if ('\0' != pRealTable->qualDbName[0]) { - // todo release after mnode can be processed if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) { code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList); } @@ -1220,7 +1239,6 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea code = getDBVgInfoImpl(pCxt, pName, &vgroupList); } - // todo release after mnode can be processed if (TSDB_CODE_SUCCESS == code) { code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList); } diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 9de43b8cd38d612a6bfd96685edc3f92bca23060..34b01991545cdfdea46203b6edc73098e273fd39 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -15,6 +15,9 @@ #include "parUtil.h" #include "cJSON.h" +#include "querynodes.h" + +#define USER_AUTH_KEY_MAX_LEN TSDB_USER_LEN + TSDB_DB_FNAME_LEN + 2 static char* getSyntaxErrFormat(int32_t errCode) { switch (errCode) { @@ -255,17 +258,8 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; } -static uint32_t getTableMetaSize(const STableMeta* pTableMeta) { - int32_t totalCols = 0; - if (pTableMeta->tableInfo.numOfColumns >= 0) { - totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; - } - - return sizeof(STableMeta) + totalCols * sizeof(SSchema); -} - STableMeta* tableMetaDup(const STableMeta* pTableMeta) { - size_t size = getTableMetaSize(pTableMeta); + size_t size = TABLE_META_SIZE(pTableMeta); STableMeta* p = taosMemoryMalloc(size); memcpy(p, pTableMeta, size); @@ -449,6 +443,26 @@ end: return retCode; } +static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, char* pStr) { + return sprintf(pStr, "%s*%d.%s*%d", pUser, acctId, pDb, type); +} + +static int32_t userAuthToStringExt(const char* pUser, const char* pDbFName, AUTH_TYPE type, char* pStr) { + return sprintf(pStr, "%s*%s*%d", pUser, pDbFName, type); +} + +static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) { + char* p1 = strchr(pStr, '*'); + strncpy(pUserAuth->user, pStr, p1 - pStr); + ++p1; + char* p2 = strchr(p1, '*'); + strncpy(pUserAuth->dbFName, p1, p2 - p1); + ++p2; + 
char buf[10] = {0}; + strncpy(buf, p2, len - (p2 - pStr)); + pUserAuth->type = taosStr2Int32(buf, NULL, 10); +} + static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) { if (NULL != pTablesHash) { *pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName)); @@ -503,6 +517,44 @@ static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVg static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); } +static int32_t buildUserAuthReq(SHashObj* pUserAuthHash, SArray** pUserAuth) { + if (NULL != pUserAuthHash) { + *pUserAuth = taosArrayInit(taosHashGetSize(pUserAuthHash), sizeof(SUserAuthInfo)); + if (NULL == *pUserAuth) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pUserAuthHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pKey = taosHashGetKey(p, &len); + SUserAuthInfo userAuth = {0}; + stringToUserAuth(pKey, len, &userAuth); + taosArrayPush(*pUserAuth, &userAuth); + p = taosHashIterate(pUserAuthHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) { + if (NULL != pUdfHash) { + *pUdf = taosArrayInit(taosHashGetSize(pUdfHash), TSDB_FUNC_NAME_LEN); + if (NULL == *pUdf) { + return TSDB_CODE_OUT_OF_MEMORY; + } + void* p = taosHashIterate(pUdfHash, NULL); + while (NULL != p) { + size_t len = 0; + char* pFunc = taosHashGetKey(p, &len); + char func[TSDB_FUNC_NAME_LEN] = {0}; + strncpy(func, pFunc, len); + taosArrayPush(*pUdf, func); + p = taosHashIterate(pUdfHash, p); + } + } + return TSDB_CODE_SUCCESS; +} + int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta); if (TSDB_CODE_SUCCESS == code) { @@ -512,7 +564,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash); } if (TSDB_CODE_SUCCESS == code) { - code = buildDbCfgReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbCfg); + code = buildDbCfgReq(pMetaCache->pDbCfg, &pCatalogReq->pDbCfg); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildUserAuthReq(pMetaCache->pUserAuth, &pCatalogReq->pUser); + } + if (TSDB_CODE_SUCCESS == code) { + code = buildUdfReq(pMetaCache->pUdf, &pCatalogReq->pUdf); } return code; } @@ -568,6 +626,31 @@ static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData return TSDB_CODE_SUCCESS; } +static int32_t putUserAuthToCache(const SArray* pUserAuthReq, const SArray* pUserAuthData, SHashObj* pUserAuth) { + int32_t nvgs = taosArrayGetSize(pUserAuthReq); + for (int32_t i = 0; i < nvgs; ++i) { + SUserAuthInfo* pUser = taosArrayGet(pUserAuthReq, i); + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToStringExt(pUser->user, pUser->dbFName, pUser->type, key); + if (TSDB_CODE_SUCCESS != taosHashPut(pUserAuth, key, len, taosArrayGet(pUserAuthData, i), sizeof(bool))) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + +static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHashObj* pUdf) { + int32_t num = taosArrayGetSize(pUdfReq); + for (int32_t i = 0; i < num; ++i) { + char* pFunc = taosArrayGet(pUdfReq, i); + SFuncInfo* pInfo = taosArrayGet(pUdfData, i); + if (TSDB_CODE_SUCCESS != taosHashPut(pUdf, pFunc, strlen(pFunc), &pInfo, POINTER_BYTES)) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return TSDB_CODE_SUCCESS; +} + int32_t 
putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) { int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta); if (TSDB_CODE_SUCCESS == code) { @@ -579,54 +662,161 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet if (TSDB_CODE_SUCCESS == code) { code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg); } + if (TSDB_CODE_SUCCESS == code) { + code = putUserAuthToCache(pCatalogReq->pUser, pMetaData->pUser, pMetaCache->pUserAuth); + } + if (TSDB_CODE_SUCCESS == code) { + code = putUdfToCache(pCatalogReq->pUdf, pMetaData->pUdfList, pMetaCache->pUdf); + } return code; } -int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) { - if (NULL == pMetaCache->pTableMeta) { - pMetaCache->pTableMeta = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - if (NULL == pMetaCache->pTableMeta) { +static int32_t reserveTableReqInCache(int32_t acctId, const char* pDb, const char* pTable, SHashObj** pTables) { + if (NULL == *pTables) { + *pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == *pTables) { return TSDB_CODE_OUT_OF_MEMORY; } } char fullName[TSDB_TABLE_FNAME_LEN]; int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable); - return taosHashPut(pMetaCache->pTableMeta, fullName, len, &len, POINTER_BYTES); + return taosHashPut(*pTables, fullName, len, &pTables, POINTER_BYTES); +} + +int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) { + return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableMeta); } int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) { char fullName[TSDB_TABLE_FNAME_LEN]; tNameExtractFullName(pName, fullName); - *pMeta = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName)); - return NULL == *pMeta ? 
TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
+  STableMeta** pRes = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  *pMeta = tableMetaDup(*pRes);
+  if (NULL == *pMeta) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveDbReqInCache(int32_t acctId, const char* pDb, SHashObj** pDbs) {
+  if (NULL == *pDbs) {
+    *pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+    if (NULL == *pDbs) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  char fullName[TSDB_TABLE_FNAME_LEN];
+  int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s", acctId, pDb);
+  return taosHashPut(*pDbs, fullName, len, &pDbs, POINTER_BYTES);
+}
+
+int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+  return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbVgroup);
+}
+
+int32_t getDbVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
+  SArray** pRes = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
+  if (NULL == pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  // *pRes is null, which is a legal value, indicating that the user DB has not been created
+  if (NULL != *pRes) {
+    *pVgInfo = taosArrayDup(*pRes);
+    if (NULL == *pVgInfo) {
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+  }
+  return TSDB_CODE_SUCCESS;
 }
 
-int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
-  *pVgInfo = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
-  return NULL == *pVgInfo ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
+int32_t reserveTableVgroupInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
+  return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableVgroup);
 }
 
-int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
+int32_t getTableVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
   char fullName[TSDB_TABLE_FNAME_LEN];
   tNameExtractFullName(pName, fullName);
-  SVgroupInfo* pInfo = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
-  if (NULL == pInfo) {
+  SVgroupInfo** pRes = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
+  if (NULL == pRes || NULL == *pRes) {
     return TSDB_CODE_PAR_INTERNAL_ERROR;
   }
-  memcpy(pVgroup, pInfo, sizeof(SVgroupInfo));
+  memcpy(pVgroup, *pRes, sizeof(SVgroupInfo));
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
+int32_t reserveDbVgVersionInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+  return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbInfo);
+}
+
+int32_t getDbVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
                                 int32_t* pTableNum) {
-  return TSDB_CODE_PAR_INTERNAL_ERROR;
+  SDbInfo** pRes = taosHashGet(pMetaCache->pDbInfo, pDbFName, strlen(pDbFName));
+  if (NULL == pRes || NULL == *pRes) {
+    return TSDB_CODE_PAR_INTERNAL_ERROR;
+  }
+  *pVersion = (*pRes)->vgVer;
+  *pDbId = (*pRes)->dbId;
+  *pTableNum = (*pRes)->tbNum;
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t reserveDbCfgInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache) {
+  return reserveDbReqInCache(acctId, pDb, &pMetaCache->pDbCfg);
+}
+
+int32_t
getDbCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) { + SDbCfgInfo** pRes = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName)); + if (NULL == pRes || NULL == *pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + memcpy(pInfo, *pRes, sizeof(SDbCfgInfo)); + return TSDB_CODE_SUCCESS; +} + +int32_t reserveUserAuthInCache(int32_t acctId, const char* pUser, const char* pDb, AUTH_TYPE type, + SParseMetaCache* pMetaCache) { + if (NULL == pMetaCache->pUserAuth) { + pMetaCache->pUserAuth = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == pMetaCache->pUserAuth) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToString(acctId, pUser, pDb, type, key); + bool pass = false; + return taosHashPut(pMetaCache->pUserAuth, key, len, &pass, sizeof(pass)); +} + +int32_t getUserAuthFromCache(SParseMetaCache* pMetaCache, const char* pUser, const char* pDbFName, AUTH_TYPE type, + bool* pPass) { + char key[USER_AUTH_KEY_MAX_LEN] = {0}; + int32_t len = userAuthToStringExt(pUser, pDbFName, type, key); + bool* pRes = taosHashGet(pMetaCache->pUserAuth, key, len); + if (NULL == pRes) { + return TSDB_CODE_PAR_INTERNAL_ERROR; + } + *pPass = *pRes; + return TSDB_CODE_SUCCESS; +} + +int32_t reserveUdfInCache(const char* pFunc, SParseMetaCache* pMetaCache) { + if (NULL == pMetaCache->pUdf) { + pMetaCache->pUdf = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (NULL == pMetaCache->pUdf) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + return taosHashPut(pMetaCache->pUdf, pFunc, strlen(pFunc), &pMetaCache, POINTER_BYTES); } -int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) { - SDbCfgInfo* pDbCfg = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName)); - if (NULL == pDbCfg) { +int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFuncInfo* pInfo) { + SFuncInfo** pRes = taosHashGet(pMetaCache->pUdf, pFunc, strlen(pFunc)); + if (NULL == pRes || NULL == *pRes) { return TSDB_CODE_PAR_INTERNAL_ERROR; } - memcpy(pInfo, pDbCfg, sizeof(SDbCfgInfo)); + memcpy(pInfo, *pRes, sizeof(SFuncInfo)); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c index a9962b044491fc20dd55237f66026576b36ef49f..54aa9c642cfb9cf4c580addf374f3a91907dd56a 100644 --- a/source/libs/parser/src/parser.c +++ b/source/libs/parser/src/parser.c @@ -59,6 +59,14 @@ static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) { return code; } +static int32_t syntaxParseSql(SParseContext* pCxt, SQuery** pQuery) { + int32_t code = parse(pCxt, pQuery); + if (TSDB_CODE_SUCCESS == code) { + code = collectMetaKey(pCxt, *pQuery); + } + return code; +} + static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { if (pParam->is_null && 1 == *(pParam->is_null)) { pVal->node.resType.type = TSDB_DATA_TYPE_NULL; @@ -98,6 +106,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { } varDataSetLen(pVal->datum.p, pVal->node.resType.bytes); strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes); + pVal->node.resType.bytes += VARSTR_HEADER_SIZE; break; case TSDB_DATA_TYPE_NCHAR: { pVal->node.resType.bytes *= TSDB_NCHAR_SIZE; @@ -112,7 +121,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) { return errno; } varDataSetLen(pVal->datum.p, 
output); - pVal->node.resType.bytes = output; + pVal->node.resType.bytes = output + VARSTR_HEADER_SIZE; break; } case TSDB_DATA_TYPE_TIMESTAMP: @@ -188,7 +197,7 @@ int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) { // todo insert sql } else { - code = parse(pCxt, pQuery); + code = syntaxParseSql(pCxt, pQuery); } if (TSDB_CODE_SUCCESS == code) { code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq); diff --git a/source/libs/parser/test/CMakeLists.txt b/source/libs/parser/test/CMakeLists.txt index 0e8adb978dd0e9fca5a67e9999ce7c5faa877cc0..c252653e9ee743ca8c9e899f6851e1893fb766be 100644 --- a/source/libs/parser/test/CMakeLists.txt +++ b/source/libs/parser/test/CMakeLists.txt @@ -26,7 +26,9 @@ if(${BUILD_WINGETOPT}) target_link_libraries(parserTest PUBLIC wingetopt) endif() -add_test( - NAME parserTest - COMMAND parserTest -) +if(NOT TD_WINDOWS) + add_test( + NAME parserTest + COMMAND parserTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index 19460fb87a914519e8501c5f1381df16a419dade..154f13ea686aa172d9c2ad53bfadcae893305ed0 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -103,7 +103,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) { } { ITableBuilder& builder = mcs->createTableBuilder("performance_schema", "streams", TSDB_SYSTEM_TABLE, 1) - .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); + .addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN); builder.done(); } } @@ -157,6 +157,12 @@ void generateTestST1(MockCatalogService* mcs) { mcs->createSubTable("test", "st1", "st1s3", 1); } +void generateFunctions(MockCatalogService* mcs) { + mcs->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT, tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0); + mcs->createFunction("udf2", TSDB_FUNC_TYPE_AGGREGATE, TSDB_DATA_TYPE_DOUBLE, tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, + 8); +} + } // namespace int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; } @@ -196,6 +202,11 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con return 0; } +int32_t __catalogGetUdfInfo(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps, const char* funcName, + SFuncInfo* pInfo) { + return g_mockCatalogService->catalogGetUdfInfo(funcName, pInfo); +} + void initMetaDataEnv() { g_mockCatalogService.reset(new MockCatalogService()); @@ -209,6 +220,7 @@ void initMetaDataEnv() { stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo); stub.set(catalogGetDBCfg, __catalogGetDBCfg); stub.set(catalogChkAuth, __catalogChkAuth); + stub.set(catalogGetUdfInfo, __catalogGetUdfInfo); // { // AddrAny any("libcatalog.so"); // std::map result; @@ -256,6 +268,7 @@ void generateMetaData() { generatePerformanceSchema(g_mockCatalogService.get()); generateTestT1(g_mockCatalogService.get()); generateTestST1(g_mockCatalogService.get()); + generateFunctions(g_mockCatalogService.get()); g_mockCatalogService->showTables(); } diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index 9e9e5cd2af16fa9569765cec395eb2d9f6e9b11c..1b03b9683045597a5c57d37d8572b603eae47be2 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -120,11 +120,35 @@ class MockCatalogServiceImpl { return copyTableVgroup(db, 
tNameGetTableName(pTableName), vgList);
   }
 
+  int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+    auto it = udf_.find(funcName);
+    if (udf_.end() == it) {
+      return TSDB_CODE_FAILED;
+    }
+    memcpy(pInfo, it->second.get(), sizeof(SFuncInfo));
+    return TSDB_CODE_SUCCESS;
+  }
+
   int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
     int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta);
     if (TSDB_CODE_SUCCESS == code) {
       code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash);
     }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllDbVgroup(pCatalogReq->pDbVgroup, &pMetaData->pDbVgroup);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllDbCfg(pCatalogReq->pDbCfg, &pMetaData->pDbCfg);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllDbInfo(pCatalogReq->pDbInfo, &pMetaData->pDbInfo);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllUserAuth(pCatalogReq->pUser, &pMetaData->pUser);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      code = getAllUdf(pCatalogReq->pUdf, &pMetaData->pUdfList);
+    }
     return code;
   }
 
@@ -211,21 +235,21 @@
     }
   }
 
-  std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const {
-    DbMetaCache::const_iterator it = meta_.find(db);
-    if (meta_.end() == it) {
-      return std::shared_ptr<STableMeta>();
-    }
-    TableMetaCache::const_iterator tit = it->second.find(tbname);
-    if (it->second.end() == tit) {
-      return std::shared_ptr<STableMeta>();
-    }
-    return tit->second;
+  void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize) {
+    std::shared_ptr<SFuncInfo> info(new SFuncInfo);
+    strcpy(info->name, func.c_str());
+    info->funcType = funcType;
+    info->scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+    info->outputType = outputType;
+    info->outputLen = outputLen;
+    info->bufSize = bufSize;
+    udf_.insert(std::make_pair(func, info));
   }
 
  private:
   typedef std::map<std::string, std::shared_ptr<STableMeta>> TableMetaCache;
   typedef std::map<std::string, TableMetaCache> DbMetaCache;
+  typedef std::map<std::string, std::shared_ptr<SFuncInfo>> UdfMetaCache;
 
   std::string toDbname(const std::string& dbFullName) const {
     std::string::size_type n = dbFullName.find(".");
@@ -308,6 +332,18 @@
     return TSDB_CODE_SUCCESS;
   }
 
+  std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const {
+    DbMetaCache::const_iterator it = meta_.find(db);
+    if (meta_.end() == it) {
+      return std::shared_ptr<STableMeta>();
+    }
+    TableMetaCache::const_iterator tit = it->second.find(tbname);
+    if (it->second.end() == tit) {
+      return std::shared_ptr<STableMeta>();
+    }
+    return tit->second;
+  }
+
   int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const {
     int32_t code = TSDB_CODE_SUCCESS;
     if (NULL != pTableMetaReq) {
@@ -330,12 +366,82 @@
     int32_t code = TSDB_CODE_SUCCESS;
     if (NULL != pTableVgroupReq) {
       int32_t ntables = taosArrayGetSize(pTableVgroupReq);
-      *pTableVgroupData = taosArrayInit(ntables, POINTER_BYTES);
+      *pTableVgroupData = taosArrayInit(ntables, sizeof(SVgroupInfo));
       for (int32_t i = 0; i < ntables; ++i) {
-        SVgroupInfo* pVgInfo = (SVgroupInfo*)taosMemoryCalloc(1, sizeof(SVgroupInfo));
-        code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), pVgInfo);
+        SVgroupInfo vgInfo = {0};
+        code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), &vgInfo);
         if (TSDB_CODE_SUCCESS == code) {
-          taosArrayPush(*pTableVgroupData, &pVgInfo);
+          taosArrayPush(*pTableVgroupData, &vgInfo);
+        } else {
+          break;
+        }
+      }
+    }
+    return code;
+  }
+
+  int32_t
getAllDbVgroup(SArray* pDbVgroupReq, SArray** pDbVgroupData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pDbVgroupReq) {
+      int32_t ndbs = taosArrayGetSize(pDbVgroupReq);
+      *pDbVgroupData = taosArrayInit(ndbs, POINTER_BYTES);
+      for (int32_t i = 0; i < ndbs; ++i) {
+        int64_t zeroVg = 0;
+        taosArrayPush(*pDbVgroupData, &zeroVg);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllDbCfg(SArray* pDbCfgReq, SArray** pDbCfgData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pDbCfgReq) {
+      int32_t ndbs = taosArrayGetSize(pDbCfgReq);
+      *pDbCfgData = taosArrayInit(ndbs, sizeof(SDbCfgInfo));
+      for (int32_t i = 0; i < ndbs; ++i) {
+        SDbCfgInfo dbCfg = {0};
+        taosArrayPush(*pDbCfgData, &dbCfg);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllDbInfo(SArray* pDbInfoReq, SArray** pDbInfoData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pDbInfoReq) {
+      int32_t ndbs = taosArrayGetSize(pDbInfoReq);
+      *pDbInfoData = taosArrayInit(ndbs, sizeof(SDbInfo));
+      for (int32_t i = 0; i < ndbs; ++i) {
+        SDbInfo dbInfo = {0};
+        taosArrayPush(*pDbInfoData, &dbInfo);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllUserAuth(SArray* pUserAuthReq, SArray** pUserAuthData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pUserAuthReq) {
+      int32_t num = taosArrayGetSize(pUserAuthReq);
+      *pUserAuthData = taosArrayInit(num, sizeof(bool));
+      for (int32_t i = 0; i < num; ++i) {
+        bool pass = true;
+        taosArrayPush(*pUserAuthData, &pass);
+      }
+    }
+    return code;
+  }
+
+  int32_t getAllUdf(SArray* pUdfReq, SArray** pUdfData) const {
+    int32_t code = TSDB_CODE_SUCCESS;
+    if (NULL != pUdfReq) {
+      int32_t num = taosArrayGetSize(pUdfReq);
+      *pUdfData = taosArrayInit(num, sizeof(SFuncInfo));
+      for (int32_t i = 0; i < num; ++i) {
+        SFuncInfo info = {0};
+        code = catalogGetUdfInfo((char*)taosArrayGet(pUdfReq, i), &info);
+        if (TSDB_CODE_SUCCESS == code) {
+          taosArrayPush(*pUdfData, &info);
        } else {
          break;
        }
@@ -347,6 +453,7 @@
   uint64_t id_;
   std::unique_ptr<TableBuilder> builder_;
   DbMetaCache meta_;
+  UdfMetaCache udf_;
 };
 
 MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {}
@@ -365,9 +472,9 @@ void MockCatalogService::createSubTable(const std::string& db, const std::string
 
 void MockCatalogService::showTables() const { impl_->showTables(); }
 
-std::shared_ptr<STableMeta> MockCatalogService::getTableMeta(const std::string& db,
-                                                             const std::string& tbname) const {
-  return impl_->getTableMeta(db, tbname);
+void MockCatalogService::createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen,
+                                        int32_t bufSize) {
+  impl_->createFunction(func, funcType, outputType, outputLen, bufSize);
 }
 
 int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
@@ -382,6 +489,10 @@ int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, S
   return impl_->catalogGetTableDistVgInfo(pTableName, pVgList);
 }
 
+int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
+  return impl_->catalogGetUdfInfo(funcName, pInfo);
+}
+
 int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
   return impl_->catalogGetAllMeta(pCatalogReq, pMetaData);
 }
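The mock wiring above is exercised as in generateFunctions() and initMetaDataEnv() in mockCatalog.cpp; the fragment below is illustrative only and reuses names from this patch:

    // Register a scalar UDF in the mock catalog, as generateFunctions() does:
    g_mockCatalogService->createFunction("udf1", TSDB_FUNC_TYPE_SCALAR, TSDB_DATA_TYPE_INT,
                                         tDataTypes[TSDB_DATA_TYPE_INT].bytes, 0);
    // With the stub installed for catalogGetUdfInfo, parser code asking the
    // catalog about "udf1" is answered from the mock:
    SFuncInfo info = {0};
    int32_t code = g_mockCatalogService->catalogGetUdfInfo("udf1", &info);  // TSDB_CODE_SUCCESS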
diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h
index bfc35247fec3335f7c6090ca811a4d13637d4cc7..cb0f10e95bfcb05ce46ea0eb423d9753477db422 100644
--- a/source/libs/parser/test/mockCatalogService.h
+++ b/source/libs/parser/test/mockCatalogService.h
@@ -56,11 +56,12 @@ class MockCatalogService {
                                  int32_t numOfColumns, int32_t numOfTags = 0);
   void createSubTable(const std::string& db, const std::string& stbname, const std::string& tbname, int16_t vgid);
   void showTables() const;
-  std::shared_ptr<STableMeta> getTableMeta(const std::string& db, const std::string& tbname) const;
+  void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize);
 
   int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
   int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
   int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
+  int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const;
   int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const;
 
  private:
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index a5e7ef51a797a01ff404dc01275ded61534fde33..e55f36376cbce26f1954211fe7308070a0a192bd 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -228,7 +228,44 @@ TEST_F(ParserInitialCTest, createDnode) {
   run("CREATE DNODE 1.1.1.1 PORT 9000");
 }
 
-// todo CREATE FUNCTION
+// CREATE [AGGREGATE] FUNCTION [IF NOT EXISTS] func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
+TEST_F(ParserInitialCTest, createFunction) {
+  useDb("root", "test");
+
+  SCreateFuncReq expect = {0};
+
+  auto setCreateFuncReqFunc = [&](const char* pUdfName, int8_t outputType, int32_t outputBytes = 0,
+                                  int8_t funcType = TSDB_FUNC_TYPE_SCALAR, int8_t igExists = 0, int32_t bufSize = 0) {
+    memset(&expect, 0, sizeof(SCreateFuncReq));
+    strcpy(expect.name, pUdfName);
+    expect.igExists = igExists;
+    expect.funcType = funcType;
+    expect.scriptType = TSDB_FUNC_SCRIPT_BIN_LIB;
+    expect.outputType = outputType;
+    expect.outputLen = outputBytes > 0 ?
outputBytes : tDataTypes[outputType].bytes; + expect.bufSize = bufSize; + }; + + setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) { + ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_FUNCTION_STMT); + SCreateFuncReq req = {0}; + ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSCreateFuncReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req)); + + ASSERT_EQ(std::string(req.name), std::string(expect.name)); + ASSERT_EQ(req.igExists, expect.igExists); + ASSERT_EQ(req.funcType, expect.funcType); + ASSERT_EQ(req.scriptType, expect.scriptType); + ASSERT_EQ(req.outputType, expect.outputType); + ASSERT_EQ(req.outputLen, expect.outputLen); + ASSERT_EQ(req.bufSize, expect.bufSize); + }); + + setCreateFuncReqFunc("udf1", TSDB_DATA_TYPE_INT); + // run("CREATE FUNCTION udf1 AS './build/lib/libudf1.so' OUTPUTTYPE INT"); + + setCreateFuncReqFunc("udf2", TSDB_DATA_TYPE_DOUBLE, 0, TSDB_FUNC_TYPE_AGGREGATE, 1, 8); + // run("CREATE AGGREGATE FUNCTION IF NOT EXISTS udf2 AS './build/lib/libudf2.so' OUTPUTTYPE DOUBLE BUFSIZE 8"); +} TEST_F(ParserInitialCTest, createIndexSma) { useDb("root", "test"); diff --git a/source/libs/parser/test/parInitialDTest.cpp b/source/libs/parser/test/parInitialDTest.cpp index 7cf3337fea3c29afcd0eaac8d6bd160c5ec9aacd..57d349e7eeecd33fd9855f5a0d8df22548c5ceee 100644 --- a/source/libs/parser/test/parInitialDTest.cpp +++ b/source/libs/parser/test/parInitialDTest.cpp @@ -103,6 +103,7 @@ TEST_F(ParserInitialDTest, dropTopic) { } TEST_F(ParserInitialDTest, dropUser) { + login("root"); useDb("root", "test"); run("drop user wxy"); diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index f00500faa4963f4efef561bce103658585a029a6..2d4fe41d4fed38bb6f97fcb37c6972aa8c7d65fc 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -141,6 +141,14 @@ TEST_F(ParserSelectTest, IndefiniteRowsFuncSemanticCheck) { // run("SELECT DIFF(c1) FROM t1 INTERVAL(10s)"); } +TEST_F(ParserSelectTest, useDefinedFunc) { + useDb("root", "test"); + + run("SELECT udf1(c1) FROM t1"); + + run("SELECT udf2(c1) FROM t1 GROUP BY c2"); +} + TEST_F(ParserSelectTest, groupBy) { useDb("root", "test"); diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp index aadf8e7c66325f20da60e0eb9d25fb8f23042293..820b8cca3cdc02633982a3ea797aa605db1e3fd3 100644 --- a/source/libs/parser/test/parTestMain.cpp +++ b/source/libs/parser/test/parTestMain.cpp @@ -37,6 +37,7 @@ class ParserEnv : public testing::Environment { virtual void SetUp() { initMetaDataEnv(); generateMetaData(); + initLog(TD_TMP_DIR_PATH "td"); } virtual void TearDown() { @@ -47,20 +48,55 @@ class ParserEnv : public testing::Environment { ParserEnv() {} virtual ~ParserEnv() {} + + private: + void initLog(const char* path) { + int32_t logLevel = getLogLevel(); + dDebugFlag = logLevel; + vDebugFlag = logLevel; + mDebugFlag = logLevel; + cDebugFlag = logLevel; + jniDebugFlag = logLevel; + tmrDebugFlag = logLevel; + uDebugFlag = logLevel; + rpcDebugFlag = logLevel; + qDebugFlag = logLevel; + wDebugFlag = logLevel; + sDebugFlag = logLevel; + tsdbDebugFlag = logLevel; + tsLogEmbedded = 1; + tsAsyncLog = 0; + + taosRemoveDir(path); + taosMkDir(path); + tstrncpy(tsLogDir, path, PATH_MAX); + if (taosInitLog("taoslog", 1) != 0) { + std::cout << "failed to init log file" << std::endl; + } + } }; static void parseArg(int argc, char* argv[]) { - int opt = 0; - const char* optstring = ""; + int opt = 0; + const char* optstring = ""; + // 
clang-format off static struct option long_options[] = { - {"dump", no_argument, NULL, 'd'}, {"async", no_argument, NULL, 'a'}, {0, 0, 0, 0}}; + {"dump", no_argument, NULL, 'd'}, + {"async", required_argument, NULL, 'a'}, + {"skipSql", required_argument, NULL, 's'}, + {0, 0, 0, 0} + }; + // clang-format on while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) { switch (opt) { case 'd': g_dump = true; break; case 'a': - g_testAsyncApis = true; + setAsyncFlag(optarg); + break; + case 's': + setSkipSqlNum(optarg); break; default: break; diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp index 8b15cc8a23af4eea91a1d3090442e621cc5f9a0b..fab7ed35b1cb408a5cdd6f455994da07a26596fd 100644 --- a/source/libs/parser/test/parTestUtil.cpp +++ b/source/libs/parser/test/parTestUtil.cpp @@ -44,23 +44,40 @@ namespace ParserTest { } \ } while (0); -bool g_dump = false; -bool g_testAsyncApis = false; +bool g_dump = false; +bool g_testAsyncApis = true; +int32_t g_logLevel = 131; +int32_t g_skipSql = 0; + +void setAsyncFlag(const char* pFlag) { g_testAsyncApis = stoi(pFlag) > 0 ? true : false; } +void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } struct TerminateFlag : public exception { const char* what() const throw() { return "success and terminate"; } }; +void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } + +int32_t getLogLevel() { return g_logLevel; } + class ParserTestBaseImpl { public: ParserTestBaseImpl(ParserTestBase* pBase) : pBase_(pBase) {} + void login(const std::string& user) { caseEnv_.user_ = user; } + void useDb(const string& acctId, const string& db) { caseEnv_.acctId_ = acctId; caseEnv_.db_ = db; + caseEnv_.nsql_ = g_skipSql; } void run(const string& sql, int32_t expect, ParserStage checkStage) { + if (caseEnv_.nsql_ > 0) { + --(caseEnv_.nsql_); + return; + } + reset(expect, checkStage); try { SParseContext cxt = {0}; @@ -69,6 +86,8 @@ class ParserTestBaseImpl { SQuery* pQuery = nullptr; doParse(&cxt, &pQuery); + doAuthenticate(&cxt, pQuery); + doTranslate(&cxt, pQuery); doCalculateConstant(&cxt, pQuery); @@ -89,59 +108,14 @@ class ParserTestBaseImpl { } } - void runAsync(const string& sql, int32_t expect, ParserStage checkStage) { - reset(expect, checkStage); - try { - SParseContext cxt = {0}; - setParseContext(sql, &cxt, true); - - SQuery* pQuery = nullptr; - doParse(&cxt, &pQuery); - - SCatalogReq catalogReq = {0}; - doBuildCatalogReq(pQuery->pMetaCache, &catalogReq); - - string err; - thread t1([&]() { - try { - SMetaData metaData = {0}; - doGetAllMeta(&catalogReq, &metaData); - - doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache); - - doTranslate(&cxt, pQuery); - - doCalculateConstant(&cxt, pQuery); - } catch (const TerminateFlag& e) { - // success and terminate - } catch (const runtime_error& e) { - err = e.what(); - } catch (...) { - err = "unknown error"; - } - }); - - t1.join(); - if (!err.empty()) { - throw runtime_error(err); - } - - if (g_dump) { - dump(); - } - } catch (const TerminateFlag& e) { - // success and terminate - return; - } catch (...) 
{ - dump(); - throw; - } - } - private: struct caseEnv { - string acctId_; - string db_; + string acctId_; + string user_; + string db_; + int32_t nsql_; + + caseEnv() : user_("wangxiaoyu"), nsql_(0) {} }; struct stmtEnv { @@ -207,6 +181,8 @@ class ParserTestBaseImpl { pCxt->acctId = atoi(caseEnv_.acctId_.c_str()); pCxt->db = caseEnv_.db_.c_str(); + pCxt->pUser = caseEnv_.user_.c_str(); + pCxt->isSuperUser = caseEnv_.user_ == "root"; pCxt->pSql = stmtEnv_.sql_.c_str(); pCxt->sqlLen = stmtEnv_.sql_.length(); pCxt->pMsg = stmtEnv_.msgBuf_.data(); @@ -220,6 +196,11 @@ class ParserTestBaseImpl { res_.parsedAst_ = toString((*pQuery)->pRoot); } + void doCollectMetaKey(SParseContext* pCxt, SQuery* pQuery) { + DO_WITH_THROW(collectMetaKey, pCxt, pQuery); + ASSERT_NE(pQuery->pMetaCache, nullptr); + } + void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) { DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq); } @@ -232,6 +213,8 @@ class ParserTestBaseImpl { DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache); } + void doAuthenticate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(authenticate, pCxt, pQuery); } + void doTranslate(SParseContext* pCxt, SQuery* pQuery) { DO_WITH_THROW(translate, pCxt, pQuery); checkQuery(pQuery, PARSER_STAGE_TRANSLATE); @@ -254,6 +237,59 @@ class ParserTestBaseImpl { void checkQuery(const SQuery* pQuery, ParserStage stage) { pBase_->checkDdl(pQuery, stage); } + void runAsync(const string& sql, int32_t expect, ParserStage checkStage) { + reset(expect, checkStage); + try { + SParseContext cxt = {0}; + setParseContext(sql, &cxt, true); + + SQuery* pQuery = nullptr; + doParse(&cxt, &pQuery); + + doCollectMetaKey(&cxt, pQuery); + + SCatalogReq catalogReq = {0}; + doBuildCatalogReq(pQuery->pMetaCache, &catalogReq); + + string err; + thread t1([&]() { + try { + SMetaData metaData = {0}; + doGetAllMeta(&catalogReq, &metaData); + + doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache); + + doAuthenticate(&cxt, pQuery); + + doTranslate(&cxt, pQuery); + + doCalculateConstant(&cxt, pQuery); + } catch (const TerminateFlag& e) { + // success and terminate + } catch (const runtime_error& e) { + err = e.what(); + } catch (...) { + err = "unknown error"; + } + }); + + t1.join(); + if (!err.empty()) { + throw runtime_error(err); + } + + if (g_dump) { + dump(); + } + } catch (const TerminateFlag& e) { + // success and terminate + return; + } catch (...) 
{ + dump(); + throw; + } + } + caseEnv caseEnv_; stmtEnv stmtEnv_; stmtRes res_; @@ -264,16 +300,14 @@ ParserTestBase::ParserTestBase() : impl_(new ParserTestBaseImpl(this)) {} ParserTestBase::~ParserTestBase() {} +void ParserTestBase::login(const std::string& user) { return impl_->login(user); } + void ParserTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); } void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage checkStage) { return impl_->run(sql, expect, checkStage); } -void ParserTestBase::runAsync(const std::string& sql, int32_t expect, ParserStage checkStage) { - return impl_->runAsync(sql, expect, checkStage); -} - void ParserTestBase::checkDdl(const SQuery* pQuery, ParserStage stage) { return; } } // namespace ParserTest diff --git a/source/libs/parser/test/parTestUtil.h b/source/libs/parser/test/parTestUtil.h index 43feb3d5f19a120120d8f194faede0a0d92e6822..44be7a24746ecde078f69555c88e4d85344b8313 100644 --- a/source/libs/parser/test/parTestUtil.h +++ b/source/libs/parser/test/parTestUtil.h @@ -34,9 +34,9 @@ class ParserTestBase : public testing::Test { ParserTestBase(); virtual ~ParserTestBase(); + void login(const std::string& user); void useDb(const std::string& acctId, const std::string& db); void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL); - void runAsync(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL); virtual void checkDdl(const SQuery* pQuery, ParserStage stage); @@ -65,7 +65,11 @@ class ParserDdlTest : public ParserTestBase { }; extern bool g_dump; -extern bool g_testAsyncApis; + +extern void setAsyncFlag(const char* pFlag); +extern void setLogLevel(const char* pLogLevel); +extern int32_t getLogLevel(); +extern void setSkipSqlNum(const char* pNum); } // namespace ParserTest diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 4d489f68e7c4ff042e6f0d0c82bbd98a6dbbfb2b..adc07fcd0d6b6a0c6f98fdf5032151dab3ae71f3 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -223,6 +223,9 @@ static void setScanWindowInfo(SScanLogicNode* pScan) { pScan->sliding = ((SWindowLogicNode*)pScan->node.pParent)->sliding; pScan->intervalUnit = ((SWindowLogicNode*)pScan->node.pParent)->intervalUnit; pScan->slidingUnit = ((SWindowLogicNode*)pScan->node.pParent)->slidingUnit; + pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType; + pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark; + pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId; } } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 0f88a54e913c57c1fdc848317d7b8a85a4ac0e88..a45eabefb9f1f1f7fe9c97a3f8c7cf16385d2fc3 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -503,6 +503,9 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp pTableScan->sliding = pScanLogicNode->sliding; pTableScan->intervalUnit = pScanLogicNode->intervalUnit; pTableScan->slidingUnit = pScanLogicNode->slidingUnit; + pTableScan->triggerType = pScanLogicNode->triggerType; + pTableScan->watermark = pScanLogicNode->watermark; + pTableScan->tsColId = pScanLogicNode->tsColId; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode); 
} diff --git a/source/libs/planner/test/CMakeLists.txt b/source/libs/planner/test/CMakeLists.txt index a21b36fef6b3eecc51bdbe4abbb7fff3dc065098..abea60b0c798a055617abf3693be25f365fbc867 100644 --- a/source/libs/planner/test/CMakeLists.txt +++ b/source/libs/planner/test/CMakeLists.txt @@ -32,7 +32,9 @@ if(${BUILD_WINGETOPT}) target_link_libraries(plannerTest PUBLIC wingetopt) endif() -add_test( - NAME plannerTest - COMMAND plannerTest -) +if(NOT TD_WINDOWS) + add_test( + NAME plannerTest + COMMAND plannerTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp index 084762088823edee627b4ea3bad2286208d570ac..e2082d49364727719bc72f3445bcb038d5584976 100644 --- a/source/libs/planner/test/planTestUtil.cpp +++ b/source/libs/planner/test/planTestUtil.cpp @@ -73,7 +73,7 @@ void setDumpModule(const char* pModule) { } } -void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(optarg); } +void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); } void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); } diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index f6d35ac4c1d57a2448b5e558b138f5b2e73597e3..b0a102069dc7d00e3002d14c76ec9c65f0854d92 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -145,6 +145,15 @@ typedef struct SQWSchStatus { SHashObj *tasksHash; // key:queryId+taskId, value: SQWTaskStatus } SQWSchStatus; +typedef struct SQWWaitTimeStat { + uint64_t num; + uint64_t total; +} SQWWaitTimeStat; + +typedef struct SQWStat { + SQWWaitTimeStat msgWait[2]; +} SQWStat; + // Qnode/Vnode level task management typedef struct SQWorker { int64_t refId; @@ -155,9 +164,10 @@ typedef struct SQWorker { tmr_h hbTimer; SRWLatch schLock; // SRWLatch ctxLock; - SHashObj *schHash; // key: schedulerId, value: SQWSchStatus - SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx - SMsgCb msgCb; + SHashObj *schHash; // key: schedulerId, value: SQWSchStatus + SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx + SMsgCb msgCb; + SQWStat stat; } SQWorker; typedef struct SQWorkerMgmt { @@ -322,6 +332,8 @@ int32_t qwDropTask(QW_FPARAMS_DEF); void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx); int32_t qwOpenRef(void); void qwSetHbParam(int64_t refId, SQWHbParam **pParam); +int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type); +int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type); void qwDbgDumpMgmtInfo(SQWorker *mgmt); int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore); diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 46c2084494b2c64f6ffc81ca11205fba6adf890c..b9dc18cd2fd22ff196a300451d1d39b5bcd2353d 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -248,7 +248,7 @@ int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo * return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } @@ -257,6 +257,8 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSubQueryMsg *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE); + if (NULL == msg || 
pMsg->contLen <= sizeof(*msg)) { QW_ELOG("invalid query msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -286,7 +288,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { int32_t code = 0; int8_t status = 0; bool queryDone = false; @@ -295,6 +297,8 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWTaskCtx * handles = NULL; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, QUERY_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid cquery msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -316,7 +320,7 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -324,6 +328,8 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SResFetchReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -349,13 +355,16 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { + SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + qProcessFetchRsp(NULL, pMsg, NULL); pMsg->pCont = NULL; return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -363,6 +372,9 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SQWorker * mgmt = (SQWorker *)qWorkerMgmt; int32_t code = 0; STaskCancelReq *msg = pMsg->pCont; + + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { qError("invalid task cancel msg"); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -390,7 +402,7 @@ _return: return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -399,6 +411,8 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { STaskDropReq *msg = pMsg->pCont; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == msg || pMsg->contLen < sizeof(*msg)) { QW_ELOG("invalid task drop msg, msg:%p, msgLen:%d", msg, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); @@ -429,7 +443,7 @@ int32_t 
qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { return TSDB_CODE_SUCCESS; } -int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { +int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts) { if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) { return TSDB_CODE_QRY_INVALID_INPUT; } @@ -438,6 +452,8 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) { SSchedulerHbReq req = {0}; SQWorker * mgmt = (SQWorker *)qWorkerMgmt; + qwUpdateWaitTimeInQueue(mgmt, ts, FETCH_QUEUE); + if (NULL == pMsg->pCont) { QW_ELOG("invalid hb msg, msg:%p, msgLen:%d", pMsg->pCont, pMsg->contLen); QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index a96a3343e701d222e1cc6e0b27fa7ede7e581f02..a4bc22fc88121de7d51e3e67655468046e95c3bf 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -499,4 +499,43 @@ int32_t qwOpenRef(void) { return TSDB_CODE_SUCCESS; } +int32_t qwUpdateWaitTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type) { + if (ts <= 0) { + return TSDB_CODE_SUCCESS; + } + + int64_t duration = taosGetTimestampUs() - ts; + switch (type) { + case QUERY_QUEUE: + ++mgmt->stat.msgWait[0].num; + mgmt->stat.msgWait[0].total += duration; + break; + case FETCH_QUEUE: + ++mgmt->stat.msgWait[1].num; + mgmt->stat.msgWait[1].total += duration; + break; + default: + qError("unsupported queue type %d", type); + return TSDB_CODE_APP_ERROR; + } + + return TSDB_CODE_SUCCESS; +} + +int64_t qwGetWaitTimeInQueue(SQWorker *mgmt, EQueueType type) { + SQWWaitTimeStat *pStat = NULL; + switch (type) { + case QUERY_QUEUE: + pStat = &mgmt->stat.msgWait[0]; + return pStat->num ? (pStat->total/pStat->num) : 0; + case FETCH_QUEUE: + pStat = &mgmt->stat.msgWait[1]; + return pStat->num ? 
(pStat->total/pStat->num) : 0; + default: + qError("unsupported queue type %d", type); + return -1; + } +} + + diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 3ee152606e78bd309e47c9dee1e1dd91643c0eb1..7201820854e6a87a1dffc12a47c37b8d6b692668 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -950,4 +950,9 @@ void qWorkerDestroy(void **qWorkerMgmt) { } } +int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type) { + return qwGetWaitTimeInQueue((SQWorker *)qWorkerMgmt, type); +} + + diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp index 42596b1cd22f73dd822a1e9c85f04b6a60ecfb3f..1b959fbe633e0c50ddc7b80af321ee0420a9616d 100644 --- a/source/libs/qworker/test/qworkerTests.cpp +++ b/source/libs/qworker/test/qworkerTests.cpp @@ -635,7 +635,7 @@ void *queryThread(void *param) { while (!qwtTestStop) { qwtBuildQueryReqMsg(&queryRpc); - qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -657,7 +657,7 @@ void *fetchThread(void *param) { while (!qwtTestStop) { qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -679,7 +679,7 @@ void *dropThread(void *param) { while (!qwtTestStop) { qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(taosRand()%5); } @@ -758,9 +758,9 @@ void *queryQueueThread(void *param) { } if (TDMT_VND_QUERY == queryRpc->msgType) { - qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessQueryMsg(mockPointer, mgmt, queryRpc, 0); } else if (TDMT_VND_QUERY_CONTINUE == queryRpc->msgType) { - qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc); + qWorkerProcessCQueryMsg(mockPointer, mgmt, queryRpc, 0); } else { printf("unknown msg in query queue, type:%d\n", queryRpc->msgType); assert(0); @@ -815,13 +815,13 @@ void *fetchQueueThread(void *param) { switch (fetchRpc->msgType) { case TDMT_VND_FETCH: - qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_CANCEL_TASK: - qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc, 0); break; case TDMT_VND_DROP_TASK: - qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc); + qWorkerProcessDropMsg(mockPointer, mgmt, fetchRpc, 0); break; default: printf("unknown msg type:%d in fetch queue", fetchRpc->msgType); @@ -878,16 +878,16 @@ TEST(seqTest, normalCase) { code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_EQ(code, 0); //code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc); //ASSERT_EQ(code, 0); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); ASSERT_EQ(code, 0); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); qWorkerDestroy(&mgmt); @@ -914,10 +914,10 @@ TEST(seqTest, cancelFirst) { code 
= qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb); ASSERT_EQ(code, 0); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); ASSERT_EQ(code, 0); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); ASSERT_TRUE(0 != code); qWorkerDestroy(&mgmt); @@ -959,7 +959,7 @@ TEST(seqTest, randCase) { if (r >= 0 && r < maxr/5) { printf("Query,%d\n", t++); qwtBuildQueryReqMsg(&queryRpc); - code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc); + code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0); } else if (r >= maxr/5 && r < maxr * 2/5) { //printf("Ready,%d\n", t++); //qwtBuildReadyReqMsg(&readyMsg, &readyRpc); @@ -970,14 +970,14 @@ TEST(seqTest, randCase) { } else if (r >= maxr * 2/5 && r < maxr* 3/5) { printf("Fetch,%d\n", t++); qwtBuildFetchReqMsg(&fetchMsg, &fetchRpc); - code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc); + code = qWorkerProcessFetchMsg(mockPointer, mgmt, &fetchRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } } else if (r >= maxr * 3/5 && r < maxr * 4/5) { printf("Drop,%d\n", t++); qwtBuildDropReqMsg(&dropMsg, &dropRpc); - code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc); + code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0); if (qwtTestEnableSleep) { taosUsleep(1); } diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h index 9dbfeceb5940d4237ead01ff445529c2d7d447ac..1c2e4a358a2c256cf3ed577be568c2e93fe13cbe 100644 --- a/source/libs/scalar/inc/sclInt.h +++ b/source/libs/scalar/inc/sclInt.h @@ -51,7 +51,7 @@ typedef struct SScalarCtx { int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out); SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows); -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode); #define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type) #define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 4317ad325e7e0d7b468dd7929c1f4a7c9ff7c169..195ec8a57791062cbca0e4c1a39ccce1866a5095 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -3553,7 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } - sclConvertToTsValueNode(stat->precision, valueNode); + int32_t code = sclConvertToTsValueNode(stat->precision, valueNode); + if (code) { + stat->code = code; + return DEAL_RES_ERROR; + } return DEAL_RES_CONTINUE; } @@ -3687,7 +3691,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) { for (int32_t i = 0; i < nodeNum; ++i) { SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i); - sclConvertToTsValueNode(pStat->precision, valueNode); + FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode)); } _return: diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index fb03eaefa4fe79034d731b74de6bd166fa0db83e..d2436b9948f2cf7bfa15d061cdc9bbfdfefd6f08 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -20,17 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) { return 2; } -void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { +int32_t sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) { char *timeStr = 
valueNode->datum.p; - if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) != - TSDB_CODE_SUCCESS) { - valueNode->datum.i = 0; + int32_t code = convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i); + if (code != TSDB_CODE_SUCCESS) { + return code; } taosMemoryFree(timeStr); valueNode->typeData = valueNode->datum.i; valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP; valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes; + + return TSDB_CODE_SUCCESS; } @@ -546,6 +548,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { SOperatorNode *node = (SOperatorNode *)*pNode; + int32_t code = 0; if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) { SValueNode *valueNode = (SValueNode *)node->pLeft; @@ -555,7 +558,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight) && ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } @@ -567,7 +574,11 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) { if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft) && ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) { - sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + code = sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode); + if (code) { + ctx->code = code; + return DEAL_RES_ERROR; + } } } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 0d47595b3e25214482ab8b6442e521ff00ebdc05..d422b2579e7baa669254aa39defc15c7323ef80c 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -633,7 +633,7 @@ static int32_t doTrimFunction(SScalarParam *pInput, int32_t inputNum, SScalarPar continue; } - char *input = colDataGetData(pInput[0].columnData, i); + char *input = colDataGetData(pInputData, i); int32_t len = varDataLen(input); int32_t charLen = (type == TSDB_DATA_TYPE_VARCHAR) ? 
len : len / TSDB_NCHAR_SIZE; trimFn(input, output, type, charLen); @@ -925,10 +925,9 @@ int32_t toUnixtimestampFunction(SScalarParam *pInput, int32_t inputNum, SScalarP int32_t ret = convertStringToTimestamp(type, input, timePrec, &timeVal); if (ret != TSDB_CODE_SUCCESS) { colDataAppendNULL(pOutput->columnData, i); - continue; + } else { + colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } - - colDataAppend(pOutput->columnData, i, (char *)&timeVal, false); } pOutput->numOfRows = pInput->numOfRows; diff --git a/source/libs/scalar/test/scalar/CMakeLists.txt b/source/libs/scalar/test/scalar/CMakeLists.txt index 480c22321d73acb63ed350b5164a9d9af3e31685..672cb5a3de39bfed51c9d399ac3d0431614f50ab 100644 --- a/source/libs/scalar/test/scalar/CMakeLists.txt +++ b/source/libs/scalar/test/scalar/CMakeLists.txt @@ -17,7 +17,9 @@ TARGET_INCLUDE_DIRECTORIES( PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc" PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc" ) -add_test( - NAME scalarTest - COMMAND scalarTest -) +if(NOT TD_WINDOWS) + add_test( + NAME scalarTest + COMMAND scalarTest + ) +endif(NOT TD_WINDOWS) diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index dad4f7196ffaca8ad09da225dd37944210899435..312d587b6f0ee29a9f2da22afc23a2834747b063 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -94,6 +94,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch if (schJobNeedToStop(pJob, &status)) { SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), rspCode); + taosMemoryFreeClear(msg); SCH_RET(atomic_load_32(&pJob->errCode)); } @@ -121,6 +122,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -145,6 +148,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } SCH_ERR_JRET(rspCode); + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -164,6 +169,9 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch if (NULL == msg) { SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } + + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -210,6 +218,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_UNLOCK(SCH_WRITE, &pJob->resLock); } + taosMemoryFreeClear(msg); + SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); break; @@ -224,6 +234,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(rsp->code); SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp)); + + taosMemoryFreeClear(msg); SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); @@ -275,6 +287,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp)); } + taosMemoryFreeClear(msg); + return TSDB_CODE_SUCCESS; } @@ -282,6 +296,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_ERR_JRET(schFetchFromRemote(pJob)); + taosMemoryFreeClear(msg); + return TSDB_CODE_SUCCESS; } @@ -300,6 +316,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed); + msg = NULL; + 
schProcessOnDataFetched(pJob); break; } @@ -322,6 +340,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch _return: + taosMemoryFreeClear(msg); + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); }
diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index 7e4f83a693cf9301da29493ea984828c2731552a..cc9fc8bd803ae68e909c69634e0077e1e5507c90 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -35,6 +35,15 @@ void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput) { return (void*)buf; } +static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { + SStreamDispatchReq req = { + .streamId = pTask->streamId, + .data = data, + }; + // TODO: serialize req into pMsg and choose the target *ppEpSet; dispatch is not implemented yet + return 0; +} + static int32_t streamBuildExecMsg(SStreamTask* pTask, SArray* data, SRpcMsg* pMsg, SEpSet** ppEpSet) { SStreamTaskExecReq req = { .streamId = pTask->streamId, @@ -407,6 +415,26 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) return 0; } +int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) { + if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1; + tEndEncode(pEncoder); + return 0; +} + +int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { + if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1; + tEndDecode(pDecoder); + return 0; +} + int32_t tEncodeSStreamTaskExecReq(void** buf, const SStreamTaskExecReq* pReq) { int32_t tlen = 0; tlen += taosEncodeFixedI64(buf, pReq->streamId);
diff --git a/source/libs/stream/src/tstreamUpdate.c b/source/libs/stream/src/tstreamUpdate.c index 75319a2354f638d6dab9d871bdd402cfb15ee2c4..7587fcecc99962b2cd0eda135a121acb281a1a48 100644 --- a/source/libs/stream/src/tstreamUpdate.c +++ b/source/libs/stream/src/tstreamUpdate.c @@ -72,12 +72,14 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) { return val; } -static int64_t adjustWatermark(int64_t interval, int64_t watermark) { - if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) { - watermark = MAX_NUM_SCALABLE_BF * interval; - } else if (watermark < MIN_NUM_SCALABLE_BF * interval) { - watermark = MIN_NUM_SCALABLE_BF * interval; - } +static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) { + if (watermark <= 0) { + watermark = TMAX(originInt/adjInterval, 1) * adjInterval; + } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) { + watermark = MAX_NUM_SCALABLE_BF * adjInterval; + }/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) { + watermark = MIN_NUM_SCALABLE_BF * adjInterval; + }*/ // Todo(liuyao) save window info to tdb return watermark; } @@ -94,7 +96,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma pInfo->pTsSBFs = NULL; pInfo->minTS = -1; pInfo->interval = adjustInterval(interval, precision); - pInfo->watermark = adjustWatermark(pInfo->interval, watermark); + pInfo->watermark = adjustWatermark(pInfo->interval, interval, watermark); uint64_t bfSize = 
(uint64_t)(pInfo->watermark / pInfo->interval); @@ -149,13 +151,18 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) { bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) { int32_t res = TSDB_CODE_FAILED; uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets; + TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); + if (ts < maxTs - pInfo->watermark) { + // this window has been closed. + return true; + } + SScalableBf *pSBf = getSBf(pInfo, ts); // pSBf may be a null pointer if (pSBf) { res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY)); } - TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index); if (maxTs < ts) { taosArraySet(pInfo->pTsBuckets, index, &ts); return false; diff --git a/source/libs/sync/inc/syncRaftCfg.h b/source/libs/sync/inc/syncRaftCfg.h index f4c857bb06068eaec7e9a1d9324b47b505e51eba..1061e8bdc4b248511eb3a580b76056cbc830f02b 100644 --- a/source/libs/sync/inc/syncRaftCfg.h +++ b/source/libs/sync/inc/syncRaftCfg.h @@ -27,6 +27,8 @@ extern "C" { #include "syncInt.h" #include "taosdef.h" +#define CONFIG_FILE_LEN 1024 + typedef struct SRaftCfg { SSyncCfg cfg; TdFilePtr pFile; diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index daf7992d431d2956dd87bf92ae98355363b44297..70481b853ece4ba5cab45f303184042494b44609 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -50,10 +50,18 @@ int32_t raftCfgPersist(SRaftCfg *pRaftCfg) { char *s = raftCfg2Str(pRaftCfg); taosLSeekFile(pRaftCfg->pFile, 0, SEEK_SET); - int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); - assert(ret == strlen(s) + 1); - taosMemoryFree(s); + char buf[CONFIG_FILE_LEN]; + memset(buf, 0, sizeof(buf)); + ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); + snprintf(buf, sizeof(buf), "%s", s); + int64_t ret = taosWriteFile(pRaftCfg->pFile, buf, sizeof(buf)); + assert(ret == sizeof(buf)); + + //int64_t ret = taosWriteFile(pRaftCfg->pFile, s, strlen(s) + 1); + //assert(ret == strlen(s) + 1); + + taosMemoryFree(s); taosFsyncFile(pRaftCfg->pFile); return 0; } @@ -163,8 +171,16 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, int8_t isStandBy, const char *path) { raftCfg.cfg = *pCfg; raftCfg.isStandBy = isStandBy; char * s = raftCfg2Str(&raftCfg); - int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); - assert(ret == strlen(s) + 1); + + char buf[CONFIG_FILE_LEN]; + memset(buf, 0, sizeof(buf)); + ASSERT(strlen(s) + 1 <= CONFIG_FILE_LEN); + snprintf(buf, sizeof(buf), "%s", s); + int64_t ret = taosWriteFile(pFile, buf, sizeof(buf)); + assert(ret == sizeof(buf)); + + //int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1); + //assert(ret == strlen(s) + 1); taosMemoryFree(s); taosCloseFile(&pFile); diff --git a/source/libs/transport/src/transSrv.c b/source/libs/transport/src/transSvr.c similarity index 91% rename from source/libs/transport/src/transSrv.c rename to source/libs/transport/src/transSvr.c index 9018eaacf600a9f8ceedde86672b2362039fbd0e..52b36433bb45ace6b0fa4224fb80b65e0e5e2627 100644 --- a/source/libs/transport/src/transSrv.c +++ b/source/libs/transport/src/transSvr.c @@ -20,15 +20,15 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; static char* notify = "a"; -static int transSrvInst = 0; +static int tranSSvrInst = 0; typedef struct { int notifyCount; // int init; // init or not STransMsg msg; -} SSrvRegArg; +} SSvrRegArg; -typedef struct SSrvConn { +typedef struct SSvrConn { T_REF_DECLARE() uv_tcp_t* pTcp; uv_write_t pWriter; @@ -42,7 +42,7 @@ typedef struct SSrvConn { void* hostThrd; 
STransQueue srvMsgs; - SSrvRegArg regArg; + SSvrRegArg regArg; bool broken; // conn broken; ConnStatus status; @@ -55,14 +55,14 @@ typedef struct SSrvConn { char user[TSDB_UNI_LEN]; // user ID for the link char secret[TSDB_PASSWORD_LEN]; char ckey[TSDB_PASSWORD_LEN]; // ciphering key -} SSrvConn; +} SSvrConn; -typedef struct SSrvMsg { - SSrvConn* pConn; +typedef struct SSvrMsg { + SSvrConn* pConn; STransMsg msg; queue q; STransMsgType type; -} SSrvMsg; +} SSvrMsg; typedef struct SWorkThrdObj { TdThread thread; @@ -127,25 +127,25 @@ static void uvWorkAfterTask(uv_work_t* req, int status); static void uvWalkCb(uv_handle_t* handle, void* arg); static void uvFreeCb(uv_handle_t* handle); -static void uvStartSendRespInternal(SSrvMsg* smsg); -static void uvPrepareSendData(SSrvMsg* msg, uv_buf_t* wb); -static void uvStartSendResp(SSrvMsg* msg); +static void uvStartSendRespInternal(SSvrMsg* smsg); +static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb); +static void uvStartSendResp(SSvrMsg* msg); -static void uvNotifyLinkBrokenToApp(SSrvConn* conn); +static void uvNotifyLinkBrokenToApp(SSvrConn* conn); -static void destroySmsg(SSrvMsg* smsg); +static void destroySmsg(SSvrMsg* smsg); // check whether already read complete packet -static SSrvConn* createConn(void* hThrd); -static void destroyConn(SSrvConn* conn, bool clear /*clear handle or not*/); -static void destroyConnRegArg(SSrvConn* conn); +static SSvrConn* createConn(void* hThrd); +static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/); +static void destroyConnRegArg(SSvrConn* conn); -static int reallocConnRefHandle(SSrvConn* conn); +static int reallocConnRefHandle(SSvrConn* conn); -static void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd); -static void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd); -static void (*transAsyncHandle[])(SSrvMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease, +static void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd); +static void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd); +static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleResp, uvHandleQuit, uvHandleRelease, uvHandleRegister, NULL}; static int32_t exHandlesMgt; @@ -178,7 +178,7 @@ static bool addHandleToAcceptloop(void* arg); tTrace("server conn %p received release request", conn); \ \ STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.ahandle = NULL}; \ - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); \ + SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \ srvMsg->msg = tmsg; \ srvMsg->type = Release; \ srvMsg->pConn = conn; \ @@ -233,18 +233,18 @@ static bool addHandleToAcceptloop(void* arg); } while (0) void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; SConnBuffer* pBuf = &conn->readBuf; transAllocBuffer(pBuf, buf); } // refers specifically to query or insert timeout static void uvHandleActivityTimeout(uv_timer_t* handle) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; tDebug("%p timeout since no activity", conn); } -static void uvHandleReq(SSrvConn* pConn) { +static void uvHandleReq(SSvrConn* pConn) { SConnBuffer* pBuf = &pConn->readBuf; char* msg = pBuf->buf; uint32_t 
msgLen = pBuf->len; @@ -316,7 +316,7 @@ static void uvHandleReq(SSrvConn* pConn) { void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { // opt - SSrvConn* conn = cli->data; + SSvrConn* conn = cli->data; SConnBuffer* pBuf = &conn->readBuf; if (nread > 0) { pBuf->len += nread; @@ -354,17 +354,17 @@ void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* b void uvOnTimeoutCb(uv_timer_t* handle) { // opt - SSrvConn* pConn = handle->data; + SSvrConn* pConn = handle->data; tError("server conn %p time out", pConn); } void uvOnSendCb(uv_write_t* req, int status) { - SSrvConn* conn = req->data; + SSvrConn* conn = req->data; // transClearBuffer(&conn->readBuf); if (status == 0) { tTrace("server conn %p data already was written on stream", conn); if (!transQueueEmpty(&conn->srvMsgs)) { - SSrvMsg* msg = transQueuePop(&conn->srvMsgs); + SSvrMsg* msg = transQueuePop(&conn->srvMsgs); // if (msg->type == Release && conn->status != ConnNormal) { // conn->status = ConnNormal; // transUnrefSrvHandle(conn); @@ -376,7 +376,7 @@ void uvOnSendCb(uv_write_t* req, int status) { destroySmsg(msg); // send second data, just use for push if (!transQueueEmpty(&conn->srvMsgs)) { - msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0); + msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg->type == Register && conn->status == ConnAcquire) { conn->regArg.notifyCount = 0; conn->regArg.init = 1; @@ -389,7 +389,7 @@ void uvOnSendCb(uv_write_t* req, int status) { transQueuePop(&conn->srvMsgs); taosMemoryFree(msg); - msg = (SSrvMsg*)transQueueGet(&conn->srvMsgs, 0); + msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0); if (msg != NULL) { uvStartSendRespInternal(msg); } @@ -415,10 +415,10 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) { taosMemoryFree(req); } -static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) { +static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { tTrace("server conn %p prepare to send resp", smsg->pConn); - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; STransMsg* pMsg = &smsg->msg; if (pMsg->pCont == 0) { pMsg->pCont = (void*)rpcMallocCont(0); @@ -455,17 +455,17 @@ static void uvPrepareSendData(SSrvMsg* smsg, uv_buf_t* wb) { wb->len = len; } -static void uvStartSendRespInternal(SSrvMsg* smsg) { +static void uvStartSendRespInternal(SSvrMsg* smsg) { uv_buf_t wb; uvPrepareSendData(smsg, &wb); - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; // uv_timer_stop(&pConn->pTimer); uv_write(&pConn->pWriter, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb); } -static void uvStartSendResp(SSrvMsg* smsg) { +static void uvStartSendResp(SSvrMsg* smsg) { // impl - SSrvConn* pConn = smsg->pConn; + SSvrConn* pConn = smsg->pConn; if (pConn->broken == true) { // persist by @@ -485,7 +485,7 @@ static void uvStartSendResp(SSrvMsg* smsg) { return; } -static void destroySmsg(SSrvMsg* smsg) { +static void destroySmsg(SSvrMsg* smsg) { if (smsg == NULL) { return; } @@ -499,7 +499,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) { QUEUE_REMOVE(h); QUEUE_INIT(h); - SSrvConn* c = QUEUE_DATA(h, SSrvConn, queue); + SSvrConn* c = QUEUE_DATA(h, SSvrConn, queue); while (T_REF_VAL_GET(c) >= 2) { transUnrefSrvHandle(c); } @@ -509,7 +509,7 @@ static void destroyAllConn(SWorkThrdObj* pThrd) { void uvWorkerAsyncCb(uv_async_t* handle) { SAsyncItem* item = handle->data; SWorkThrdObj* pThrd = item->pThrd; - SSrvConn* conn = NULL; + SSvrConn* conn = NULL; queue wq; // batch process to avoid to lock/unlock frequently @@ -521,7 +521,7 @@ void 
uvWorkerAsyncCb(uv_async_t* handle) { queue* head = QUEUE_HEAD(&wq); QUEUE_REMOVE(head); - SSrvMsg* msg = QUEUE_DATA(head, SSrvMsg, q); + SSvrMsg* msg = QUEUE_DATA(head, SSvrMsg, q); if (msg == NULL) { tError("unexcept occurred, continue"); continue; @@ -649,7 +649,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { uv_handle_type pending = uv_pipe_pending_type(pipe); assert(pending == UV_TCP); - SSrvConn* pConn = createConn(pThrd); + SSvrConn* pConn = createConn(pThrd); pConn->pTransInst = pThrd->pTransInst; /* init conn timer*/ @@ -768,10 +768,10 @@ void* transWorkerThread(void* arg) { return NULL; } -static SSrvConn* createConn(void* hThrd) { +static SSvrConn* createConn(void* hThrd) { SWorkThrdObj* pThrd = hThrd; - SSrvConn* pConn = (SSrvConn*)taosMemoryCalloc(1, sizeof(SSrvConn)); + SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn)); QUEUE_INIT(&pConn->queue); QUEUE_PUSH(&pThrd->conn, &pConn->queue); @@ -794,7 +794,7 @@ static SSrvConn* createConn(void* hThrd) { return pConn; } -static void destroyConn(SSrvConn* conn, bool clear) { +static void destroyConn(SSvrConn* conn, bool clear) { if (conn == NULL) { return; } @@ -808,13 +808,13 @@ static void destroyConn(SSrvConn* conn, bool clear) { // uv_shutdown(req, (uv_stream_t*)conn->pTcp, uvShutDownCb); } } -static void destroyConnRegArg(SSrvConn* conn) { +static void destroyConnRegArg(SSvrConn* conn) { if (conn->regArg.init == 1) { transFreeMsg(conn->regArg.msg.pCont); conn->regArg.init = 0; } } -static int reallocConnRefHandle(SSrvConn* conn) { +static int reallocConnRefHandle(SSvrConn* conn) { uvReleaseExHandle(conn->refId); uvRemoveExHandle(conn->refId); // avoid app continue to send msg on invalid handle @@ -828,7 +828,7 @@ static int reallocConnRefHandle(SSrvConn* conn) { return 0; } static void uvDestroyConn(uv_handle_t* handle) { - SSrvConn* conn = handle->data; + SSvrConn* conn = handle->data; if (conn == NULL) { return; } @@ -884,7 +884,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, uv_loop_init(srv->loop); taosThreadOnce(&transModuleInit, uvInitEnv); - transSrvInst++; + tranSSvrInst++; assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0)); #ifdef WINDOWS @@ -981,7 +981,7 @@ void uvDestoryExHandle(void* handle) { taosMemoryFree(handle); } -void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) { +void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) { thrd->quit = true; if (QUEUE_IS_EMPTY(&thrd->conn)) { uv_walk(thrd->loop, uvWalkCb, NULL); @@ -990,8 +990,8 @@ void uvHandleQuit(SSrvMsg* msg, SWorkThrdObj* thrd) { } taosMemoryFree(msg); } -void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) { - SSrvConn* conn = msg->pConn; +void uvHandleRelease(SSvrMsg* msg, SWorkThrdObj* thrd) { + SSvrConn* conn = msg->pConn; if (conn->status == ConnAcquire) { reallocConnRefHandle(conn); if (!transQueuePush(&conn->srvMsgs, msg)) { @@ -1004,13 +1004,13 @@ void uvHandleRelease(SSrvMsg* msg, SWorkThrdObj* thrd) { } destroySmsg(msg); } -void uvHandleResp(SSrvMsg* msg, SWorkThrdObj* thrd) { +void uvHandleResp(SSvrMsg* msg, SWorkThrdObj* thrd) { // send msg to client tDebug("server conn %p start to send resp (2/2)", msg->pConn); uvStartSendResp(msg); } -void uvHandleRegister(SSrvMsg* msg, SWorkThrdObj* thrd) { - SSrvConn* conn = msg->pConn; +void uvHandleRegister(SSvrMsg* msg, SWorkThrdObj* thrd) { + SSvrConn* conn = msg->pConn; tDebug("server conn %p register brokenlink callback", conn); if (conn->status == ConnAcquire) { if (!transQueuePush(&conn->srvMsgs, msg)) { 
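The rename above is mechanical, but it touches a pattern that is easy to lose in the hunk noise: every server entry point (response, quit, release, register) allocates an SSvrMsg, tags it with an STransMsgType, and hands it to the worker thread via transSendAsync; uvWorkerAsyncCb then dispatches each queued message through the transAsyncHandle function table, one handler per type. The following is a minimal, self-contained sketch of that table-driven dispatch shape; every name in it (DemoMsg, handlers, handleNormal, ...) is an illustrative stand-in, not the transport library's API.

#include <stdio.h>

/* Hypothetical message tags mirroring Normal/Quit/Release/Register. */
typedef enum { MSG_NORMAL = 0, MSG_QUIT, MSG_RELEASE, MSG_REGISTER, MSG_MAX } EMsgType;

typedef struct {
  EMsgType    type;
  const char *payload;
} DemoMsg;

static void handleNormal(DemoMsg *m)   { printf("send resp: %s\n", m->payload); }
static void handleQuit(DemoMsg *m)     { (void)m; printf("quit worker\n"); }
static void handleRelease(DemoMsg *m)  { printf("release conn: %s\n", m->payload); }
static void handleRegister(DemoMsg *m) { printf("register cb: %s\n", m->payload); }

/* One handler per message type, indexed by the tag, like transAsyncHandle[]. */
static void (*handlers[MSG_MAX])(DemoMsg *) = {handleNormal, handleQuit, handleRelease, handleRegister};

int main(void) {
  /* In the server these arrive in batches on the worker's async queue; fake one here. */
  DemoMsg batch[] = {{MSG_NORMAL, "hello"}, {MSG_RELEASE, "conn#1"}, {MSG_QUIT, ""}};
  for (unsigned i = 0; i < sizeof(batch) / sizeof(batch[0]); ++i) {
    handlers[batch[i].type](&batch[i]);
  }
  return 0;
}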
@@ -1036,13 +1036,13 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) { } taosThreadJoin(pThrd->thread, NULL); SRV_RELEASE_UV(pThrd->loop); - TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSrvMsg, destroySmsg); + TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg); transDestroyAsyncPool(pThrd->asyncPool); taosMemoryFree(pThrd->loop); taosMemoryFree(pThrd); } void sendQuitToWorkThrd(SWorkThrdObj* pThrd) { - SSrvMsg* msg = taosMemoryCalloc(1, sizeof(SSrvMsg)); + SSvrMsg* msg = taosMemoryCalloc(1, sizeof(SSvrMsg)); msg->type = Quit; tDebug("server send quit msg to work thread"); transSendAsync(pThrd->asyncPool, &msg->q); @@ -1075,8 +1075,8 @@ void transCloseServer(void* arg) { taosMemoryFree(srv); - transSrvInst--; - if (transSrvInst == 0) { + tranSSvrInst--; + if (tranSSvrInst == 0) { TdThreadOnce tmpInit = PTHREAD_ONCE_INIT; memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce)); uvCloseExHandleMgt(); @@ -1087,7 +1087,7 @@ void transRefSrvHandle(void* handle) { if (handle == NULL) { return; } - int ref = T_REF_INC((SSrvConn*)handle); + int ref = T_REF_INC((SSvrConn*)handle); tDebug("server conn %p ref count: %d", handle, ref); } @@ -1095,10 +1095,10 @@ void transUnrefSrvHandle(void* handle) { if (handle == NULL) { return; } - int ref = T_REF_DEC((SSrvConn*)handle); + int ref = T_REF_DEC((SSvrConn*)handle); tDebug("server conn %p ref count: %d", handle, ref); if (ref == 0) { - destroyConn((SSrvConn*)handle, true); + destroyConn((SSvrConn*)handle, true); } } @@ -1113,12 +1113,12 @@ void transReleaseSrvHandle(void* handle) { STransMsg tmsg = {.code = 0, .info.handle = exh, .info.ahandle = NULL, .info.refId = refId}; - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Release; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Release; tTrace("server conn %p start to release", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); + transSendAsync(pThrd->asyncPool, &m->q); uvReleaseExHandle(refId); return; _return1: @@ -1141,11 +1141,11 @@ void transSendResponse(const STransMsg* msg) { SWorkThrdObj* pThrd = exh->pThrd; ASYNC_ERR_JRET(pThrd); - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Normal; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Normal; tDebug("server conn %p start to send resp (1/2)", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); + transSendAsync(pThrd->asyncPool, &m->q); uvReleaseExHandle(refId); return; _return1: @@ -1169,11 +1169,11 @@ void transRegisterMsg(const STransMsg* msg) { SWorkThrdObj* pThrd = exh->pThrd; ASYNC_ERR_JRET(pThrd); - SSrvMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSrvMsg)); - srvMsg->msg = tmsg; - srvMsg->type = Register; + SSvrMsg* m = taosMemoryCalloc(1, sizeof(SSvrMsg)); + m->msg = tmsg; + m->type = Register; tTrace("server conn %p start to register brokenlink callback", exh->handle); - transSendAsync(pThrd->asyncPool, &srvMsg->q); + transSendAsync(pThrd->asyncPool, &m->q); uvReleaseExHandle(refId); return; @@ -1193,7 +1193,7 @@ int transGetConnInfo(void* thandle, STransHandleInfo* pInfo) { return -1; } SExHandle* ex = thandle; - SSrvConn* pConn = ex->handle; + SSvrConn* pConn = ex->handle; struct sockaddr_in addr = pConn->addr; pInfo->clientIp = (uint32_t)(addr.sin_addr.s_addr); diff --git a/source/libs/transport/test/CMakeLists.txt b/source/libs/transport/test/CMakeLists.txt index 
98a252e008d85b27206fa58055f757dd02d64a78..468b70fb711a15a83c97a5a45adb68dee3d1c368 100644 --- a/source/libs/transport/test/CMakeLists.txt +++ b/source/libs/transport/test/CMakeLists.txt @@ -111,10 +111,12 @@ target_link_libraries (pushServer ) -add_test( - NAME transUT - COMMAND transUT -) +if(NOT TD_WINDOWS) + add_test( + NAME transUT + COMMAND transUT + ) +endif(NOT TD_WINDOWS) add_test( NAME transUtilUt COMMAND transportTest diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 66d6ea3ef39c69cca349caf75c4983617e89630c..178d6e8d2b48a5adc62b6c5d83dd414050ffa9f1 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -74,6 +74,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_REPEAT_INIT, "Repeat initialization TAOS_DEFINE_ERROR(TSDB_CODE_DUP_KEY, "Cannot add duplicate keys to hash") TAOS_DEFINE_ERROR(TSDB_CODE_NEED_RETRY, "Retry needed") TAOS_DEFINE_ERROR(TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE, "Out of memory in rpc queue") +TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TIMESTAMP, "Invalid timestamp format") TAOS_DEFINE_ERROR(TSDB_CODE_REF_NO_MEMORY, "Ref out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_REF_FULL, "too many Ref Objs") diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index 6a10794ea154306f3c26b9666482a7c3a5b61958..37935087fad693eed254549977182ccaca1085f2 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -26,6 +26,7 @@ typedef struct STaosQnode STaosQnode; typedef struct STaosQnode { STaosQnode *next; STaosQueue *queue; + int64_t timestamp; int32_t size; int8_t itype; int8_t reserved[3]; @@ -144,6 +145,7 @@ void *taosAllocateQitem(int32_t size, EQItype itype) { STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size); pNode->size = size; pNode->itype = itype; + pNode->timestamp = taosGetTimestampUs(); if (pNode == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -393,7 +395,7 @@ void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue) { int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; } -int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp) { +int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp) { STaosQnode *pNode = NULL; int32_t code = 0; @@ -415,6 +417,7 @@ int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FI *ppItem = pNode->item; if (ahandle) *ahandle = queue->ahandle; if (itemFp) *itemFp = queue->itemFp; + if (ts) *ts = pNode->timestamp; queue->head = pNode->next; if (queue->head == NULL) queue->tail = NULL; diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c index dc48fc3f8d2b2e803e8f1593d5471184fa99e059..686e0696ec689b48ecff8f27c7db2eb86daa5eb2 100644 --- a/source/util/src/tworker.c +++ b/source/util/src/tworker.c @@ -75,19 +75,20 @@ static void *tQWorkerThreadFp(SQWorker *worker) { void *msg = NULL; void *ahandle = NULL; int32_t code = 0; + int64_t ts = 0; taosBlockSIGPIPE(); setThreadName(pool->name); uDebug("worker:%s:%d is running", pool->name, worker->id); while (1) { - if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ahandle, &fp) == 0) { + if (taosReadQitemFromQset(pool->qset, (void **)&msg, &ts, &ahandle, &fp) == 0) { uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset); break; } if (fp != NULL) { - SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num}; + SQueueInfo info = {.ahandle = ahandle, .workerId = worker->id, .threadNum = pool->num, .timestamp = ts}; (*fp)(&info, msg); } } 
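Taken together, the tqueue.c, tworker.c, and qworker hunks above add a queue-latency statistic: taosAllocateQitem stamps each queue node with taosGetTimestampUs(), taosReadQitemFromQset returns the stamp through its new ts out-parameter, tworker.c forwards it in SQueueInfo, and the qWorkerProcess*Msg entry points feed it to qwUpdateWaitTimeInQueue, which keeps a per-queue count and running total so qwGetWaitTimeInQueue can report the average wait in microseconds. Below is a self-contained sketch of that bookkeeping under a toy clock standing in for taosGetTimestampUs(); all names in it are illustrative, not TDengine's API.

#include <stdint.h>
#include <stdio.h>

/* Toy clock in place of taosGetTimestampUs(). */
static int64_t g_nowUs = 0;
static int64_t clockUs(void) { return g_nowUs; }

typedef struct {
  uint64_t num;   /* messages measured */
  uint64_t total; /* summed wait time in microseconds */
} WaitStat;

/* Like qwUpdateWaitTimeInQueue: ts is the enqueue timestamp; ts <= 0 means
 * "not stamped" (the unit tests pass 0), so such messages are skipped. */
static void updateWait(WaitStat *pStat, int64_t ts) {
  if (ts <= 0) return;
  pStat->num += 1;
  pStat->total += (uint64_t)(clockUs() - ts);
}

/* Like qwGetWaitTimeInQueue: average wait, or 0 when nothing was measured. */
static int64_t avgWaitUs(const WaitStat *pStat) {
  return pStat->num ? (int64_t)(pStat->total / pStat->num) : 0;
}

int main(void) {
  WaitStat waitStat = {0, 0};
  int64_t enqueuedAt = clockUs();    /* stamped at allocation, as in taosAllocateQitem */
  g_nowUs += 250;                    /* the item waits 250 us in the queue */
  updateWait(&waitStat, enqueuedAt); /* recorded when the handler dequeues it */
  printf("average wait: %lld us\n", (long long)avgWaitUs(&waitStat));
  return 0;
}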
diff --git a/tests/pytest/stream/test3.py b/tests/pytest/stream/test3.py new file mode 100644 index 0000000000000000000000000000000000000000..b45521a9476961394c1cf4b2454d6fb9e2368c68 --- /dev/null +++ b/tests/pytest/stream/test3.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + #for i in range(100): + tdSql.prepare() + dbname = tdCom.getLongName(10, "letters") + tdSql.execute('create database if not exists %s vgroups 1' % dbname) + tdSql.execute('use %s' % dbname) + tdSql.execute('create table if not exists downsampling_stb (ts timestamp, c1 int, c2 double, c3 varchar(100), c4 bool) tags (t1 int, t2 double, t3 varchar(100), t4 bool);') + tdSql.execute('create table downsampling_ct1 using downsampling_stb tags(10, 10.1, "Beijing", True);') + tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 nchar(20), c5 nchar(20)) tags (t1 int);') + tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);') + tdSql.execute('create table if not exists data_filter_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)') + tdSql.execute('create table if not exists data_filter_ct1 using data_filter_stb tags (1, 2, 3, 4, 5.5, 6.6, "binary7", "nchar8", true, 11, 12, 13, 14)') + tdSql.execute('create stream data_filter_stream into output_data_filter_stb as select * from data_filter_stb where ts >= 1653648072973+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c5 <> 0 or c6 is not Null or c7 is Null or c8 between "na" and "nchar4" and c8 not between "bi" and "binary" and c8 match "nchar[19]" and c8 nmatch "nchar[25]" or c9 in (1, 2, 3) or c10 not in (6, 7) and c8 like "nch%" and c7 not like "bina_" and c11 <= 10 or c12 is Null or c13 >= 4;') + tdSql.execute('insert into data_filter_ct1 values (1653648072973, 1, 1, 1, 3, 1.1, 1.1, "binary1", "nchar1", true, 1, 2, 3, 4);') + tdSql.execute('insert into data_filter_ct1 values (1653648072973+1s, 2, 2, 1, 3, 1.1, 1.1, "binary2", "nchar2", true, 2, 3, 4, 5);') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 9d146462f28f77fca6a6ada08fb3972770ef855d..30ab6ae3cc14e2d36f4979f03bdc99871cfcd8fa 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -18,6 +18,7 @@ import getopt import subprocess import time from distutils.log import warn as printf +import platform from util.log import * from util.dnodes import * @@ -36,8 +37,10 @@ if __name__ == "__main__": stop = 0 restart = False windows = 0 - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows']) + if platform.system().lower() == 'windows': + windows = 1 + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghr', [ + 'file=', 'path=', 'master', 'logSql', 
'stop', 'cluster', 'valgrind', 'help', 'restart']) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -64,9 +67,6 @@ if __name__ == "__main__": if key in ['-m', '--master']: masterIp = value - if key in ['-w', '--windows']: - windows = 1 - if key in ['-l', '--logSql']: if (value.upper() == "TRUE"): logSql = True @@ -146,7 +146,7 @@ if __name__ == "__main__": else: pass tdDnodes.deploy(1,{}) - tdDnodes.startWin(1) + tdDnodes.start(1) else: remote_conn = Connection("root@%s"%host) with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'): diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 1a1fd319cda35136282b95765aa6b0db880830bf..e8d01de3e5a6a5943472778453d3be28f758f18c 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -17,6 +17,10 @@ import os.path import platform import subprocess from time import sleep +import base64 +import json +import copy +from fabric2 import Connection from util.log import * @@ -111,6 +115,7 @@ class TDDnode: self.deployed = 0 self.testCluster = False self.valgrind = 0 + self.remoteIP = "" self.cfgDict = { "walLevel": "2", "fsync": "1000", @@ -137,8 +142,9 @@ class TDDnode: "telemetryReporting": "0" } - def init(self, path): + def init(self, path, remoteIP = ""): self.path = path + self.remoteIP = remoteIP def setTestCluster(self, value): self.testCluster = value @@ -162,6 +168,24 @@ class TDDnode: def addExtraCfg(self, option, value): self.cfgDict.update({option: value}) + def remoteExec(self, updateCfgDict, execCmd): + remote_conn = Connection(self.remoteIP, port=22, user='root', connect_kwargs={'password':'123456'}) + remote_top_dir = '~/test' + valgrindStr = '' + if (self.valgrind==1): + valgrindStr = '-g' + remoteCfgDict = copy.deepcopy(updateCfgDict) + if ("logDir" in remoteCfgDict): + del remoteCfgDict["logDir"] + if ("dataDir" in remoteCfgDict): + del remoteCfgDict["dataDir"] + if ("cfgDir" in remoteCfgDict): + del remoteCfgDict["cfgDir"] + remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with remote_conn.cd((remote_top_dir+sys.path[0].replace(self.path, '')).replace('\\','/')): + remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) + def deploy(self, *updatecfgDict): self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) @@ -229,8 +253,11 @@ class TDDnode: self.cfg(value, key) else: self.addExtraCfg(key, value) - for key, value in self.cfgDict.items(): - self.cfg(key, value) + if (self.remoteIP == ""): + for key, value in self.cfgDict.items(): + self.cfg(key, value) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) self.deployed = 1 tdLog.debug( @@ -268,117 +295,68 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( - binPath, self.cfgDir) + if platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) else: valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir - cmd = "nohup %s %s -c %s 2>&1 & " % ( - valgrindCmdline, binPath, self.cfgDir) + if 
platform.system().lower() == 'windows': + cmd = "mintty -h never -w hide %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - tdLog.debug("the dnode:%d has been started." % (self.index)) - else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." % - (self.index)) - time.sleep(10) - - # time.sleep(5) - def startWin(self): - binPath = self.getPath("taosd.exe") - - if (binPath == ""): - tdLog.exit("taosd.exe not found!") - else: - tdLog.info("taosd.exe found: %s" % binPath) - - taosadapterBinPath = self.getPath("taosadapter.exe") - if (taosadapterBinPath == ""): - tdLog.info("taosAdapter.exe not found!") + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.start(%d)"%(self.index, self.index)) + self.running = 1 else: - tdLog.info("taosAdapter.exe found in %s" % taosadapterBuildPath) - - if self.deployed == 0: - tdLog.exit("dnode:%d is not deployed" % (self.index)) - - cmd = "mintty -h never %s -c %s" % ( - binPath, self.cfgDir) - - if (taosadapterBinPath != ""): - taosadapterCmd = "mintty -h never -w hide %s --monitor.writeToTD=false " % ( - taosadapterBinPath) - if os.system(taosadapterCmd) != 0: - tdLog.exit(taosadapterCmd) - - if os.system(cmd) != 0: - tdLog.exit(cmd) - - self.running = 1 - tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - if self.valgrind == 0: - time.sleep(0.1) - key = 'from offline to online' - bkey = bytes(key, encoding="utf8") - logFile = self.logDir + "/taosdlog.0" - i = 0 - while not os.path.exists(logFile): - sleep(0.1) - i += 1 - if i > 50: - break - popen = subprocess.Popen( - 'tail -n +0 -f ' + logFile, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - pid = popen.pid - # print('Popen.pid:' + str(pid)) - timeout = time.time() + 60 * 2 - while True: - line = popen.stdout.readline().strip() - if bkey in line: - popen.kill() - break - if time.time() > timeout: - tdLog.exit('wait too long for taosd start') - tdLog.debug("the dnode:%d has been started." % (self.index)) - else: - tdLog.debug( - "wait 10 seconds for the dnode:%d to start." 
% - (self.index)) - time.sleep(10) + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + print("dnode:%d is running with %s " % (self.index, cmd)) + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i > 50: + break + tailCmdStr = 'tail -f ' + if platform.system().lower() == 'windows': + tailCmdStr = 'tail -n +0 -f ' + popen = subprocess.Popen( + tailCmdStr + logFile, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True) + pid = popen.pid + # print('Popen.pid:' + str(pid)) + timeout = time.time() + 60 * 2 + while True: + line = popen.stdout.readline().strip() + if bkey in line: + popen.kill() + break + if time.time() > timeout: + tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." % + (self.index)) + time.sleep(10) def startWithoutSleep(self): binPath = self.getPath() @@ -402,12 +380,19 @@ class TDDnode: print(cmd) - if os.system(cmd) != 0: - tdLog.exit(cmd) + if (self.remoteIP == ""): + if os.system(cmd) != 0: + tdLog.exit(cmd) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)\ntdDnodes.startWithoutSleep(%d)"%(self.index, self.index)) + self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) def stop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.stop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -424,9 +409,10 @@ class TDDnode: time.sleep(1) processID = subprocess.check_output( psCmd, shell=True).decode("utf-8") - for port in range(6030, 6041): - fuserCmd = "fuser -k -n tcp %d" % port - os.system(fuserCmd) + if not platform.system().lower() == 'windows': + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) if self.valgrind: time.sleep(2) @@ -434,6 +420,9 @@ class TDDnode: tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) def forcestop(self): + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.forcestop(%d)"%self.index) + return if self.valgrind == 0: toBeKilled = "taosd" else: @@ -498,8 +487,10 @@ class TDDnodes: self.dnodes.append(TDDnode(9)) self.dnodes.append(TDDnode(10)) self.simDeployed = False + self.testCluster = False + self.valgrind = 0 - def init(self, path): + def init(self, path, remoteIP = ""): psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): @@ -519,9 +510,9 @@ class TDDnodes: psCmd, shell=True).decode("utf-8") binPath = self.dnodes[0].getPath() + "/../../../" - tdLog.debug("binPath %s" % (binPath)) + # tdLog.debug("binPath %s" % (binPath)) binPath = os.path.realpath(binPath) - tdLog.debug("binPath real path %s" % (binPath)) + # tdLog.debug("binPath real path %s" % (binPath)) # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) # tdLog.debug(cmd) @@ -544,7 +535,7 @@ class TDDnodes: self.path = os.path.realpath(path) for i in range(len(self.dnodes)): - self.dnodes[i].init(self.path) + self.dnodes[i].init(self.path, remoteIP) self.sim = TDSimClient(self.path) def setTestCluster(self, value): @@ -573,10 +564,6 @@ class TDDnodes: self.check(index) self.dnodes[index - 
1].start() - def startWin(self, index): - self.check(index) - self.dnodes[index - 1].startWin() - def startWithoutSleep(self, index): self.check(index) self.dnodes[index - 1].startWithoutSleep() diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 76c5cbab6c234e0a188e0870f753e019e80864f6..217c23158dd08739caea79d5b74679d4da291968 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -118,7 +118,7 @@ #./test.sh -f tsim/mnode/basic1.sim -m # --- sma -./test.sh -f tsim/sma/tsmaCreateInsertData.sim +#./test.sh -f tsim/sma/tsmaCreateInsertData.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim # --- valgrind diff --git a/tests/script/tsim/mnode/basic1.sim b/tests/script/tsim/mnode/basic1.sim index 9131237ca6d655eb289e549f1d68cfb37118fbed..e922ebe37605d64d637e63aa176b53af93b06921 100644 --- a/tests/script/tsim/mnode/basic1.sim +++ b/tests/script/tsim/mnode/basic1.sim @@ -36,13 +36,14 @@ if $data(2)[4] != ready then goto step1 endi -print =============== create drop mnode 1 sql_error create mnode on dnode 1 sql_error drop mnode on dnode 1 + +print =============== create mnode 2 sql create mnode on dnode 2 $x = 0 -step1: +step2: $x = $x + 1 sleep 1000 if $x == 20 then @@ -65,11 +66,11 @@ if $data(2)[0] != 2 then return -1 endi if $data(2)[2] != FOLLOWER then - goto step1 + goto step2 endi sleep 2000 -print ============ drop mnodes +print ============ drop mnode 2 sql drop mnode on dnode 2 sql show mnodes if $rows != 1 then diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index 38ae0dc0a298d7743f3eb1466357ff0bbb621d06..5d9425e5064d3fc65038c174dae109cc6283991e 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ -37,6 +37,15 @@ if $rows > 2 then print retention level 2 file rows $rows > 2 return -1 endi + + +if $data01 != 1 then + if $data01 != 10 then + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 1 from memory sql select * from ct1 where ts > now-8d; print $data00 $data01 @@ -44,15 +53,30 @@ if $rows > 2 then print retention level 1 file rows $rows > 2 return -1 endi + +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 0 from memory sql select * from ct1 where ts > now-3d; print $data00 $data01 print $data10 $data11 print $data20 $data21 + if $rows < 1 then print retention level 0 file rows $rows < 1 return -1 endi + +if $data01 != 10 then + print retention level 0 file result $data01 != 10 + return -1 +endi + #=================================================================== @@ -68,6 +92,13 @@ if $rows > 2 then return -1 endi +if $data01 != 1 then + if $data01 != 10 then + print retention level 2 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 1 from file sql select * from ct1 where ts > now-8d; print $data00 $data01 @@ -76,6 +107,13 @@ if $rows > 2 then return -1 endi +if $data01 != 1 then + if $data01 != 10 then + print retention level 1 file result $data01 != 1 or 10 + return -1 + endi +endi + print =============== select * from retention level 0 from file sql select * from ct1 where ts > now-3d; print $data00 $data01 @@ -86,4 +124,9 @@ if $rows < 1 then return -1 endi +if $data01 != 10 then + print retention level 0 file result 
$data01 != 10 + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/sma/tsmaCreateInsertData.sim b/tests/script/tsim/sma/tsmaCreateInsertData.sim index b7a127e1b0d67f9af620919740dae87e649c82cd..07c5adef5d8114e65bb82b66e334b30c3b59ad5b 100644 --- a/tests/script/tsim/sma/tsmaCreateInsertData.sim +++ b/tests/script/tsim/sma/tsmaCreateInsertData.sim @@ -37,5 +37,12 @@ print =============== trigger stream to execute sma aggr task and insert sma dat sql insert into ct1 values(now+5s, 20, 20.0, 30.0) #=================================================================== +print =============== select * from ct1 from memory +sql select * from ct1; +print $data00 $data01 +if $rows != 5 then + print rows $rows != 5 + return -1 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stable/alter_count.sim b/tests/script/tsim/stable/alter_count.sim index 9c9ece7ee4725a5e6da2c292a2c5d2acaa31e75b..e5af9a5735e6f7f9844d055be8d4c2892d6b2ed7 100644 --- a/tests/script/tsim/stable/alter_count.sim +++ b/tests/script/tsim/stable/alter_count.sim @@ -141,6 +141,8 @@ sql connect sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb; + +sql use d1 sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb if $data00 != 24 then return -1 diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim new file mode 100644 index 0000000000000000000000000000000000000000..6f1d8f4b7bf88913239ccf1cc3a89fb1dbdf6bc9 --- /dev/null +++ b/tests/script/tsim/stream/triggerInterval0.sim @@ -0,0 +1,185 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger window_close into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); + +sql insert into t1 values(1648791213001,1,2,3,1.0); +sleep 300 +sql select * from streamt; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t1 values(1648791223001,2,2,3,1.1); +sql insert into t1 values(1648791223002,2,2,3,1.1); +sql insert into t1 values(1648791223003,2,2,3,1.1); +sql insert into t1 values(1648791223001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 1 then + print ======$data01 + return -1 +endi + +sql insert into t1 values(1648791233001,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 3 then + print ======$data11 + return -1 +endi + +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223004,2,2,3,1.1); +sql insert into t1 values(1648791223005,2,2,3,1.1); +sleep 300 +sql select * from streamt; +if $rows != 2 then + print ======$rows + return -1 +endi +if $data01 != 1 then + print ======$data01 + return -1 +endi +if $data11 != 5 then + print ======$data11 + return -1 +endi + + +sql 
insert into t1 values(1648791233002,3,2,3,2.1);
+sql insert into t1 values(1648791213002,4,2,3,3.1)
+sql insert into t1 values(1648791213002,4,2,3,4.1);
+sleep 300
+sql select * from streamt;
+if $rows != 2 then
+  print ======$rows
+  return -1
+endi
+if $data01 != 2 then
+  print ======$data01
+  return -1
+endi
+if $data11 != 5 then
+  print ======$data11
+  return -1
+endi
+
+sql create table t2(ts timestamp, a int, b int , c int, d double);
+sql create stream streams2 trigger window_close watermark 20s into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 interval(10s);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791239999,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 0 then
+  print ======$rows
+  return -1
+endi
+
+sql insert into t2 values(1648791240000,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+  print ======$rows
+  return -1
+endi
+if $data01 != 1 then
+  print ======$data01
+  return -1
+endi
+
+sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791240000,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 1 then
+  print ======$rows
+  return -1
+endi
+if $data01 != 1 then
+  print ======$data01
+  return -1
+endi
+
+sql insert into t2 values(1648791280000,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+if $rows != 4 then
+  print ======$rows
+  return -1
+endi
+if $data01 != 1 then
+  print ======$data01
+  return -1
+endi
+if $data11 != 1 then
+  print ======$data11
+  return -1
+endi
+if $data21 != 1 then
+  print ======$data21
+  return -1
+endi
+if $data31 != 3 then
+  print ======$data31
+  return -1
+endi
+
+sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791280000,1,2,3,1.0) (1648791280001,1,2,3,1.0) (1648791280002,1,2,3,1.0) (1648791310000,1,2,3,1.0) (1648791280001,1,2,3,1.0);
+sleep 300
+sql select * from streamt2;
+
+if $rows != 5 then
+  print ======$rows
+  return -1
+endi
+if $data01 != 1 then
+  print ======$data01
+  return -1
+endi
+if $data11 != 1 then
+  print ======$data11
+  return -1
+endi
+if $data21 != 1 then
+  print ======$data21
+  return -1
+endi
+if $data31 != 3 then
+  print ======$data31
+  return -1
+endi
+if $data41 != 3 then
+  print ======$data41
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stream/triggerSession0.sim b/tests/script/tsim/stream/triggerSession0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..fb0666fdcfe847dd25a3e4eb3b66acd16ed09f63
--- /dev/null
+++ b/tests/script/tsim/stream/triggerSession0.sim
@@ -0,0 +1,105 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+print =============== create database
+sql create database test vgroups 1
+sql show databases
+if $rows != 3 then
+  return -1
+endi
+
+print $data00 $data01 $data02
+
+sql use test
+sql create table t2(ts timestamp, a int, b int , c int, d double);
+sql create stream streams2 trigger window_close into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 session(ts, 10s);
+
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791222999,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2
values(1648791233001,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 0 then + print ======$rows + return -1 +endi + +sql insert into t2 values(1648791243002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 5 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791223001,1,2,3,1.0) (1648791223002,1,2,3,1.0) (1648791222999,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791233002,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 6 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791253003,1,2,3,1.0); +sleep 300 +sql select * from streamt2; +if $rows != 1 then + print ======$rows + return -1 +endi + +if $data01 != 8 then + print ======$data01 + return -1 +endi + +sql insert into t2 values(1648791243003,1,2,3,1.0) (1648791243002,1,2,3,1.0) (1648791270004,1,2,3,1.0) (1648791280005,1,2,3,1.0) (1648791290006,1,2,3,1.0); +sleep 500 +sql select * from streamt2; +if $rows != 3 then + print ======$rows + return -1 +endi + +if $data01 != 10 then + print ======$data01 + return -1 +endi +if $data11 != 1 then + print ======$data11 + return -1 +endi +if $data21 != 1 then + print ======$data21 + return -1 +endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/account_create.sim b/tests/script/unique/account/account_create.sim deleted file mode 100644 index e36de29e7c5835ddc78a9f3eab4b2b4d34634c42..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/account_create.sim +++ /dev/null @@ -1,80 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============================ dnode1 start - -$i = 0 -$dbPrefix = acdb -$tbPrefix = actb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i -$accountPrefix = acac - -print =============== step1-4 -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 3 then - return -1 -endi - -$i = 0 -$acc = $accountPrefix . 
$i -sql_error create account $acc PASS pass123 -sql create account $acc PASS 'pass123' -#sql create account $acc PASS 'pass123' -x step1 -# return -1 -#step1: -sql create user $acc PASS 'pass123' -x step2 - return -1 -step2: - -sql show accounts -if $rows != 2 then - return -1 -endi - -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step5-6 -sql drop account $acc -sql drop account $acc -x step5 - return -1 -step5: -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step7 -sql create account $acc PASS 'pass123' -#sql create account $acc PASS 'pass123' -x step7 -# return -1 -#step7: - -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account $acc -sql show accounts -if $rows != 1 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/account_delete.sim b/tests/script/unique/account/account_delete.sim deleted file mode 100644 index d99a8b559dc6e04e4d6996e042d915671781d699..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/account_delete.sim +++ /dev/null @@ -1,99 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============= step1 -sql create account oroot pass 'taosdata' -sql close -sql connect oroot -sleep 2000 - -print ============= step2 -sql create user read pass 'taosdata' -sql create user write pass 'taosdata' - -sql create database d1 -sql create database d2 -sql create table d1.t1 (ts timestamp, i int) -sql create table d2.t2 (ts timestamp, i int) -sql insert into d1.t1 values(now, 1) -sql insert into d2.t2 values(now, 1) -sql insert into d2.t2 values(now+1s, 2) - -sql show databases -if $rows != 2 then - return -1 -endi -sql show users -if $rows != 4 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -print ============= step3 -sql close -sql connect -sleep 2000 - -sql show databases -if $rows != 0 then - return -1 -endi -sql show dnodes -print $data00 $data01 $data02 $data03 -if $data02 != 2 then - return -1 -endi -sql drop account oroot - -print ============= step4 -$x = 0 -show4: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show dnodes -if $data02 != 0 then - goto show4 -endi - -print ============= step5 -sql create account oroot pass 'taosdata' - -sql close -sql connect oroot -sleep 2000 - -sql show databases -if $rows != 0 then - return -1 -endi -sql show users -if $rows != 2 then - return -1 -endi - -sql close -sql connect -sleep 2000 -sql drop account oroot -sql show accounts -if $rows != 1 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/account_len.sim b/tests/script/unique/account/account_len.sim deleted file mode 100644 index f8379bdf954bdde122e68585b973f4957ef15739..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/account_len.sim +++ /dev/null @@ -1,92 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -$i = 0 -$dbPrefix = aldb -$tbPrefix = altb -$db = $dbPrefix . $i -$tb = $tbPrefix . 
$i - -print =============== step1 -sql drop account ac -x step0 - return -1 -step0: - -sql create account PASS 123 -x step1 - return -1 -step1: - -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step2 -sql drop account a -x step2 -step2: -sql create account a PASS '123' -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account a -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step3 -sql drop account abc01234567890123456789 -x step3 -step3: -sql create account abc01234567890123456789 PASS '123' -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account abc01234567890123456789 -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step4 -sql create account abcd01234567890123456789012345689012345 PASS '123' -x step4 - return -1 -step4: -sql show accounts -if $rows != 1 then - return -1 -endi - -print =============== step5 -sql drop account 123 -x step5 -step5: -sql create account 123 pass '123' -x step51 - return -1 -step51: - -sql create account a123 PASS '123' -sql show accounts -if $rows != 2 then - return -1 -endi - -sql drop account a123 -sql show accounts -if $rows != 1 then - return -1 -endi - -sql show users -if $rows != 3 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/authority.sim b/tests/script/unique/account/authority.sim deleted file mode 100644 index 8f2408de1429a8ea34add79e335f6bf7f42ca2b0..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/authority.sim +++ /dev/null @@ -1,346 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============= step1 - -sql create user read pass 'taosdata' -sql create user write pass 'taosdata' -sql create user manage pass 'taosdata' - -sql create user a PASS 'ade' privilege -x step11 - return -1 -step11: - -sql create user a PASS 'ade' privilege a -x step12 - return -1 -step12: - -sql create user a PASS 'ade' privilege read -x step13 - return -1 -step13: - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi - -sql alter user read privilege read -sql alter user write privilege write -sql_error alter user manage privilege super - -print ============= step2 -sql close -sql connect write -sleep 2000 - -sql create database d1 -sql create database d2 -sql create table d1.t1 (ts timestamp, i int) -sql create table d2.t2 (ts timestamp, i int) -sql insert into d1.t1 values(now, 1) -sql insert into d2.t2 values(now, 1) -sql insert into d2.t2 values(now+1s, 2) - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -sql create account t1 pass 'taosdata' -x step21 - return -1 -step21: - -sql create user t1 pass 'taosdata' -x step22 - return -1 -step22: - -sql alter user read pass 'taosdata' -x step23 - return -1 -step23: - -sql create dnode $hostname2 -x step24 - return -1 -step24: - -sql drop dnode $hostname2 -x step25 - return -1 -step25: - -sql create mnode 192.168.0.2 -x step26 - return -1 -step26: - -sql drop mnode 192.168.0.2 -x step27 - return -1 -step27: - -sql drop account 
root -x step28 - return -1 -step28: - -sql alter user write pass 'taosdata' - -print ============= step3 -sql close -sql connect read -sleep 2000 - -sql create database d3 -x step31 - return -1 -step31: - -sql create table d1.t3 (ts timestamp, i int) -x step32 - return -1 -step32: - -#sql insert into d1.t1 values(now, 2) -x step33 -# return -1 -#step33: - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi - -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -sql sql create account t1 pass 'taosdata' -x step34 - return -1 -step34: - -sql sql create user t1 pass 'taosdata' -x step35 - return -1 -step35: - -print ============= step4 -sql close -sql connect manage -sleep 2000 - -sql create database d3 -sql create database d4 -sql create table d3.t3 (ts timestamp, i int) -sql create table d4.t4 (ts timestamp, i int) - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 6 then - return -1 -endi -sql show databases -if $rows != 4 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -if $rows != 2 then - return -1 -endi - -sql create account other pass 'taosdata' -x step41 - return -1 -step41: - -sql close -sql connect -sleep 2000 -sql create account other pass 'taosdata' - -print ============= step5 -sql close -sql connect other -sleep 2000 -sql create user read pass 'taosdata' -x step51 - return -1 -step51: -sql create other write pass 'taosdata' -x step52 - return -1 -step52: - -sql create user oread pass 'taosdata' -sql create user owrite pass 'taosdata' -sql create user omanage pass 'taosdata' - -sql show users -print show users $rows -if $rows != 5 then - return -1 -endi - -sql alter user oread privilege read -sql alter user owrite privilege write -sql alter user oroot privilege super -x step53 - return -1 -step53: -sql alter user read privilege read -x step54 - return -1 -step54: - -print ============= step6 -sql close -sql connect owrite -sleep 2000 -sql reset query cache -sleep 1000 -sql create database d1 -sql create database d3 -sql create table d1.t1 (ts timestamp, i int) -sql create table d3.t3 (ts timestamp, i int) -sql insert into d1.t1 values(now, 11) -sql insert into d3.t3 values(now, 11) -sql insert into d3.t3 values(now+1s, 12) - -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -x step6 - return -1 -step6: -sql select * from d3.t3 -if $rows != 2 then - return -1 -endi - -sql sql create account t1 pass 'taosdata' -x step61 - return -1 -step61: - -sql sql create user t1 pass 'taosdata' -x step62 - return -1 -step62: - -print ============= step7 -sql close -sql connect oread -sleep 2000 - -sql create database d7 -x step71 - return -1 -step71: - -sql show databases -if $rows != 2 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -x step72 - return -1 -step72: -sql select * from d3.t3 -if $rows != 2 then - return -1 -endi - -sql sql create account t1 pass 'taosdata' -x step73 - return -1 -step73: - -sql sql create user t1 pass 'taosdata' -x step74 - return -1 -step74: - -print ============= step8 -sql close -sql connect omanage -sleep 2000 - -sql create account t1 pass 'taosdata' -x step81 - return -1 -step81: - -sql create database d4 -sql create 
table d4.t4 (ts timestamp, i int) - -sql show databases -if $rows != 3 then - return -1 -endi -sql select * from d1.t1 -if $rows != 1 then - return -1 -endi -sql select * from d2.t2 -x step82 - return -1 -step82: -sql select * from d3.t3 -if $rows != 2 then - return -1 -endi - -print ============= step9 -sql close -sql connect -sleep 2000 -sql show databases -if $rows != 4 then - return -1 -endi - -sql drop account other -sql drop user read -sql drop user manage -sql drop user write - -sql close -sql connect -sleep 2000 -sql drop database d1 -sql drop database d2 -sql drop database d3 -sql drop database d4 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/basic.sim b/tests/script/unique/account/basic.sim deleted file mode 100644 index 00e706a4482d9fa57ed2f97a9995ce84d3667fa1..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/basic.sim +++ /dev/null @@ -1,46 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -print =============== show accounts -sql show accounts -if $rows != 1 then - return -1 -endi - -print $data00 $data01 $data02 - -print =============== create account1 -sql create account account1 PASS 'account1' -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data00 $data01 $data02 -print $data10 $data11 $data22 - -print =============== create account2 -sql create account account2 PASS 'account2' -sql show accounts -if $rows != 3 then - return -1 -endi - -print $data00 $data01 $data02 -print $data10 $data11 $data22 -print $data20 $data11 $data22 - -print =============== drop account1 -sql drop account account1 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data00 $data01 $data02 -print $data10 $data11 $data22 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim deleted file mode 100644 index 102f5b6a381e5100b35a4f0125b1318bcb8b1d76..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/paras.sim +++ /dev/null @@ -1,114 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -print =============== show accounts -sql show accounts -if $rows != 1 then - return -1 -endi - -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 3/128 then - return -1 -endi -if $data03 != 0/128 then - return -1 -endi -if $data04 != 0/2147483647 then - return -1 -endi -if $data05 != 0/1000 then - return -1 -endi -if $data06 != 0.000/unlimited then - return -1 -endi - -print =============== create account -sql create account hou pass "hou" tseries 80000 storage 10737418240 streams 10 qtime 3600 dbs 3 users 3 conns 10 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data10 $data11 $data12 $data13 $data14 $data15 $data16 -if $data10 != hou then - return -1 -endi -if $data12 != 2/3 then - return -1 -endi -if $data13 != 0/3 then - return -1 -endi -if $data14 != 0/80000 then - return -1 -endi -if $data15 != 0/10 then - return -1 -endi -if $data16 != 0.000/10.000 then - return -1 -endi - -print =============== alter account -sql alter account hou pass "hou" tseries 8000 streams 10 dbs 5 users 5 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data10 $data11 $data12 $data13 $data14 $data15 
$data16 -if $data10 != hou then - return -1 -endi -if $data12 != 2/5 then - return -1 -endi -if $data13 != 0/5 then - return -1 -endi -if $data14 != 0/8000 then - return -1 -endi -if $data15 != 0/10 then - return -1 -endi -if $data16 != 0.000/10.000 then - return -1 -endi - -print =============== alter account -sql create account hou pass "hou" tseries 8000 streams 10 dbs 5 users 6 -sql show accounts -if $rows != 2 then - return -1 -endi - -print $data10 $data11 $data12 $data13 $data14 $data15 $data16 -if $data10 != hou then - return -1 -endi -if $data12 != 2/6 then - return -1 -endi -if $data13 != 0/5 then - return -1 -endi -if $data14 != 0/8000 then - return -1 -endi -if $data15 != 0/10 then - return -1 -endi -if $data16 != 0.000/10.000 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/pass_alter.sim b/tests/script/unique/account/pass_alter.sim deleted file mode 100644 index 8b857b014a292d53536c5acf2a00daa15be11239..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/pass_alter.sim +++ /dev/null @@ -1,116 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============= step1 -sql create user read pass 'taosdata1' -sql create user write pass 'taosdata1' - -sql alter user read pass 'taosdata' -sql alter user write pass 'taosdata' - -sql show accounts -if $rows != 1 then - return -1 -endi -sql show users -if $rows != 5 then - return -1 -endi - -print ============= step2 -sql close -sql connect read -sleep 2000 -sql alter user read pass 'taosdata' -sql alter user write pass 'taosdata1' -x step2 - return -1 -step2: - - -print ============= step3 -sql close -sql connect write -sleep 2000 -sql alter user write pass 'taosdata' -sql alter user read pass 'taosdata' -x step3 - return -1 -step3: - -print ============= step4 -sql close -sleep 1000 -sql connect -sleep 2000 -sql create account oroot pass 'taosdata' -sql show accounts -if $rows != 2 then - return -1 -endi -sql show users -if $rows != 5 then - return -1 -endi - -print ============= step5 -sql close -sql connect oroot -sleep 2000 - -sql create user oread pass 'taosdata1' -sql create user owrite pass 'taosdata1' -sql alter user oread pass 'taosdata' -sql alter user owrite pass 'taosdata' - -sql create user read pass 'taosdata1' -x step51 - return -1 -step51: -sql alter user read pass 'taosdata1' -x step52 - return -1 -step52: - -sql show accounts -x step53 - return -1 -step53: -sql show users -print show users $rows -if $rows != 4 then - return -1 -endi - -print ============= step6 -sql close -sql connect oread -sleep 2000 -sql alter user oread pass 'taosdata' -sql alter user owrite pass 'taosdata1' -x step6 - return -1 -step6: - - -print ============= step7 -sql close -sql connect owrite -sleep 2000 -sql alter user owrite pass 'taosdata' -sql alter user oread pass 'taosdata' -x step7 - return -1 -step7: - -print ============= step8 -sql close -sql connect -sleep 2000 -sql alter user oread pass 'taosdata' -sql alter user owrite pass 'taosdata' -sql alter user oroot pass 'taosdata' - -sql drop account oroot -sql drop user read -sql drop user write - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim deleted file mode 100644 index 
f4ceb76f7b8b41873217bd11ae2c3d385386b0e9..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/pass_len.sim +++ /dev/null @@ -1,81 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -$i = 0 -$dbPrefix = apdb -$tbPrefix = aptb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i -$userPrefix = apusr - -print =============== step1 -$i = 0 -$user = $userPrefix . $i - -sql drop user $user -x step11 - return -1 -step11: - -sql create user $user PASS -x step12 - return -1 -step12: - -sql create user $user PASS 'taosdata' - -sql show users -if $rows != 4 then - return -1 -endi - -print =============== step2 -$i = 1 -$user = $userPrefix . $i -sql drop user $user -x step2 -step2: -sql create user $user PASS '1' -sql show users -if $rows != 5 then - return -1 -endi - -print =============== step3 -$i = 2 -$user = $userPrefix . $i -sql drop user $user -x step3 -step3: - -sql create user $user PASS 'abc0123456789' -sql show users -if $rows != 6 then - return -1 -endi - -print =============== step4 -$i = 3 -$user = $userPrefix . $i -sql create user $user PASS 'abcd012345678901234567891234567890' -x step4 - return -1 -step4: -sql show users -if $rows != 6 then - return -1 -endi - -$i = 0 -while $i < 3 - $user = $userPrefix . $i - sql drop user $user - $i = $i + 1 -endw - -sql show users -if $rows != 3 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/testSuite.sim b/tests/script/unique/account/testSuite.sim deleted file mode 100644 index 9d4141cfe0c086f9a8863fffb00a9cb0f410e265..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/testSuite.sim +++ /dev/null @@ -1,11 +0,0 @@ -run unique/account/account_create.sim -run unique/account/account_delete.sim -run unique/account/account_len.sim -run unique/account/authority.sim -run unique/account/basic.sim -run unique/account/paras.sim -run unique/account/pass_alter.sim -run unique/account/pass_len.sim -run unique/account/usage.sim -run unique/account/user_create.sim -run unique/account/user_len.sim diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim deleted file mode 100644 index 3b9c20b159a6237f469fc1e48b5b3a3f4ca5f7b8..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/usage.sim +++ /dev/null @@ -1,154 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/exec.sh -n dnode1 -s start -#system sh/exec.sh -n monitor -s 1 -system sh/exec.sh -n monitorInterval -s 1 -sleep 2000 -sql connect - -print =============== show accounts - -print =============== create account -sql alter account root pass "taosdata" tseries 8000 streams 10 dbs 5 users 5 -sql show accounts -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 3/5 then - return -1 -endi -if $data03 != 0/5 then - return -1 -endi -if $data04 != 0/8000 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/unlimited then - return -1 -endi - -print =============== check usage account -sql create database d1 wal 2 -sql create database d2 wal 2 -sql create database d3 wal 2 -sql create database d4 wal 2 -sql create database d5 wal 2 - -sql create table d1.t1 (ts timestamp, i int); -sql create user u1 pass "u1" - -sql show accounts -print $data10 $data11 $data12 $data13 $data14 
$data15 $data16 -if $data00 != root then - return -1 -endi -if $data02 != 4/5 then - return -1 -endi -if $data03 != 5/5 then - return -1 -endi -if $data04 != 1/8000 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/unlimited then - return -1 -endi - -print =============== step2 -sql alter account root pass "taosdata" tseries 10 storage 1073741824 streams 10 dbs 5 users 5 -sql show accounts -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 4/5 then - return -1 -endi -if $data03 != 5/5 then - return -1 -endi -if $data04 != 1/10 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/1.000 then - return -1 -endi - -print =============== step3 -sql alter account root pass "taosdata" tseries 10 storage 16 streams 10 dbs 5 users 5 -sql show accounts -print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != root then - return -1 -endi -if $data02 != 4/5 then - return -1 -endi -if $data03 != 5/5 then - return -1 -endi -if $data04 != 1/10 then - return -1 -endi -if $data05 != 0/10 then - return -1 -endi -if $data06 != 0.000/0.000 then - return -1 -endi - -print =============== step4 -sql insert into d1.t1 values(now + 1s, 1) -sql insert into d1.t1 values(now + 2s, 2) - -sleep 10000 -print no write auth -sql_error insert into d1.t1 values(now + 3s, 2) -sql_error insert into d1.t1 values(now + 4s, 2) - -sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5 -sleep 10000 -print has write auth -sql insert into d1.t1 values(now + 5s, 1) -sql insert into d1.t1 values(now + 6s, 2) - -# no write auth -sleep 10000 -print no write auth -sql_error insert into d1.t1 values(now + 7s, 2) -sql_error insert into d1.t1 values(now + 8s, 2) - -print =============== step5 -sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all -sleep 10000 - -sql insert into d1.t1 values(now + 11s, 1) -sql insert into d1.t1 values(now + 12s, 2) - -sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no -sleep 10000 -print no write auth -sql_error insert into d1.t1 values(now + 13s, 2) -sql_error insert into d1.t1 values(now + 14s, 2) - -sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all -sleep 10000 -print has write auth -sql insert into d1.t1 values(now + 15s, 1) -sql insert into d1.t1 values(now + 16s, 2) - -print =============== check grant -sql_error create database d6 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/user_create.sim b/tests/script/unique/account/user_create.sim deleted file mode 100644 index e54a380f0dbef8107de452354ea01bc58262d548..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/user_create.sim +++ /dev/null @@ -1,84 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print =============== step1 -sql show users -if $rows != 3 then - return -1 -endi - -sql create user read PASS 'pass123' -sql create user read PASS 'pass123' -x step1 - return -1 -step1: - -sql show users -if $rows != 4 then - return -1 -endi - -sql alter user read PASS 'taosdata' - -print =============== step2 -sql close -sql connect read -sleep 2000 - -sql alter user read PASS 'taosdata' - -print 
=============== step3 -sql drop user read -x step31 - return -1 -step31: -sql drop user _root -x step32 - return -1 -step32: -sql drop user monitor -x step33 - return -1 -step33: - -print =============== step4 -sql close -sql connect -sleep 2000 - -sql alter user read privilege read -sql show users -print $data1_read -if $data1_read != readable then - return -1 -endi - -sql_error alter user read privilege super -sql show users -print $data1_read -if $data1_read != readable then - return -1 -endi - -sql alter user read privilege write -sql show users -if $data1_read != writable then - return -1 -endi - -sql alter user read privilege 1 -x step43 - return -1 -step43: - -sql drop user _root -x step41 - return -1 -step41: - -sql drop user monitor -x step42 - return -1 -step42: - -sql drop user read - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/account/user_len.sim b/tests/script/unique/account/user_len.sim deleted file mode 100644 index b8d448f0ffc9e43cbc0f0a5a849bda215e72e790..0000000000000000000000000000000000000000 --- a/tests/script/unique/account/user_len.sim +++ /dev/null @@ -1,94 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -$i = 0 -$dbPrefix = lm_us_db -$tbPrefix = lm_us_tb -$db = $dbPrefix . $i -$tb = $tbPrefix . $i - -print =============== step1 -sql drop user ac -x step0 - return -1 -step0: - -sql create user PASS '123' -x step1 - return -1 -step1: - -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step2 -sql drop user a -x step2 -step2: -sleep 1000 -sql create user a PASS '123' -sql show users -if $rows != 4 then - return -1 -endi - -sql drop user a -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step3 -sql drop user abc01234567890123456789 -x step3 -step3: - -sql create user abc01234567890123456789 PASS '123' -sql show users -if $rows != 4 then - return -1 -endi - -sql drop user abc01234567890123456789 -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step4 -sql create user abcd0123456789012345678901234567890111 PASS '123' -x step4 - return -1 -step4: -sql show users -if $rows != 3 then - return -1 -endi - -print =============== step5 -sql drop user 123 -x step5 -step5: -sql create user 123 PASS '123' -x step61 - return -1 -step61: - -sql create user a123 PASS '123' -sql show users -if $rows != 4 then - return -1 -endi - -sql drop user a123 -sql show users -if $rows != 3 then - return -1 -endi - -sql show accounts -if $rows != 1 then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim deleted file mode 100644 index ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/admin.sim +++ /dev/null @@ -1,192 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c http -v 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -#system sh/cfg.sh -n dnode1 -c adminRowLimit -v 10 -system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135 -system sh/exec.sh -n dnode1 -s start - -sql connect -sleep 2000 - -print ============================ dnode1 start - -print =============== step0 - prepare data -sql create database d1 -sql use d1 - -sql create table table_admin (ts timestamp, i int) - -sql 
insert into table_admin values('2017-12-25 21:28:41.022', 1) -sql insert into table_admin values('2017-12-25 21:28:42.022', 2) -sql insert into table_admin values('2017-12-25 21:28:43.022', 3) -sql insert into table_admin values('2017-12-25 21:28:44.022', 4) -sql insert into table_admin values('2017-12-25 21:28:45.022', 5) -sql insert into table_admin values('2017-12-25 21:28:46.022', 6) -sql insert into table_admin values('2017-12-25 21:28:47.022', 7) -sql insert into table_admin values('2017-12-25 21:28:48.022', 8) -sql insert into table_admin values('2017-12-25 21:28:49.022', 9) -sql insert into table_admin values('2017-12-25 21:28:50.022', 10) - -print =============== step1 - login - -system_content curl 127.0.0.1:7111/admin/ -print 1-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - print actual: $system_content - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/xx -print 2-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login -print 3-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root -print 4-> $system_content -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root/123 -print 5-> $system_content -if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/login/root/123/1/1/3 -print 6-> $system_content -if $system_content != @{"status":"error","code":3,"desc":"Authentication failure"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' 
-d 'show databases' 127.0.0.1:7111/admin/login/root/1 -print 7-> $system_content -if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:7111/admin/login/root/1 -print 8-> $system_content -if $system_content != @{"status":"error","code":4387,"desc":"invalid format of Authorization"}@ then - return -1 -endi - -sleep 2000 -system_content curl 127.0.0.1:7111/admin/login/root/taosdata -print 9 -----> $system_content - -if $system_content != {"status":"succ","code":0,"desc":"/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"} then - return -1 -endi - -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:7111/admin/login/root/1 -#print 10-> $system_content -#if $system_content != @{"status":"error","code":29,"desc":"failed to connect to server"}@ then -# return -1 -#endi - -print =============== step2 - logout - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/logout -print 10 -----> $system_content - -if $system_content != @{"status":"succ","code":0,"desc":"logout success"}@ then - return -1 -endi - -system_content curl 127.0.0.1:7111/admin/logout -print 11 -----> $system_content - -if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then - return -1 -endi - -print =============== step3 - info - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info -print curl 127.0.0.1:7111/admin/info -----> $system_content -if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then - return -1 -endi - -print =============== step4 - meta - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show mnodes' 127.0.0.1:7111/admin/meta -print curl 127.0.0.1:7111/admin/meta -----> $system_content -#if $system_content != @{"status":"succ","head":["column type","column name","column bytes"],"data":[["binary","IP",16],["timestamp","created time",8],["binary","status",10],["binary","role",10],["binary","public ip",16]],"rows":5}@ then -# return -1 -#endi - -print =============== step5 - query data - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/all -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/sql -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 
21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10]],"rows":10}@ then - return -1 -endi - -print =============== step6 - insert data -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d "insert into d1.table_admin values('2017-12-25 21:28:51.022', 11)" 127.0.0.1:7111/admin/sql -print curl 127.0.0.1:7111/admin/sql -----> $system_content -if $system_content != @{"status":"succ","head":["affect_rows"],"data":[[1]],"rows":1}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all -print curl 127.0.0.1:7111/admin/all -----> $system_content -if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then - print actual: $system_content - print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11} - return -1 -endi - -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/sql -#print curl 127.0.0.1:7111/admin/sql -----> $system_content -#if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:51.022",11],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:42.022",2]],"rows":10}@ then -# return -1 -#endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' 127.0.0.1:7111/admin/info -print curl 127.0.0.1:7111/admin/info -----> $system_content -if $system_content != {"status":"succ","data":[{"dbs":1,"tables":1,"users":3,"mnodes":1,"dnodes":1}]} then - return -1 -endi - -print =============== step7 - use dbs - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:7111/admin/all -print 23-> $system_content -if $system_content != @{"status":"error","code":4360,"desc":"no need to execute use db cmd"}@ then - return -1 -endi - -print =============== step8 - monitor dbs -#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show dnodes;show mnodes;' 127.0.0.1:7111/admin/sqls -#print 24-> $system_content -#if $system_content != @[{"status":"succ","head":["IP","created time","open vnodes","free vnodes","status","balance 
state"],"data":[["127.0.0.1","2018-09-04 #11:16:13.985",1,3,"ready","balanced"]],"rows":1},{"status":"succ","head":["IP","created time","status","role"],"data":[["127.0.0.1","2018-09-04 11:16:13.371","serving","master"]],"rows":1}]@ then -# return -1 -# endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim deleted file mode 100644 index 7d1e6b03d4547a6b0b2a6a7857000a8a6518a002..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/opentsdb.sim +++ /dev/null @@ -1,247 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c http -v 1 -system sh/cfg.sh -n dnode1 -c wallevel -v 0 -system sh/exec.sh -n dnode1 -s start - -sleep 2000 -sql connect - -print ============================ dnode1 start - -print =============== step1 - parse -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ -print $system_content -if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db123456789012345678901234567890db -print $system_content -if $system_content != @{"status":"error","code":4497,"desc":"database name too long"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/ -print $system_content -if $system_content != @{"status":"error","code":4496,"desc":"database name can not be null"}@ then - return -1 -endi - -system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put2 -print $system_content -if $system_content != @{"status":"error","code":4354,"desc":"invalid url format"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4498,"desc":"invalid opentsdb json fromat"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '{}' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4499,"desc":"metrics size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content - -if $system_content != @{"status":"error","code":4501,"desc":"metric name not find"}@ then - return -1 
-endi - -system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4502,"desc":"metric name type should be string"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4503,"desc":"metric name length is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"Timestamp data out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4505,"desc":"timestamp not find"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4506,"desc":"timestamp type should be integer"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4507,"desc":"timestamp value smaller than 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4517,"desc":"value not find"}@ then - return -1 -endi - -####### - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4508,"desc":"tags not find"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4509,"desc":"tags size is 0"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": 
"sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4512,"desc":"tag name is null"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4513,"desc":"tag name length too long"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4516,"desc":"tag value can not more than 64"}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"status":"error","code":4515,"desc":"tag value is null"}@ then - return -1 -endi - -sleep 2000 - -print =============== step2 - insert single data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then - return -1 -endi - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -print $system_content -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846400000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":1,"affected_rows":1}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ -print $system_content -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000]],"rows":1}@ then - return -1 -endi - -print =============== step3 - multi-query data -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 
1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put - -print $system_content - -if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[3]],"rows":1}@ then - return -1 -endi - -print =============== step4 - summary-put data -system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put?details=false - -print $system_content - -if $system_content != @{"failed":0,"success":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_web01' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["ts","value"],"column_meta":[["ts",9,8],["value",7,8]],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then - return -1 -endi - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_mem_d_bbb' 127.0.0.1:7111/rest/sql/ - -print $system_content - -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[2]],"rows":1}@ then - return -1 -endi - -print =============== step5 - prepare data - -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846402000,"value": 19,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402,"value": 19,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846403000,"value": 20,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846403,"value": 20,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846404000,"value": 21,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846404,"value": 21,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": 
"sys_cpu","timestamp": 1346846405000,"value": 22,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846405,"value": 22,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put -system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846406000,"value": 23,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846406,"value": 23,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:7111/opentsdb/db/put - -system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:7111/rest/sql/ -print $system_content -if $system_content != @{"status":"succ","head":["count(*)"],"column_meta":[["count(*)",5,8]],"data":[[7]],"rows":1}@ then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/script/unique/http/testSuite.sim b/tests/script/unique/http/testSuite.sim deleted file mode 100644 index 3a9753e744b84bfea28e40e8b3554cb82d2ebb40..0000000000000000000000000000000000000000 --- a/tests/script/unique/http/testSuite.sim +++ /dev/null @@ -1,2 +0,0 @@ -run unique/http/admin.sim -run general/http/opentsdb.sim \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt20.sim b/tests/script/unique/mnode/mgmt20.sim deleted file mode 100644 index 8945cffab226ab5dc379057d55e562f5c3ed9cfa..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt20.sim +++ /dev/null @@ -1,88 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -system sh/cfg.sh -n dnode1 -c monitor -v 1 -system sh/cfg.sh -n dnode2 -c monitor -v 1 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -system sh/exec.sh -n dnode2 -s start -sql connect - -print ============== step2 -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT - -print ============== step3 -system sh/exec.sh -n dnode2 -s start -sleep 10000 - -system sh/exec.sh -n dnode1 -s start -sql connect - -print =============== step4 -sql select * from log.dn1 -$d1_first = $rows -sql select * from log.dn2 -$d2_first = $rows - -$x = 0 -show4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show4 -endi -if $data2_2 != slave then - goto show4 -endi - -sleep 2000 -sql select * from log.dn1 -$d1_second = $rows -sql select * from log.dn2 -$d2_second = $rows - -print dnode1 $d1_first $d1_second -print dnode2 $d2_first $d2_second -if $d1_first >= $d1_second then - return -1 -endi - -if $d2_first >= $d2_second then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end 
of file diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim deleted file mode 100644 index 8409383309dbde5500b9719cd64fd74ca5e384b2..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt21.sim +++ /dev/null @@ -1,44 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode2 -s start -sleep 10000 - -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 5 then - return -1 - endi - -sql show mnodes -x show2 -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt22.sim b/tests/script/unique/mnode/mgmt22.sim deleted file mode 100644 index 399805312ba905d55bceffe011cfe074c831684e..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt22.sim +++ /dev/null @@ -1,114 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -sql_error drop dnode $hostname1 -x error1 -print should not drop master - -print ============== step4 -system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 3000 -sql_error show mnodes -print error of no master - -print ============== step5 -sql_error drop dnode $hostname1 -print error of no master - -print ============== step6 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql close -sql connect - -$x = 0 -show6: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -x show6 -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show6 -endi -if $data2_2 != slave then - goto show6 -endi - -print ============== step7 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -show7: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data2_3 -if $data2_1 != master then - goto show7 -endi -if $data2_2 != slave then - goto show7 -endi -if $data3_3 != null then - goto show7 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n 
dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim deleted file mode 100644 index 19c7b4ba762d4bf5a73c10c1afa39e927c7a1c91..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt23.sim +++ /dev/null @@ -1,141 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 8000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step4 -sql drop dnode $hostname2 - -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != null then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi - -system sh/exec.sh -n dnode2 -s stop - -print ============== step5 -sleep 2000 -sql create dnode $hostname2 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode2 -s start - -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != null then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi - -print ============== step6 -system sh/exec.sh -n dnode1 -s stop -sql_error show mnodes - -print ============== step7 -sql_error drop dnode $hostname1 - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt24.sim b/tests/script/unique/mnode/mgmt24.sim deleted file mode 100644 index a7bcc59ac0bfa6163d1e2fddfd3a817b102bfa3c..0000000000000000000000000000000000000000 --- 
a/tests/script/unique/mnode/mgmt24.sim +++ /dev/null @@ -1,84 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode1 -s stop -sleep 2000 -sql_error show mnodes - -print ============== step4 -sql_error drop dnode $hostname1 - -print ============== step5 -system sh/exec.sh -n dnode1 -s start -sql_error create dnode $hostname1 - -sql close -sql connect - -$x = 0 -step5: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step5 - -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto step5 -endi -if $data2_2 != slave then - goto step5 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim deleted file mode 100644 index 9cca9c844806b138faf52186ffc3184d4876a1d6..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt25.sim +++ /dev/null @@ -1,95 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step4 -sql drop dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 
-print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim deleted file mode 100644 index 2816845052e835cf11e0ec7d4ddc71cbdee0ada1..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt26.sim +++ /dev/null @@ -1,123 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 10 then - return -1 - endi - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -if $data2_1 != master then - goto show2 -endi -if $data2_2 != slave then - goto show2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != slave then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - - -print ============== step4 -sql drop dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -print ============== step5 -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -sleep 3000 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 -sleep 6000 - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != slave then - return -1 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file 
diff --git a/tests/script/unique/mnode/mgmt30.sim b/tests/script/unique/mnode/mgmt30.sim deleted file mode 100644 index d0858c0d6cdffa1cb1cd7f2ba570ae0521f412d5..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt30.sim +++ /dev/null @@ -1,68 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000 -system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -sleep 3000 - -sql create dnode $hostname2 -sql create dnode $hostname3 - -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != slave then - goto step2 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim deleted file mode 100644 index ce7cdce35d8c0463564f46d26a0711d39340c8bf..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt33.sim +++ /dev/null @@ -1,214 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if $data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 - -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != null then - goto step2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -step3: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if 
$dnode1Role != master then - goto step3 -endi -if $dnode2Role != slave then - goto step3 -endi -if $dnode3Role != slave then - goto step3 -endi - -print ============== step4 -sql drop dnode $hostname2 - -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != null then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi - -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/exec.sh -n dnode2 -s start - -print ============== step5 -sql create dnode $hostname2 - -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_4 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != slave then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi - -print ============== step6 -system sh/exec.sh -n dnode1 -s stop -$x = 0 -step6: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step6 -$dnode1Role = $data2_1 -$dnode2Role = $data2_4 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != offline then - goto step6 -endi -#if $dnode2Role != master then -# return -1 -#endi -#if $dnode3Role != slave then -# return -1 -#endi - -print ============== step7 -sql drop dnode $hostname1 -$x = 0 -step7: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step7 -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != null then - goto step7 -endi -#if $dnode2Role != master then -# return -1 -#endi -#if $dnode3Role != slave then -# return -1 -#endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim deleted file mode 100644 index d8a46b0955f59273279bbbc5c89c07c05db672d7..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmt34.sim +++ /dev/null @@ -1,269 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sql connect - -sql show mnodes -print dnode1 ==> $data2_1 -print dnode2 ==> $data2_2 -print dnode3 ==> $data3_3 -if $data2_1 != master then - return -1 -endi -if $data3_2 != null then - return -1 -endi -if 
$data3_3 != null then - return -1 -endi - -print ============== step2 -system sh/exec.sh -n dnode2 -s start -sql create dnode $hostname2 -$x = 0 -step2: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step2 -endi -if $dnode2Role != slave then - goto step2 -endi -if $dnode3Role != null then - goto step2 -endi -if $dnode4Role != null then - goto step2 -endi - -print ============== step3 -system sh/exec.sh -n dnode3 -s start -sql create dnode $hostname3 - -$x = 0 -step3: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step3 -endi -if $dnode2Role != slave then - goto step3 -endi -if $dnode3Role != slave then - goto step3 -endi -if $dnode4Role != null then - goto step3 -endi - - -print ============== step4 -system sh/exec.sh -n dnode4 -s start -sql create dnode $hostname4 -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != slave then - goto step4 -endi -if $dnode3Role != slave then - goto step4 -endi -if $dnode4Role != null then - goto step4 -endi - -print ============== step5 -sql drop dnode $hostname2 -$x = 0 -step5: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step5 -endi -if $dnode2Role != null then - goto step5 -endi -if $dnode3Role != slave then - goto step5 -endi -if $dnode4Role != slave then - goto step5 -endi - -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/exec.sh -n dnode2 -s start - -print ============== step6 -sql create dnode $hostname2 -$x = 0 -step6: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != master then - goto step6 -endi -if $dnode2Role != null then - goto step6 -endi -if $dnode3Role != slave then - goto step6 -endi -if $dnode4Role != slave then - goto step6 -endi - -print ============== step7 -system sh/exec.sh -n dnode1 -s stop -$x = 0 -step7: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step7 -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print 
dnode4 ==> $dnode4Role - -if $dnode1Role != offline then - goto step7 -endi - -print ============== step8 -sql drop dnode $hostname1 -step8: - $x = $x + 1 - sleep 1000 - if $x == 10 then - return -1 - endi - -sql show mnodes -x step8 -$dnode1Role = $data2_1 -$dnode2Role = $data2_5 -$dnode3Role = $data2_3 -$dnode4Role = $data2_4 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role -print dnode4 ==> $dnode4Role - -if $dnode1Role != null then - goto step8 -endi -if $dnode2Role != slave then - goto step8 -endi -#if $dnode3Role != master then -# return -1 -#endi -#if $dnode4Role != slave then -# return -1 -#endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/mgmtr2.sim b/tests/script/unique/mnode/mgmtr2.sim deleted file mode 100644 index 5afb41905846bff3ce9894e928245a7d34078354..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/mgmtr2.sim +++ /dev/null @@ -1,87 +0,0 @@ -system sh/stop_dnodes.sh -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2 - -print ============== step1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -sql connect - -sql show mnodes -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - return -1 -endi -if $dnode2Role != null then - return -1 -endi -if $dnode3Role != null then - return -1 -endi - -print ============== step2 -sql create dnode $hostname2 -sql create dnode $hostname3 - -print ============== step3 -print ========= start dnode2 and dnode3 - -system sh/exec.sh -n dnode2 -s start -sleep 1000 -system sh/exec.sh -n dnode3 -s start - -sleep 8000 -system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop -sleep 4000 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -sleep 4000 -system sh/exec.sh -n dnode2 -s stop -system sh/exec.sh -n dnode3 -s stop -sleep 4000 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start - -print ============== step4 -$x = 0 -step4: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi - -sql show mnodes - -$dnode1Role = $data2_1 -$dnode2Role = $data2_2 -$dnode3Role = $data2_3 -print dnode1 ==> $dnode1Role -print dnode2 ==> $dnode2Role -print dnode3 ==> $dnode3Role - -if $dnode1Role != master then - goto step4 -endi -if $dnode2Role != slave then - goto step4 -endi -if $dnode3Role != null then - goto step4 -endi - -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim deleted file mode 100644 index b9adbe06a282548d56d7e7feb8a36f99198d8c0d..0000000000000000000000000000000000000000 --- a/tests/script/unique/mnode/testSuite.sim +++ /dev/null @@ -1,9 +0,0 @@ -run 
unique/mnode/mgmt21.sim -run unique/mnode/mgmt22.sim -run unique/mnode/mgmt23.sim -run unique/mnode/mgmt24.sim -run unique/mnode/mgmt25.sim -run unique/mnode/mgmt26.sim -run unique/mnode/mgmt33.sim -run unique/mnode/mgmt34.sim -run unique/mnode/mgmtr2.sim diff --git a/tests/script/unique/stream/metrics_balance.sim b/tests/script/unique/stream/metrics_balance.sim deleted file mode 100644 index ff48c2236709635c8d1a790104b0185144a96866..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_balance.sim +++ /dev/null @@ -1,312 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 3 -$rowNum = 200 - -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start -sql connect - -print ============== step1 -$db = $dbPrefix -sql create database $db -sql use $db - -$i = 0 -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 3 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 6 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 9 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$st = $stPrefix . $i -$mt = $mtPrefix . $i -$tbNum = 12 -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . 
s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - - -print =============== step2 - -sql show tables -if $rows != 16 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -$i = 0 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r0 = $rows -print $st ==> $r0 $data00 $data01 $data10 $data11 - -$i = 3 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r3 = $rows -print $st ==> $r3 $data00 $data01 $data10 $data11 - -$i = 6 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r6 = $rows -print $st ==> $r6 $data00 $data01 $data10 $data11 - -$i = 9 -$mt = $mtPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r9 = $rows -print $st ==> $r9 $data00 $data01 $data10 $data11 - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 - -$x = 0 -show1: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 0 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 2 then - goto show2 -endi -if $dnode2Vnodes != 2 then - goto show2 -endi - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 -print =============== step5 -print sleep 22 seconds -sleep 22000 - -print =============== step6 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 3 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 6 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 9 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -print rows0=>$r0 rows3=>$r3 rows6=>$r6 rows9=>$r9 -print =============== step7 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r0 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r0 then - return -1 -endi - -$i = 3 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r3 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r3 then - return -1 -endi - - -$i = 6 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r6 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r6 then - return -1 -endi - -$i = 9 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $r9 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r9 then - return -1 -endi - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/metrics_replica1_dnode2.sim b/tests/script/unique/stream/metrics_replica1_dnode2.sim deleted file mode 100644 index 20c37cefc39f8fa6393d49934adb046f409fca25..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica1_dnode2.sim +++ /dev/null @@ -1,260 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = m1d_db -$tbPrefix = m1d_tb -$mtPrefix = m1d_mt -$stPrefix = m1d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 32 seconds -sleep 32000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode2.sim b/tests/script/unique/stream/metrics_replica2_dnode2.sim deleted file mode 100644 index aa8c1871017982cecc695abc8f64d732a8a7fc4e..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode2.sim +++ /dev/null @@ -1,260 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - - -print ======================== dnode1 start - -$dbPrefix = m2d_db -$tbPrefix = m2d_tb -$mtPrefix = m2d_mt -$stPrefix = m2d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim deleted file mode 100644 index be2fcefe66ed6ca2e24a44cd22fa072201137b89..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode2_vnoden.sim +++ /dev/null @@ -1,261 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2dv_db -$tbPrefix = m2dv_tb -$mtPrefix = m2dv_mt -$stPrefix = m2dv_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica2_dnode3.sim b/tests/script/unique/stream/metrics_replica2_dnode3.sim deleted file mode 100644 index f7b17610c380d9f90a2cefd4af86ea766facdffa..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica2_dnode3.sim +++ /dev/null @@ -1,270 +0,0 @@ -system sh/stop_dnodes.sh - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2d3_db -$tbPrefix = m2d3_tb -$mtPrefix = m2d3_mt -$stPrefix = m2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . 
su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_replica3_dnode4.sim b/tests/script/unique/stream/metrics_replica3_dnode4.sim deleted file mode 100644 index 402712800313ff5b96f970d12ffe007f77bc26f7..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_replica3_dnode4.sim +++ /dev/null @@ -1,280 +0,0 @@ -system sh/stop_dnodes.sh - - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start - -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi -if $data4_192.168.0.4 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = m2d3_db -$tbPrefix = m2d3_tb -$mtPrefix = m2d3_mt -$stPrefix = m2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $mt interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $mt interval(1d) -print select count(tbcol2) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $mt interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $mt interval(1d) -print select avg(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
av -sql create table $st as select avg(tbcol) from $mt interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $mt interval(1d) -print select sum(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $mt interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $mt interval(1d) -print select min(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $mt interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $mt interval(1d) -print select max(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $mt interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $mt interval(1d) -print select first(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $mt interval(1d) - -print =============== step10 la -sql select last(tbcol) from $mt interval(1d) -print select last(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $mt interval(1d) - -print =============== step11 wh -sql select count(tbcol) from $mt where ts < now + 4m interval(1d) -print select count(tbcol) from $mt where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $mt where ts < now + 4m interval(1d) - -print =============== step12 as -sql select count(tbcol) from $mt interval(1d) -print select count(tbcol) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $mt interval(1d) - -print =============== step13 -print sleep 22 seconds -sleep 22000 - -print =============== step14 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 1900 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . 
wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != 200 then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi diff --git a/tests/script/unique/stream/metrics_vnode_stop.sim b/tests/script/unique/stream/metrics_vnode_stop.sim deleted file mode 100644 index cd84cb3cdf5f8096f4986a222cc371db3900f765..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/metrics_vnode_stop.sim +++ /dev/null @@ -1,188 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step3 -system sh/exec.sh -n dnode2 -s stop - -print =============== step4 -print sleep 22 seconds -sleep 22000 - -print =============== step5 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -print ============= step6 - -sql close -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode2 -s start -sleep 2000 - -$x = 0 -connectTbase2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql connect -x connectTbase2 -sleep 2000 - -sql create dnode $hostname1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step7 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step8 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $mt interval(1d) -print select count(*) from $mt interval(1d) ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $mt interval(1d) - -print =============== step9 -system sh/exec.sh -n dnode1 -s stop - -print =============== step10 -print sleep 22 seconds -sleep 22000 - -print =============== step11 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 200 then - return -1 -endi - - - diff --git a/tests/script/unique/stream/table_balance.sim b/tests/script/unique/stream/table_balance.sim deleted file mode 100644 index 45e054e2efdfbd7f3d01e3a860c5ac227f3327fc..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_balance.sim +++ /dev/null @@ -1,238 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 - -$dbPrefix = tb_db -$tbPrefix = tb_tb -$mtPrefix = tb_mt -$stPrefix = tb_st -$tbNum = 10 -$rowNum = 200 -$totalNum = 200 - -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -print ============== step1 -$i = 0 -$db = $dbPrefix -$mt = $mtPrefix -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - if $i == 0 then - sleep 2000 - endi - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r1 = $rows -print $st ==> $r1 $data00 $data01 $data10 $data11 - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -$r5 = $rows -print $st ==> $r5 $data00 $data01 $data10 $data11 - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -$r8 = $rows -print $st ==> $r8 $data00 $data01 $data10 $data11 - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 - -$x = 0 -show1: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 0 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 2 then - goto show2 -endi -if $dnode2Vnodes != 2 then - goto show2 -endi - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 -print =============== step5 -print sleep 22 seconds -sleep 22000 - -print =============== step6 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $tb -if $rows != $rowNum then - return -1 -endi - -print rows1=>$r1 rows5=>$r5 rows8=>$r8 -print =============== step7 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r1 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r1 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $r5 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r5 then - return -1 -endi - - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $r8 $rows , $data00 $data01 $data10 $data11 -if $rows == 0 then - return -1 -endi -if $rows <= $r8 then - return -1 -endi - - -if $r1 != $r5 then - return -1 -endi - -if $r8 != $r5 then - return -1 -endi - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/table_move.sim b/tests/script/unique/stream/table_move.sim deleted file mode 100644 index 964a0c025363fd650e8051312a812fffbddaea7d..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_move.sim +++ /dev/null @@ -1,269 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 - -system sh/cfg.sh -n dnode1 -c statusInterval -v 1 -system sh/cfg.sh -n dnode2 -c statusInterval -v 1 -system sh/cfg.sh -n dnode3 -c statusInterval -v 1 -system sh/cfg.sh -n dnode4 -c statusInterval -v 1 - -system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 -system sh/cfg.sh -n dnode4 -c balanceInterval -v 10 - -system sh/cfg.sh -n dnode1 -c wallevel -v 1 -system sh/cfg.sh -n dnode2 -c wallevel -v 1 -system sh/cfg.sh -n dnode3 -c wallevel -v 1 -system sh/cfg.sh -n dnode4 -c wallevel -v 1 - -system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 0 -system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 0 - -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 - -system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4 - -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3 -system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3 - -system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 1000 -system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 1000 -system sh/cfg.sh -n dnode1 -c maxShellConns -v 1000 -system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 1000 - -system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 1000 -system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 1000 -system sh/cfg.sh -n dnode2 -c maxShellConns -v 1000 -system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 1000 - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 5 -$rowNum = 20 -$totalNum = 200 - -print ============== step1 -print ========= start dnode1 -system sh/exec.sh -n dnode1 -s start - -sql connect -sleep 2000 - -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -20 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now $ms , $x , $x ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03 -if $data01 != $rowNum then - return -1 -endi -if $data02 != $rowNum then - return -1 -endi -if $data03 != $rowNum then - return -1 -endi - -sql show tables -if $rows != 5 then - return -1 -endi - -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(10s) - -sql show tables -if $rows != 6 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - -sql select * from $tb -if $rows != 20 then - return -1 -endi - -sql select * from $mt -if $rows != 100 then - return -1 -endi - -sql select * from $st -print select * from $st => $data01 -if $rows == 0 then - return -1 -endi - -$x = 0 -show1: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show1 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 6 then - goto show1 -endi -if $dnode2Vnodes != NULL then - goto show1 -endi - -print =============== step4 start dnode2 -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -sleep 8000 - -$x = 0 -show2: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show2 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != 7 then - goto show2 -endi -if $dnode2Vnodes != 7 then - goto show2 -endi - -print =============== step5 drop dnode1 -system sh/exec.sh -n dnode1 -s stop -print stop dnode1 and sleep 10000 -sleep 10000 - -sql drop dnode $hostname1 -print drop dnode1 and sleep 9000 -sleep 9000 - -$x = 0 -show6: - $x = $x + 1 - sleep 2000 - if $x == 20 then - return -1 - endi -sql show dnodes -x show6 -$dnode1Vnodes = $data3_192.168.0.1 -print dnode1 $dnode1Vnodes -$dnode2Vnodes = $data3_192.168.0.2 -print dnode2 $dnode2Vnodes - -if $dnode1Vnodes != NULL then - goto show6 -endi -if $dnode2Vnodes != 6 then - goto show6 -endi - -print =============== step6 - -print select * from $tb -sql select * from $tb -if $rows != 20 then - return -1 -endi - -print select * from $mt -sql select * from $mt -if $rows != 80 then - return -1 -endi - - -print =============== step7 -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - - $x = 0 - $y = 0 - while $y < $rowNum - $ms = $x . s - sql insert into $tb values (now + $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -$i = 0 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i - -print =============== step8 -print sleep 22 seconds -sleep 22000 - -print select * from $st -sql select * from $st -if $rows == 0 then - return -1 -endi - - -print =============== clear -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop - diff --git a/tests/script/unique/stream/table_replica1_dnode2.sim b/tests/script/unique/stream/table_replica1_dnode2.sim deleted file mode 100644 index ccc6026e9c92975ccdd4fd12366a11f50a818d3f..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica1_dnode2.sim +++ /dev/null @@ -1,137 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t1d_db -$tbPrefix = t1d_tb -$mtPrefix = t1d_mt -$stPrefix = t1d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 - -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) -print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) --> $data00 $data01 $data02 $data03 -sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) - -sql show tables -if $rows != 13 then - return -1 -endi - -print =============== step3 -print sleep 22 seconds -sleep 22000 - - -print =============== step4 -$i = 1 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st ==> $rows $data00 $data01 $data10 $data11 -$rows1 = $rows -if $data01 != 20 then - return -1 -endi - -$i = 5 -$tb = $tbPrefix . $i -$st = $stPrefix . $i -sql select * from $st -print $st => $rows $data00 $data01 $data10 $data11 -$rows5 = $rows -if $data01 != 20 then - return -1 -endi - -$i = 8 -$tb = $tbPrefix . $i -$st = $stPrefix . 
$i -sql select * from $st -print $st ==> $rows $data00 $data01 $data10 $data11 -$rows8 = $rows -if $data01 != 20 then - return -1 -endi - -if $rows8 != $rows5 then - return -1 -endi - -if $rows8 != $rows1 then - return -1 -endi \ No newline at end of file diff --git a/tests/script/unique/stream/table_replica2_dnode2.sim b/tests/script/unique/stream/table_replica2_dnode2.sim deleted file mode 100644 index 947fa0d2f9093c802a9c99c74edddeffca102d38..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode2.sim +++ /dev/null @@ -1,312 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t2d_db -$tbPrefix = t2d_tb -$mtPrefix = t2d_mt -$stPrefix = t2d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . 
ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim b/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim deleted file mode 100644 index 75300362393eaa543740307d4d11f9a4eabbbc50..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode2_vnoden.sim +++ /dev/null @@ -1,314 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -print ======================== dnode1 start - -$dbPrefix = t2dv_db -$tbPrefix = t2dv_tb -$mtPrefix = t2dv_mt -$stPrefix = t2dv_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . 
wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica2_dnode3.sim b/tests/script/unique/stream/table_replica2_dnode3.sim deleted file mode 100644 index 49eb3563b3964f05f31d72a8fd1ff12f2b5b3a03..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica2_dnode3.sim +++ /dev/null @@ -1,325 +0,0 @@ -system sh/stop_dnodes.sh - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = t2d3_db -$tbPrefix = t2d3_tb -$mtPrefix = t2d3_mt -$stPrefix = t2d3_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . 
su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . 
su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_replica3_dnode4.sim b/tests/script/unique/stream/table_replica3_dnode4.sim deleted file mode 100644 index 2cc443c72fc656b87ca8c1d330381ed5078cd755..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_replica3_dnode4.sim +++ /dev/null @@ -1,333 +0,0 @@ -system sh/stop_dnodes.sh - - - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/deploy.sh -n dnode3 -i 3 -system sh/deploy.sh -n dnode4 -i 4 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode3 -c walLevel -v 1 -system sh/cfg.sh -n dnode4 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 -system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -sql create dnode $hostname3 -sql create dnode $hostname4 -system sh/exec.sh -n dnode2 -s start -system sh/exec.sh -n dnode3 -s start -system sh/exec.sh -n dnode4 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi -if $data4_192.168.0.3 == offline then - goto createDnode -endi -if $data4_192.168.0.4 == offline then - goto createDnode -endi - -print ======================== dnode1 start - -$dbPrefix = t3d_db -$tbPrefix = t3d_tb -$mtPrefix = t3d_mt -$stPrefix = t3d_st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql drop databae $db -x step1 -step1: -sql create database $db replica 3 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 c2 -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql create table $st as select count(tbcol) from $tb interval(1d) - -print =============== step4 c3 -sql select count(tbcol2) from $tb interval(1d) -print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql create table $st as select count(tbcol2) from $tb interval(1d) - -print =============== step5 avg -sql select avg(tbcol) from $tb interval(1d) -print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . av -sql create table $st as select avg(tbcol) from $tb interval(1d) - -print =============== step6 su -sql select sum(tbcol) from $tb interval(1d) -print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . su -sql create table $st as select sum(tbcol) from $tb interval(1d) - -print =============== step7 mi -sql select min(tbcol) from $tb interval(1d) -print select min(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . mi -sql create table $st as select min(tbcol) from $tb interval(1d) - -print =============== step8 ma -sql select max(tbcol) from $tb interval(1d) -print select max(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . ma -sql create table $st as select max(tbcol) from $tb interval(1d) - -print =============== step9 fi -sql select first(tbcol) from $tb interval(1d) -print select first(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . fi -sql create table $st as select first(tbcol) from $tb interval(1d) - -print =============== step10 la -sql select last(tbcol) from $tb interval(1d) -print select last(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . la -sql create table $st as select last(tbcol) from $tb interval(1d) - -print =============== step11 st -sql select stddev(tbcol) from $tb interval(1d) -print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . std -sql create table $st as select stddev(tbcol) from $tb interval(1d) - -print =============== step12 le -sql select leastsquares(tbcol, 1, 1) from $tb interval(1d) -print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01 -#if $data01 != @(0.000017, -25362055.126740)@ then -# return -1 -#endi - -$st = $stPrefix . 
le -sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d) - -print =============== step13 pe - -sql select percentile(tbcol, 1) from $tb interval(1d) -print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . pe -sql create table $st as select percentile(tbcol, 1) from $tb interval(1d) - -print =============== step14 wh -sql select count(tbcol) from $tb where ts < now + 4m interval(1d) -print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . wh -#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d) - -print =============== step15 as -sql select count(tbcol) from $tb interval(1d) -print select count(tbcol) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . as -sql create table $st as select count(tbcol) as c from $tb interval(1d) - -print =============== step16 -print sleep 22 seconds -sleep 22000 - -print =============== step17 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c2 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c3 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . av -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 9.500000000 then - return -1 -endi - -$st = $stPrefix . su -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 190 then - return -1 -endi - -$st = $stPrefix . mi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . ma -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . fi -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0 then - return -1 -endi - -$st = $stPrefix . la -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 19 then - return -1 -endi - -$st = $stPrefix . std -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 5.766281297 then - return -1 -endi - -$st = $stPrefix . le -sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != @(0.000017, -25270086.331047)@ then -# return -1 -#endi - -$st = $stPrefix . pe -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != 0.190000000 then - return -1 -endi - -$st = $stPrefix . wh -#sql select * from $st -#print ===> select * from $st ===> $data00 $data01 -#if $data01 != $rowNum then -# return -1 -#endi - -$st = $stPrefix . 
as -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi diff --git a/tests/script/unique/stream/table_vnode_stop.sim b/tests/script/unique/stream/table_vnode_stop.sim deleted file mode 100644 index 625de32a8d7a1e5336dd10f313565bdbc0daf0fc..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/table_vnode_stop.sim +++ /dev/null @@ -1,189 +0,0 @@ -system sh/stop_dnodes.sh - - - -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2 -system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2 -system sh/exec.sh -n dnode1 -s start - -sql connect - -sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start -$x = 0 -createDnode: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql show dnodes; -if $data4_192.168.0.2 == offline then - goto createDnode -endi - -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step1 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step2 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step3 -system sh/exec.sh -n dnode2 -s stop - -print =============== step4 -print sleep 22 seconds -sleep 22000 - -print =============== step5 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -print ============= step6 - -sql close -system sh/exec.sh -n dnode1 -s stop -system sh/exec.sh -n dnode2 -s stop -system sh/deploy.sh -n dnode1 -i 1 -system sh/deploy.sh -n dnode2 -i 2 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode2 -c walLevel -v 1 -sleep 2000 -system sh/exec.sh -n dnode2 -s start - -$x = 0 -connectTbase2: - $x = $x + 1 - sleep 1000 - if $x == 20 then - return -1 - endi -sql connect -x connectTbase2 -sleep 2000 - -sql create dnode $hostname1 -system sh/exec.sh -n dnode1 -s start -sleep 2000 -print ======================== dnode start - -$dbPrefix = db -$tbPrefix = tb -$mtPrefix = mt -$stPrefix = st -$tbNum = 10 -$rowNum = 20 -$totalNum = 200 - -print =============== step7 -$i = 0 -$db = $dbPrefix . $i -$mt = $mtPrefix . $i -$st = $stPrefix . $i - -sql create database $db replica 2 -sql use $db -sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int) - -$i = 0 -while $i < $tbNum - $tb = $tbPrefix . $i - sql create table $tb using $mt tags( $i ) - - $x = -1440 - $y = 0 - while $y < $rowNum - $ms = $x . 
m - sql insert into $tb values (now $ms , $y , $y ) - $x = $x + 1 - $y = $y + 1 - endw - - $i = $i + 1 -endw - -sleep 100 - -print =============== step8 c1 -$i = 1 -$tb = $tbPrefix . $i - -sql select count(*) from $tb interval(1d) -print select count(*) from $tb interval(1d) ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - -$st = $stPrefix . c1 -sql create table $st as select count(*) from $tb interval(1d) - -print =============== step9 -system sh/exec.sh -n dnode1 -s stop - -print =============== step10 -print sleep 22 seconds -sleep 22000 - -print =============== step11 -$st = $stPrefix . c1 -sql select * from $st -print ===> select * from $st ===> $data00 $data01 -if $data01 != $rowNum then - return -1 -endi - - - diff --git a/tests/script/unique/stream/testSuite.sim b/tests/script/unique/stream/testSuite.sim deleted file mode 100644 index bbf5da3d376d9eccc02aa61b1122cadb5fc04813..0000000000000000000000000000000000000000 --- a/tests/script/unique/stream/testSuite.sim +++ /dev/null @@ -1,15 +0,0 @@ -#run unique/stream/table_replica1_dnode2.sim -#run unique/stream/metrics_replica1_dnode2.sim -#run unique/stream/table_replica2_dnode2.sim -#run unique/stream/metrics_replica2_dnode2.sim -#run unique/stream/table_replica2_dnode2_vnoden.sim -#run unique/stream/metrics_replica2_dnode2_vnoden.sim -#run unique/stream/table_replica2_dnode3.sim -#run unique/stream/metrics_replica2_dnode3.sim -#run unique/stream/table_replica3_dnode4.sim -#run unique/stream/metrics_replica3_dnode4.sim -#run unique/stream/table_vnode_stop.sim -#run unique/stream/metrics_vnode_stop.sim -##run unique/stream/table_balance.sim -##run unique/stream/metrics_balance.sim -##run unique/stream/table_move.sim \ No newline at end of file diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py index 6d247c5700192e723ef0efb4cc0608bb824fa929..e00fe89461b2e8aeb7e9c545f0d40e8aa6363a50 100644 --- a/tests/system-test/0-others/taosShellError.py +++ b/tests/system-test/0-others/taosShellError.py @@ -3,7 +3,11 @@ import taos import sys import time import socket -import pexpect +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect import os from util.log import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("cmd return result:\n%s\n"%retResult) 
#print(child.after.decode()) if i == 0: print ('taos login success! Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) - retResult = child.before.decode() + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() if w == 0: return "TAOS_OK", retResult else: @@ -103,7 +117,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py index bbaeacf328fd5422ccd018a79ce6d9c632a370a9..c81d4af3c555a27b117e1551d6aef01820d3ee1c 100644 --- a/tests/system-test/0-others/taosShellNetChk.py +++ b/tests/system-test/0-others/taosShellNetChk.py @@ -3,7 +3,11 @@ import taos import sys import time import socket -import pexpect +import platform +if platform.system().lower() == 'windows': + import wexpect as taosExpect +else: + import pexpect as taosExpect import os from util.log import * @@ -15,7 +19,11 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key if len(key) == 0: tdLog.exit("taos test key is null!") - taosCmd = buildPath + '/build/bin/taos ' + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe ' + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos ' if len(cfgDir) != 0: taosCmd = taosCmd + '-c ' + cfgDir @@ -36,23 +44,29 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key tdLog.info ("taos cmd: %s" % taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) + child = taosExpect.spawn(taosCmd, timeout=3) #output = child.readline() #print (output.decode()) if len(expectString) != 0: - i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) else: - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("expect() return code: %d, content:\n %s\n"%(i, retResult)) #print(child.after.decode()) if i == 0: print ('taos login success! 
Here can run sql, taos> ') if len(sqlString) != 0: child.sendline (sqlString) - w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1) - retResult = child.before.decode() + w = child.expect(["Query OK", taosExpect.TIMEOUT, taosExpect.EOF], timeout=1) + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() if w == 0: return "TAOS_OK", retResult else: @@ -103,7 +117,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -168,21 +182,33 @@ class TDTestCase: tdDnodes.stop(1) role = 'server' - taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c'] - taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &' + if platform.system().lower() == 'windows': + taosCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\taos.exe -c ' + keyDict['c'] + taosCmd = taosCmd.replace('\\','\\\\') + taosCmd = taosCmd + ' -n ' + role + else: + taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c'] + taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &' print (taosCmd) os.system(taosCmd) pktLen = '2000' pktNum = '10' role = 'client' - taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c'] + if platform.system().lower() == 'windows': + taosCmd = buildPath + '\\build\\bin\\taos.exe -c ' + keyDict['c'] + taosCmd = taosCmd.replace('\\','\\\\') + else: + taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c'] taosCmd = taosCmd + ' -n ' + role + ' -l ' + pktLen + ' -N ' + pktNum print (taosCmd) - child = pexpect.spawn(taosCmd, timeout=3) - i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6) + child = taosExpect.spawn(taosCmd, timeout=3) + i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6) - retResult = child.before.decode() + if platform.system().lower() == 'windows': + retResult = child.before + else: + retResult = child.before.decode() print("expect() return code: %d, content:\n %s\n"%(i, retResult)) #print(child.after.decode()) if i == 0: @@ -195,7 +221,10 @@ class TDTestCase: else: tdLog.exit('taos -n client fail!') - os.system('pkill taos') + if platform.system().lower() == 'windows': + os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9') + else: + os.system('pkill taos') def stop(self): tdSql.close() diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py index a3d3b052047faa12618a0b68846518269c9de3f5..657979658e5b048e8d51d0bf8c67bada5f321402 100644 --- a/tests/system-test/0-others/taosdMonitor.py +++ b/tests/system-test/0-others/taosdMonitor.py @@ -2,7 +2,7 @@ import taos import sys import time import socket -import pexpect +# import pexpect import os import http.server import gzip diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py index 3ab39f9e7bb14b40f7caaa2b6f3bff43869c1e21..203f87c085fe91a9a75cc4176065a893fc29cf1e 100644 --- a/tests/system-test/0-others/telemetry.py +++ b/tests/system-test/0-others/telemetry.py @@ -2,7 +2,7 @@ import taos import sys import time import socket -import pexpect +# import pexpect import os import http.server import gzip diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py index 
4a29cacd97b7bad3bcd469fe1ebc2b445061397a..500e8671217f5d4bb8ae0793f288791095303135 100644 --- a/tests/system-test/7-tmq/basic5.py +++ b/tests/system-test/7-tmq/basic5.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -186,7 +186,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -228,7 +228,7 @@ class TDTestCase: 'stbName': 'stb', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -300,7 +300,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 @@ -349,8 +349,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -381,8 +381,8 @@ class TDTestCase: 'vgroups': 1, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 30000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 200, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict2['cfg'] = cfgPath tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName'])) @@ -432,7 +432,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index 29a2342fb7988efe25612822f0d0c378dffdbf10..157bc7928b2800c5ba68c5f1b65ec601274dc4b9 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -167,7 +167,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -197,7 +197,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -236,7 +236,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -264,7 +264,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 
parameterDict['cfg'] = cfgPath @@ -298,7 +298,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 20 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -313,8 +313,8 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + if not (totalConsumeRows >= expectrowcnt): tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicName1) @@ -330,7 +330,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -364,7 +364,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 10 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -399,7 +399,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -416,7 +416,7 @@ class TDTestCase: 'vgroups': 4, \ 'stbName': 'stb2', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -446,7 +446,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 10 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -470,336 +470,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 3 end ...... 
") - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db4', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db5', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows < expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db60', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db61', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - #consumerId = 1 - #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db70', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db71', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - def run(self): tdSql.prepare() @@ -815,12 +485,7 @@ class TDTestCase: self.tmqCase2(cfgPath, buildPath) self.tmqCase2a(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) - self.tmqCase4(cfgPath, buildPath) - self.tmqCase5(cfgPath, buildPath) - self.tmqCase6(cfgPath, buildPath) - self.tmqCase7(cfgPath, buildPath) - - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f93acfd6599c60708f0726caf26b7fec01a0f3 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -0,0 +1,515 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info 
("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase4(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db4', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 4 end ...... 
") + + def tmqCase5(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, firstly create one stb, after start consume create other stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db5', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows < expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 5 end ...... 
") + + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db60', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db61', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + #consumerId = 1 + #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 6 end ...... 
") + + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db70', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db71', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName'])) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + consumerId = 1 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) + + tdLog.printNoPrefix("======== test case 7 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase4(cfgPath, buildPath) + self.tmqCase5(cfgPath, buildPath) + self.tmqCase6(cfgPath, buildPath) + self.tmqCase7(cfgPath, buildPath) + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index 2b9d344b744cb92b4ecf8e7d92117a88d9551b4e..56db157ab849f609eb22debde6936d2de406ee06 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -22,8 +22,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -198,7 +198,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -276,7 +276,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -354,7 +354,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 15 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -425,7 +425,7 @@ class TDTestCase: event.wait() tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py index a0b3668d47bb45a637da035f19da3cbe01dfa9c1..2b7f0d3d5ff06ea0c36f9768c3a7f6d3eae715a0 100644 --- a/tests/system-test/7-tmq/subscribeStb.py +++ b/tests/system-test/7-tmq/subscribeStb.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -346,1024 +346,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 50000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db4', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1377,9 +359,6 @@ class TDTestCase: self.tmqCase1(cfgPath, buildPath) self.tmqCase2(cfgPath, buildPath) - # self.tmqCase3(cfgPath, buildPath) - # self.tmqCase4(cfgPath, buildPath) - # self.tmqCase5(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py index 7864a4bc731aaa860c1bd7995817c8c83701732b..a212cf759066f4cc67bec18800e6b9581013ab0e 100644 --- a/tests/system-test/7-tmq/subscribeStb0.py +++ b/tests/system-test/7-tmq/subscribeStb0.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,169 +183,6 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 3: ") @@ -386,7 +223,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -461,7 +298,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -544,7 +381,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -580,789 +417,7 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 5 end ...... ") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - 
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... ") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") + tdLog.printNoPrefix("======== test case 5 end ...... ") def run(self): tdSql.prepare() @@ -1375,8 +430,6 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - # self.tmqCase1(cfgPath, buildPath) - # self.tmqCase2(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) self.tmqCase4(cfgPath, buildPath) self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py index 049b297d2df0f54e7d681c6236d942340da2d19f..92347690d9a14f35e50ac11e18c51daa7fb1f716 100644 --- a/tests/system-test/7-tmq/subscribeStb1.py +++ b/tests/system-test/7-tmq/subscribeStb1.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,248 +183,15 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db1', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 1 end ...... 
") - - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 2 end ...... 
") - - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(2) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_9"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_8"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_7"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 3 end ...... 
") - - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db4', \ + 'dbName': 'db6', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -461,7 +228,7 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -479,6 +246,10 @@ class TDTestCase: self.initConsumerInfoTable() consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("again start consume processor") @@ -497,17 +268,17 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 4 end ...... ") + tdLog.printNoPrefix("======== test case 6 end ...... ") - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db5', \ + 'dbName': 'db7', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -536,15 +307,15 @@ class TDTestCase: expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + auto.offset.reset:latest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) @@ -556,8 +327,8 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") self.initConsumerInfoTable() @@ -574,796 +345,14 @@ class TDTestCase: for i in range(expectRows): totalConsumeRows += resultList[i] - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 7 end ...... 
") - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db10', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt-10000: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - self.insertConsumerInfo(consumerId, 
expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 10 end ...... ") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db11', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - 
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 13 end ...... ") - def run(self): tdSql.prepare() @@ -1375,19 +364,8 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - #self.tmqCase1(cfgPath, buildPath) - #self.tmqCase2(cfgPath, buildPath) - #self.tmqCase3(cfgPath, buildPath) - #self.tmqCase4(cfgPath, buildPath) - #self.tmqCase5(cfgPath, buildPath) self.tmqCase6(cfgPath, buildPath) self.tmqCase7(cfgPath, buildPath) - self.tmqCase8(cfgPath, buildPath) - self.tmqCase9(cfgPath, buildPath) - self.tmqCase10(cfgPath, buildPath) - self.tmqCase11(cfgPath, buildPath) - self.tmqCase12(cfgPath, buildPath) - self.tmqCase13(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py index e825ebd3b6ab15eaf9dcd0ab833557fef62664f9..d08adcdc8374d01a0f91dfd596b2de6521d86f84 100644 --- a/tests/system-test/7-tmq/subscribeStb2.py +++ b/tests/system-test/7-tmq/subscribeStb2.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -183,18 +183,15 @@ class TDTestCase: return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") + def tmqCase8(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 8: ") self.initConsumerTable() - auotCtbNum = 5 - auotCtbPrefix = 'autoCtb' - # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db1', \ + 'dbName': 'db8', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -204,42 +201,60 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume 0 processor") pollDelay = 100 showMsg = 1 showRow = 1 
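+        # the tmq_sim helper launched below (see startTmqSimProcess) runs in the
+        # background: it is pointed at the cdb database (-w), where insertConsumerInfo
+        # stored the consumer parameters, and the test later polls cdb.consumeresult
+        # via selectConsumeResult for its totals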
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # add some new child tables using auto ctreating mode - time.sleep(1) - for index in range(auotCtbNum): - tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index)) + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): @@ -249,22 +264,39 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") + tdLog.info("start consume 2 processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 1 end ...... ") + tdLog.printNoPrefix("======== test case 8 end ...... 
") - def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") + def tmqCase9(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 9: ") self.initConsumerTable() - auotCtbNum = 10 - auotCtbPrefix = 'autoCtb' - # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db2', \ + 'dbName': 'db9', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ @@ -274,46 +306,64 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - self.create_stable(tdSql, parameterDict["dbName"], 'stb2') + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) - tdLog.info("create topics from stb0/stb1") + tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - topicFromStb2 = 'topic_stb2' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2')) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) - topicList = '%s, %s'%(topicFromStb1,topicFromStb2) + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 ifcheckdata = 0 - ifManualCommit = 0 + ifManualCommit = 1 keyList = 'group.id:cgrp1,\ enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + auto.offset.reset:latest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume 0 processor") pollDelay = 100 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # add some new child tables using auto ctreating mode - time.sleep(1) - for index in range(auotCtbNum): - tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], auotCtbPrefix, index, parameterDict["dbName"], 'stb2', index)) + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") - self.insert_data(tdSql,parameterDict["dbName"],auotCtbPrefix,auotCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 0 + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 + tdLog.info("start one new thread to insert data") + 
parameterDict['actionType'] = actionType.INSERT_DATA
+        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
+        prepareEnvThread.start()
+        prepareEnvThread.join()
+
+        tdLog.info("start to check consume 0 and 1 result")
+        expectRows = 2
         resultList = self.selectConsumeResult(expectRows)
         totalConsumeRows = 0
         for i in range(expectRows):
@@ -323,9 +373,29 @@ class TDTestCase:
             tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
             tdLog.exit("tmq consume rows error!")
 
+        tdLog.info("start consume 2 processor")
+        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+        tdLog.info("start one new thread to insert data")
+        parameterDict['actionType'] = actionType.INSERT_DATA
+        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
+        prepareEnvThread.start()
+        prepareEnvThread.join()
+
+        tdLog.info("start to check consume 0 and 1 and 2 result")
+        expectRows = 3
+        resultList = self.selectConsumeResult(expectRows)
+        totalConsumeRows = 0
+        for i in range(expectRows):
+            totalConsumeRows += resultList[i]
+
+        if totalConsumeRows != expectrowcnt*2:
+            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
+            tdLog.exit("tmq consume rows error!")
+
         tdSql.query("drop topic %s"%topicFromStb1)
 
-        tdLog.printNoPrefix("======== test case 2 end ...... ")
+        tdLog.printNoPrefix("======== test case 9 end ...... ")
 
     def run(self):
         tdSql.prepare()
@@ -338,8 +408,8 @@ class TDTestCase:
         cfgPath = buildPath + "/../sim/psim/cfg"
         tdLog.info("cfgPath: %s" % cfgPath)
 
-        self.tmqCase1(cfgPath, buildPath)
-        self.tmqCase2(cfgPath, buildPath)
+        self.tmqCase8(cfgPath, buildPath)
+        self.tmqCase9(cfgPath, buildPath)
 
     def stop(self):
         tdSql.close()
diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py
new file mode 100644
index 0000000000000000000000000000000000000000..58e36911c1407add56a5ef023364f5925e2629b1
--- /dev/null
+++ b/tests/system-test/7-tmq/subscribeStb3.py
@@ -0,0 +1,607 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class actionType(Enum):
+    CREATE_DATABASE = 0
+    CREATE_STABLE = 1
+    CREATE_CTABLE = 2
+    INSERT_DATA = 3
+
+class TDTestCase:
+    hostname = socket.gethostname()
+    #rpcDebugFlagVal = '143'
+    #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #print ("===================: ", updatecfgDict)
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+        #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def newcur(self,cfg,host,port):
+        user = "root"
+        password 
= "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def initConsumerInfoTable(self,cdbName='cdb'): + tdLog.info("drop consumeinfo table") + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname))
+        for i in range(ctbNum):
+            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
+            if (i > 0) and (i%100 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfSql = 0
+        for i in range(ctbNum):
+            sql += " %s_%d values "%(stbName,i)
+            for j in range(rowsPerTbl):
+                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+                rowsOfSql += 1
+                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsOfSql = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s_%d values " %(stbName,i)
+                    else:
+                        sql = "insert into "
+        #end sql
+        if sql != pre_insert:
+            #print("insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareEnv(self, **parameterDict):
+        # create new connector for my thread
+        tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+        if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+            self.create_database(tsql, parameterDict["dbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+            self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+            self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        elif parameterDict["actionType"] == actionType.INSERT_DATA:
+            self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
+                             parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+        else:
+            tdLog.exit("unsupported action: %s"%(parameterDict["actionType"]))
+
+        return
+
+    def tmqCase10(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 10: ")
+
+        self.initConsumerTable()
+
+        # create and start thread
+        parameterDict = {'cfg': '', \
+                         'actionType': 0, \
+                         'dbName': 'db10', \
+                         'dropFlag': 1, \
+                         'vgroups': 4, \
+                         'replica': 1, \
+                         'stbName': 'stb1', \
+                         'ctbNum': 10, \
+                         'rowsPerTbl': 10000, \
+                         'batchNum': 100, \
+                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        self.create_database(tdSql, parameterDict["dbName"])
+        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        self.insert_data(tdSql,\
+                         parameterDict["dbName"],\
+                         parameterDict["stbName"],\
+                         parameterDict["ctbNum"],\
+                         parameterDict["rowsPerTbl"],\
+                         parameterDict["batchNum"])
+
+        tdLog.info("create topics from stb1")
+        topicFromStb1 = 'topic_stb1'
+
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+        consumerId = 0
+        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
+        topicList = topicFromStb1
+        ifcheckdata = 0
+        ifManualCommit = 1
+        keyList = 'group.id:cgrp1,\
+                   enable.auto.commit:false,\
+                   auto.commit.interval.ms:6000,\
+                   auto.offset.reset:latest'
+        self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume 0 processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume 0 result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 1 processor") + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt-10000: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) + tdLog.exit("tmq consume rows error!") + + tdLog.info("start consume 2 processor") + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + self.insertConsumerInfo(consumerId, expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start one new thread to insert data") + parameterDict['actionType'] = actionType.INSERT_DATA + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + prepareEnvThread.join() + + tdLog.info("start to check consume 0 and 1 and 2 result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*2: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 10 end ...... 
") + + def tmqCase11(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 11: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db11', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != 0: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 11 end ...... 
") + + def tmqCase12(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 12: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db12', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 12 end ...... 
") + + def tmqCase13(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 13: ") + + self.initConsumerTable() + + # create and start thread + parameterDict = {'cfg': '', \ + 'actionType': 0, \ + 'dbName': 'db13', \ + 'dropFlag': 1, \ + 'vgroups': 4, \ + 'replica': 1, \ + 'stbName': 'stb1', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.create_database(tdSql, parameterDict["dbName"]) + self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.insert_data(tdSql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"]) + + tdLog.info("create topics from stb1") + topicFromStb1 = 'topic_stb1' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt/4: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 1 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 2 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt*(1/2+1/4): + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) + tdLog.exit("tmq consume rows error!") + + self.initConsumerInfoTable() + consumerId = 2 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:none' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + tdLog.info("again start consume processor") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + tdLog.info("again check consume result") + expectRows = 3 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act 
consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+            tdLog.exit("tmq consume rows error!")
+
+        tdSql.query("drop topic %s"%topicFromStb1)
+
+        tdLog.printNoPrefix("======== test case 13 end ...... ")
+
+    def run(self):
+        tdSql.prepare()
+
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        cfgPath = buildPath + "/../sim/psim/cfg"
+        tdLog.info("cfgPath: %s" % cfgPath)
+
+        self.tmqCase10(cfgPath, buildPath)
+        self.tmqCase11(cfgPath, buildPath)
+        self.tmqCase12(cfgPath, buildPath)
+        self.tmqCase13(cfgPath, buildPath)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py
new file mode 100644
index 0000000000000000000000000000000000000000..d06e14479667d172a2a7cc42f8019957d131f749
--- /dev/null
+++ b/tests/system-test/7-tmq/subscribeStb4.py
@@ -0,0 +1,351 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class actionType(Enum):
+    CREATE_DATABASE = 0
+    CREATE_STABLE = 1
+    CREATE_CTABLE = 2
+    INSERT_DATA = 3
+
+class TDTestCase:
+    hostname = socket.gethostname()
+    #rpcDebugFlagVal = '143'
+    #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+    #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+    #print ("===================: ", updatecfgDict)
+
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor())
+        #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def newcur(self,cfg,host,port):
+        user = "root"
+        password = "taosdata"
+        con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+        cur=con.cursor()
+        print(cur)
+        return cur
+
+    def initConsumerTable(self,cdbName='cdb'):
+        tdLog.info("create consume database, and consume info table, and consume result table")
+        tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
+        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+
+        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
+
+    def initConsumerInfoTable(self,cdbName='cdb'):
+        tdLog.info("drop consumeinfo table")
+        tdSql.query("drop table if exists 
%s.consumeinfo "%(cdbName)) + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, dbName,stbName): + tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName)) + tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + return + + def create_ctables(self,tsql, dbName,stbName,ctbNum): + tsql.execute("use %s" %dbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname))
+        for i in range(ctbNum):
+            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
+            if (i > 0) and (i%100 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        if startTs == 0:
+            t = time.time()
+            startTs = int(round(t * 1000))
+
+        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+        rowsOfSql = 0
+        for i in range(ctbNum):
+            sql += " %s_%d values "%(stbName,i)
+            for j in range(rowsPerTbl):
+                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+                rowsOfSql += 1
+                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsOfSql = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s_%d values " %(stbName,i)
+                    else:
+                        sql = "insert into "
+        #end sql
+        if sql != pre_insert:
+            #print("insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareEnv(self, **parameterDict):
+        # create new connector for my thread
+        tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+        if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+            self.create_database(tsql, parameterDict["dbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+            self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+        elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+            self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        elif parameterDict["actionType"] == actionType.INSERT_DATA:
+            self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
+                             parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+        else:
+            tdLog.exit("unsupported action: %s"%(parameterDict["actionType"]))
+
+        return
+
+    def tmqCase1(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 1: ")
+
+        self.initConsumerTable()
+
+        autoCtbNum = 5
+        autoCtbPrefix = 'autoCtb'
+
+        # create and start thread
+        parameterDict = {'cfg': '', \
+                         'actionType': 0, \
+                         'dbName': 'db1', \
+                         'dropFlag': 1, \
+                         'vgroups': 4, \
+                         'replica': 1, \
+                         'stbName': 'stb1', \
+                         'ctbNum': 10, \
+                         'rowsPerTbl': 10000, \
+                         'batchNum': 100, \
+                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        self.create_database(tdSql, parameterDict["dbName"])
+        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        tdLog.info("create topics from stb1")
+        topicFromStb1 = 'topic_stb1'
+
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+        consumerId = 0
+        expectrowcnt = parameterDict["rowsPerTbl"] * (autoCtbNum + parameterDict["ctbNum"])
+        topicList = topicFromStb1
+        ifcheckdata = 0
+        ifManualCommit = 0
+        keyList = 'group.id:cgrp1,\
+                   enable.auto.commit:false,\
+                   auto.commit.interval.ms:6000,\
+                   auto.offset.reset:earliest'
+        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+        tdLog.info("start consume processor")
+        pollDelay = 100
+        showMsg = 1
+        showRow = 1
+        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+        # add some new child tables using auto creating mode
+        time.sleep(1)
+        for index in range(autoCtbNum):
+            tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], autoCtbPrefix, index, parameterDict["dbName"], parameterDict["stbName"], index))
+
+        self.insert_data(tdSql,parameterDict["dbName"],autoCtbPrefix,autoCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        tdLog.info("insert process end, and start to check consume result")
+        expectRows = 1
+        resultList = self.selectConsumeResult(expectRows)
+        totalConsumeRows = 0
+        for i in range(expectRows):
+            totalConsumeRows += resultList[i]
+
+        if totalConsumeRows != expectrowcnt:
+            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+            tdLog.exit("tmq consume rows error!")
+
+        tdSql.query("drop topic %s"%topicFromStb1)
+
+        tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+    def tmqCase2(self, cfgPath, buildPath):
+        tdLog.printNoPrefix("======== test case 2: ")
+
+        self.initConsumerTable()
+
+        autoCtbNum = 10
+        autoCtbPrefix = 'autoCtb'
+
+        # create and start thread
+        parameterDict = {'cfg': '', \
+                         'actionType': 0, \
+                         'dbName': 'db2', \
+                         'dropFlag': 1, \
+                         'vgroups': 4, \
+                         'replica': 1, \
+                         'stbName': 'stb1', \
+                         'ctbNum': 10, \
+                         'rowsPerTbl': 10000, \
+                         'batchNum': 100, \
+                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+        parameterDict['cfg'] = cfgPath
+
+        self.create_database(tdSql, parameterDict["dbName"])
+        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+        self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        self.create_stable(tdSql, parameterDict["dbName"], 'stb2')
+
+        tdLog.info("create topics from stb1/stb2")
+        topicFromStb1 = 'topic_stb1'
+        topicFromStb2 = 'topic_stb2'
+
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict['dbName'], 'stb2'))
+        consumerId = 0
+        expectrowcnt = parameterDict["rowsPerTbl"] * (autoCtbNum + parameterDict["ctbNum"])
+        topicList = '%s, %s'%(topicFromStb1,topicFromStb2)
+        ifcheckdata = 0
+        ifManualCommit = 0
+        keyList = 'group.id:cgrp1,\
+                   enable.auto.commit:false,\
+                   auto.commit.interval.ms:6000,\
+                   auto.offset.reset:earliest'
+        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+        tdLog.info("start consume processor")
+        pollDelay = 100
+        showMsg = 1
+        showRow = 1
+        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+        # add some new child tables using auto creating mode
+        time.sleep(1)
+        for index in range(autoCtbNum):
+            tdSql.query("create table %s.%s_%d using %s.%s tags(%d)"%(parameterDict["dbName"], autoCtbPrefix, index, parameterDict["dbName"], 'stb2', index))
+
+        self.insert_data(tdSql,parameterDict["dbName"],autoCtbPrefix,autoCtbNum,parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+
+        tdLog.info("insert process end, and start to check consume result")
+        expectRows = 
1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicFromStb1) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py index 4200b357a7e314720d9c5aeff8199a07dbcd45dd..bb287134b12010a6697e437622ec1ddcff11e7b9 100644 --- a/tests/system-test/7-tmq/tmqDnode.py +++ b/tests/system-test/7-tmq/tmqDnode.py @@ -29,8 +29,8 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - #tdSql.init(conn.cursor()) - tdSql.init(conn.cursor(), logSql) # output sql.txt file + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -118,13 +118,13 @@ class TDTestCase: tdLog.debug("complete to create %s.%s" %(dbName, stbName)) return - def create_ctables(self,tsql, dbName,stbName,ctbNum): + def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum): tsql.execute("use %s" %dbName) pre_create = "create table" sql = pre_create #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) for i in range(ctbNum): - sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1) if (i > 0) and (i%100 == 0): tsql.execute(sql) sql = pre_create @@ -211,7 +211,7 @@ class TDTestCase: for i in range(ctbNum): sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) for j in range(rowsPerTbl): - sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j) rowsOfSql += 1 if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)): tsql.execute(sql) @@ -236,7 +236,7 @@ class TDTestCase: elif parameterDict["actionType"] == actionType.CREATE_STABLE: self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"]) elif parameterDict["actionType"] == actionType.CREATE_CTABLE: - self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"]) elif parameterDict["actionType"] == actionType.INSERT_DATA: self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],parameterDict["batchNum"]) @@ -257,16 +257,17 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 33, \ + 'batchNum': 23, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") @@ -290,8 +291,8 @@ class TDTestCase: showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - print("================= restart dnode ===========================") - time.sleep(3) + time.sleep(3) + tdLog.info("================= restart dnode ===========================") tdDnodes.stop(1) tdDnodes.start(1) time.sleep(2) @@ -323,36 +324,25 @@ class TDTestCase: 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ - 'stbName': 'stb1', \ + 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ - 'rowsPerTbl': 15000, \ - 'batchNum': 100, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 40, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - parameterDict2 = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db2', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 16000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict2['cfg'] = cfgPath - self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_stable(tdSql, parameterDict2["dbName"], parameterDict2["stbName"]) + self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], 
parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 topicList = topicFromStb1 ifcheckdata = 0 ifManualCommit = 0 @@ -363,36 +353,16 @@ class TDTestCase: self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) tdLog.info("start consume processor") - pollDelay = 0 + pollDelay = 50 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("start create child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.CREATE_CTABLE - parameterDict2['actionType'] = actionType.CREATE_CTABLE - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() + tdLog.info("create some new child table and insert data ") + parameterDict['batchNum'] = 100 + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - tdLog.info("start insert data into child tables of stb1 and stb2") - parameterDict['actionType'] = actionType.INSERT_DATA - parameterDict2['actionType'] = actionType.INSERT_DATA - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - prepareEnvThread.join() - prepareEnvThread2.join() - - print("================= restart dnode ===========================") + tdLog.info("================= restart dnode ===========================") tdDnodes.stop(1) tdDnodes.start(1) time.sleep(2) @@ -412,187 +382,35 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") - def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db3', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 50000, \ - 'batchNum': 13, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(5) - tdLog.info("drop som child table of stb1") - dropTblNum = 4 - tdSql.query("drop table if exists %s.%s_1"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_2"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_3"%(parameterDict["dbName"], parameterDict["stbName"])) - tdSql.query("drop table if exists %s.%s_4"%(parameterDict["dbName"], parameterDict["stbName"])) - - tdLog.info("drop some child tables, then start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - - if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): - tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 3 end ...... 
") - def tmqCase4(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 4: ") + # 自动建表完成数据插入,启动消费 + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: ") self.initConsumerTable() # create and start thread parameterDict = {'cfg': '', \ 'actionType': 0, \ - 'dbName': 'db4', \ + 'dbName': 'db3', \ 'dropFlag': 1, \ 'vgroups': 4, \ 'replica': 1, \ 'stbName': 'stb1', \ + 'ctbPrefix': 'stb1', \ 'ctbNum': 10, \ 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ + 'batchNum': 40, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 4 end ...... 
") - - def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db5', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) + #self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) + #self.insert_data_interlaceByMultiTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) + self.insert_data_with_autoCreateTbl(tdSql,parameterDict["dbName"],parameterDict["stbName"],"ctb",parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' @@ -607,121 +425,21 @@ class TDTestCase: enable.auto.commit:false,\ auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != (expectrowcnt * (1 + 1/4)): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 5 end ...... 
") - - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db6', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") - pollDelay = 5 + pollDelay = 10 showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + # tdLog.info("================= restart dnode ===========================") + # tdDnodes.stop(1) + # tdDnodes.start(1) + # time.sleep(2) - tdLog.info("again check consume result") - expectRows = 2 + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): @@ -733,702 +451,8 @@ class TDTestCase: tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db7', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db8', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*2: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) - tdLog.exit("tmq consume rows 
error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 8 end ...... ") - - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db9', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:latest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume 0 processor") - pollDelay = 10 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume 0 result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != 0: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 1 processor") - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 0 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdLog.info("start consume 2 processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start one new thread to insert data") - parameterDict['actionType'] = actionType.INSERT_DATA - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - prepareEnvThread.join() - - tdLog.info("start to check consume 0 and 1 and 2 result") - expectRows = 3 - resultList = 
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        if totalConsumeRows != expectrowcnt*2:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
-            tdLog.exit("tmq consume rows error!")
-
-        tdSql.query("drop topic %s"%topicFromStb1)
-
-        tdLog.printNoPrefix("======== test case 9 end ...... ")
-
-    def tmqCase10(self, cfgPath, buildPath):
-        tdLog.printNoPrefix("======== test case 10: ")
-
-        self.initConsumerTable()
-
-        # create and start thread
-        parameterDict = {'cfg': '', \
-                         'actionType': 0, \
-                         'dbName': 'db10', \
-                         'dropFlag': 1, \
-                         'vgroups': 4, \
-                         'replica': 1, \
-                         'stbName': 'stb1', \
-                         'ctbNum': 10, \
-                         'rowsPerTbl': 10000, \
-                         'batchNum': 100, \
-                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
-        parameterDict['cfg'] = cfgPath
-
-        self.create_database(tdSql, parameterDict["dbName"])
-        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
-        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
-        self.insert_data(tdSql,\
-                         parameterDict["dbName"],\
-                         parameterDict["stbName"],\
-                         parameterDict["ctbNum"],\
-                         parameterDict["rowsPerTbl"],\
-                         parameterDict["batchNum"])
-
-        tdLog.info("create topics from stb1")
-        topicFromStb1 = 'topic_stb1'
-
-        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
-        consumerId = 0
-        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
-        topicList = topicFromStb1
-        ifcheckdata = 0
-        ifManualCommit = 1
-        keyList = 'group.id:cgrp1,\
-                    enable.auto.commit:false,\
-                    auto.commit.interval.ms:6000,\
-                    auto.offset.reset:latest'
-        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
-        tdLog.info("start consume 0 processor")
-        pollDelay = 10
-        showMsg = 1
-        showRow = 1
-        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
-        tdLog.info("start to check consume 0 result")
-        expectRows = 1
-        resultList = self.selectConsumeResult(expectRows)
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        if totalConsumeRows != 0:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
-            tdLog.exit("tmq consume rows error!")
-
-        tdLog.info("start consume 1 processor")
-        self.initConsumerInfoTable()
-        consumerId = 1
-        ifManualCommit = 1
-        self.insertConsumerInfo(consumerId, expectrowcnt-10000,topicList,keyList,ifcheckdata,ifManualCommit)
-        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
-        tdLog.info("start one new thread to insert data")
-        parameterDict['actionType'] = actionType.INSERT_DATA
-        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
-        prepareEnvThread.start()
-        prepareEnvThread.join()
-
-        tdLog.info("start to check consume 0 and 1 result")
-        expectRows = 2
-        resultList = self.selectConsumeResult(expectRows)
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        if totalConsumeRows != expectrowcnt-10000:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000))
-            tdLog.exit("tmq consume rows error!")
-
-        tdLog.info("start consume 2 processor")
-        self.initConsumerInfoTable()
-        consumerId = 2
-        ifManualCommit = 1
-        self.insertConsumerInfo(consumerId, expectrowcnt+10000,topicList,keyList,ifcheckdata,ifManualCommit)
-        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
-        tdLog.info("start one new thread to insert data")
-        parameterDict['actionType'] = actionType.INSERT_DATA
-        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
-        prepareEnvThread.start()
-        prepareEnvThread.join()
-
-        tdLog.info("start to check consume 0 and 1 and 2 result")
-        expectRows = 3
-        resultList = self.selectConsumeResult(expectRows)
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        if totalConsumeRows != expectrowcnt*2:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
-            tdLog.exit("tmq consume rows error!")
-
-        tdSql.query("drop topic %s"%topicFromStb1)
-
-        tdLog.printNoPrefix("======== test case 10 end ...... ")
-
-    def tmqCase11(self, cfgPath, buildPath):
-        tdLog.printNoPrefix("======== test case 11: ")
-
-        self.initConsumerTable()
-
-        # create and start thread
-        parameterDict = {'cfg': '', \
-                         'actionType': 0, \
-                         'dbName': 'db11', \
-                         'dropFlag': 1, \
-                         'vgroups': 4, \
-                         'replica': 1, \
-                         'stbName': 'stb1', \
-                         'ctbNum': 10, \
-                         'rowsPerTbl': 10000, \
-                         'batchNum': 100, \
-                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
-        parameterDict['cfg'] = cfgPath
-
-        self.create_database(tdSql, parameterDict["dbName"])
-        self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
-        self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
-        self.insert_data(tdSql,\
-                         parameterDict["dbName"],\
-                         parameterDict["stbName"],\
-                         parameterDict["ctbNum"],\
-                         parameterDict["rowsPerTbl"],\
-                         parameterDict["batchNum"])
-
-        tdLog.info("create topics from stb1")
-        topicFromStb1 = 'topic_stb1'
-
-        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
-        consumerId = 0
-        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
-        topicList = topicFromStb1
-        ifcheckdata = 0
-        ifManualCommit = 1
-        keyList = 'group.id:cgrp1,\
-                    enable.auto.commit:false,\
-                    auto.commit.interval.ms:6000,\
-                    auto.offset.reset:none'
-        self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
-
-        tdLog.info("start consume processor")
-        pollDelay = 5
-        showMsg = 1
-        showRow = 1
-        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
-        tdLog.info("start to check consume result")
-        expectRows = 1
-        resultList = self.selectConsumeResult(expectRows)
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        if totalConsumeRows != 0:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
-            tdLog.exit("tmq consume rows error!")
-
-        self.initConsumerInfoTable()
-        consumerId = 1
-        keyList = 'group.id:cgrp1,\
-                    enable.auto.commit:false,\
-                    auto.commit.interval.ms:6000,\
-                    auto.offset.reset:none'
-        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
-        tdLog.info("again start consume processor")
-        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
-        tdLog.info("again check consume result")
-        expectRows = 2
-        resultList = self.selectConsumeResult(expectRows)
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") - - def tmqCase12(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 12: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db12', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicFromStb1) - - tdLog.printNoPrefix("======== test case 12 end ...... 
") - - def tmqCase13(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 13: ") - - self.initConsumerTable() - - # create and start thread - parameterDict = {'cfg': '', \ - 'actionType': 0, \ - 'dbName': 'db13', \ - 'dropFlag': 1, \ - 'vgroups': 4, \ - 'replica': 1, \ - 'stbName': 'stb1', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.create_database(tdSql, parameterDict["dbName"]) - self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) - self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) - self.insert_data(tdSql,\ - parameterDict["dbName"],\ - parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"],\ - parameterDict["batchNum"]) - - tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' - - tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicFromStb1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("start consume processor") - pollDelay = 5 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt/4: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 1 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt/2,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt*(1/2+1/4): - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) - tdLog.exit("tmq consume rows error!") - - self.initConsumerInfoTable() - consumerId = 2 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:none' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - tdLog.info("again start consume processor") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - tdLog.info("again check consume result") - expectRows = 3 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act 
-            tdLog.exit("tmq consume rows error!")
-
-        tdSql.query("drop topic %s"%topicFromStb1)
+        tdLog.printNoPrefix("======== test case 3 end ...... ")
 
-        tdLog.printNoPrefix("======== test case 13 end ...... ")
 
     def run(self):
         tdSql.prepare()
@@ -1442,8 +466,8 @@ class TDTestCase:
         tdLog.info("cfgPath: %s" % cfgPath)
 
         # self.tmqCase1(cfgPath, buildPath)
-        self.tmqCase2(cfgPath, buildPath)
-        # self.tmqCase3(cfgPath, buildPath)
+        # self.tmqCase2(cfgPath, buildPath)
+        self.tmqCase3(cfgPath, buildPath)
         # self.tmqCase4(cfgPath, buildPath)
         # self.tmqCase5(cfgPath, buildPath)
diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py
index 8e0d741040d4aa68cebe12cf20c25a81c4bd7ee2..ad5b4d70b35ba1ade92bb00c1903ce02340ebb19 100644
--- a/tests/system-test/7-tmq/tmqModule.py
+++ b/tests/system-test/7-tmq/tmqModule.py
@@ -29,8 +29,8 @@ class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug(f"start to excute {__file__}")
-        #tdSql.init(conn.cursor())
-        tdSql.init(conn.cursor(), logSql)  # output sql.txt file
+        tdSql.init(conn.cursor())
+        #tdSql.init(conn.cursor(), logSql)  # output sql.txt file
 
     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/system-test/fulltest.bat b/tests/system-test/fulltest.bat
new file mode 100644
index 0000000000000000000000000000000000000000..f9dfdf9a5486bb13d0ad6ed16b8edeece083a6d7
--- /dev/null
+++ b/tests/system-test/fulltest.bat
@@ -0,0 +1,3 @@
+
+python3 .\test.py -f 0-others\taosShellError.py
+python3 .\test.py -f 0-others\taosShellNetChk.py
\ No newline at end of file
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index aa6843435c7a1707a7b42665b417d14d2a4b3c57..37c7f18177d27a7b56f4d6219b0151f973cd8cbb 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -73,8 +73,11 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
 
 python3 ./test.py -f 7-tmq/basic5.py
 python3 ./test.py -f 7-tmq/subscribeDb.py
+python3 ./test.py -f 7-tmq/subscribeDb0.py
 python3 ./test.py -f 7-tmq/subscribeDb1.py
 python3 ./test.py -f 7-tmq/subscribeStb.py
 python3 ./test.py -f 7-tmq/subscribeStb0.py
 python3 ./test.py -f 7-tmq/subscribeStb1.py
 python3 ./test.py -f 7-tmq/subscribeStb2.py
+python3 ./test.py -f 7-tmq/subscribeStb3.py
+python3 ./test.py -f 7-tmq/subscribeStb4.py
diff --git a/tests/system-test/test-all.bat b/tests/system-test/test-all.bat
new file mode 100644
index 0000000000000000000000000000000000000000..ae6c98b06f3504b20e712630d40184b093143835
--- /dev/null
+++ b/tests/system-test/test-all.bat
@@ -0,0 +1,25 @@
+@echo off
+SETLOCAL EnableDelayedExpansion
+for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
+set /a a=0
+@REM echo Windows Taosd Test
+@REM for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+@REM echo Processing %%i
+@REM set /a a+=1
+@REM call %%i ARG1 > result_!a!.txt 2>error_!a!.txt
+@REM if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+@REM )
+echo Linux Taosd Test
+for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+ echo Processing %%i
+ set /a a+=1
+ call %%i ARG1 -m %1 > result_!a!.txt 2>error_!a!.txt
+ if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+)
+exit
+
+:colorEcho
+echo off
+ "%~2"
+findstr /v /a:%1 /R "^$" "%~2" nul
+del "%~2" > nul 2>&1
\ No newline at end of file
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index da8876a47cd60fe80e2270d7561bb3754343998d..6fd7237b339dc1d2eeeee1d1f5965ec77d03b53d 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -19,6 +19,7 @@ import subprocess
 import time
 import base64
 import json
+import platform
 from distutils.log import warn as printf
 from fabric2 import Connection
 sys.path.append("../pytest")
@@ -40,9 +41,12 @@ if __name__ == "__main__":
     stop = 0
     restart = False
     windows = 0
+    if platform.system().lower() == 'windows':
+        windows = 1
     updateCfgDict = {}
-    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrwd:', [
-        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows', 'updateCfgDict'])
+    execCmd = ""
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:e:', [
+        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'execCmd'])
     for key, value in opts:
         if key in ['-h', '--help']:
             tdLog.printNoPrefix(
@@ -55,8 +59,8 @@ if __name__ == "__main__":
             tdLog.printNoPrefix('-c Test Cluster Flag')
             tdLog.printNoPrefix('-g valgrind Test Flag')
             tdLog.printNoPrefix('-r taosd restart test')
-            tdLog.printNoPrefix('-w taos on windows')
             tdLog.printNoPrefix('-d update cfg dict, base64 json str')
+            tdLog.printNoPrefix('-e eval str to run')
             sys.exit(0)
 
         if key in ['-r', '--restart']:
@@ -89,15 +93,25 @@ if __name__ == "__main__":
         if key in ['-s', '--stop']:
             stop = 1
 
-        if key in ['-w', '--windows']:
-            windows = 1
-
         if key in ['-d', '--updateCfgDict']:
             try:
                 updateCfgDict = eval(base64.b64decode(value.encode()).decode())
             except:
                 print('updateCfgDict convert fail.')
                 sys.exit(0)
+
+        if key in ['-e', '--execCmd']:
+            try:
+                execCmd = base64.b64decode(value.encode()).decode()
+            except:
+                print('execCmd convert fail.')
+                sys.exit(0)
+
+    if not execCmd == "":
+        tdDnodes.init(deployPath)
+        exec(execCmd)
+        quit()
+
     if (stop != 0):
         if (valgrind == 0):
             toBeKilled = "taosd"
@@ -137,7 +151,7 @@ if __name__ == "__main__":
         if windows:
             tdCases.logSql(logSql)
             tdLog.info("Procedures for testing self-deployment")
-            tdDnodes.init(deployPath)
+            tdDnodes.init(deployPath, masterIp)
             tdDnodes.setTestCluster(testCluster)
             tdDnodes.setValgrind(valgrind)
             tdDnodes.stopAll()
@@ -162,15 +176,7 @@ if __name__ == "__main__":
             else:
                 pass
             tdDnodes.deploy(1,updateCfgDict)
-            if masterIp == "" or masterIp == "localhost":
-                tdDnodes.startWin(1)
-            else:
-                remote_conn = Connection("root@%s"%host)
-                with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
-                    remote_conn.run("python3 ./test.py %s"%updateCfgDictStr)
-                # print("docker exec -d cross_platform bash -c \"cd ~/test/community/tests/system-test && python3 ./test.py %s\""%updateCfgDictStr)
-                # os.system("docker exec -d cross_platform bash -c \"cd ~/test/community/tests/system-test && python3 ./test.py %s\""%updateCfgDictStr)
-                # time.sleep(2)
+            tdDnodes.start(1)
             conn = taos.connect(
                 host="%s"%(host),
                 config=tdDnodes.sim.getCfgDir())
@@ -179,7 +185,7 @@ if __name__ == "__main__":
             else:
                 tdCases.runAllWindows(conn)
         else:
-            tdDnodes.init(deployPath)
+            tdDnodes.init(deployPath, masterIp)
             tdDnodes.setTestCluster(testCluster)
             tdDnodes.setValgrind(valgrind)
             tdDnodes.stopAll()
diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c
index 5641587c569aa2a2ea3f5a3f18c398f33979714c..8be2822c0ae8d6c2176895d0a6e51449d81ea44b 100644
--- a/tests/test/c/sdbDump.c
+++ b/tests/test/c/sdbDump.c
@@ -20,13 +20,13 @@
 #include "tconfig.h"
 #include "tjson.h"
 
-#define TMP_DNODE_DIR "/tmp/dumpsdb"
-#define TMP_MNODE_DIR "/tmp/dumpsdb/mnode"
-#define TMP_SDB_DATA_DIR "/tmp/dumpsdb/mnode/data"
-#define TMP_SDB_SYNC_DIR "/tmp/dumpsdb/mnode/sync"
-#define TMP_SDB_DATA_FILE "/tmp/dumpsdb/mnode/data/sdb.data"
-#define TMP_SDB_RAFT_CFG_FILE "/tmp/dumpsdb/mnode/sync/raft_config.json"
-#define TMP_SDB_RAFT_STORE_FILE "/tmp/dumpsdb/mnode/sync/raft_store.json"
+#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb"
+#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb/mnode"
+#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb/mnode/data"
+#define TMP_SDB_SYNC_DIR TD_TMP_DIR_PATH "dumpsdb/mnode/sync"
+#define TMP_SDB_DATA_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/data/sdb.data"
+#define TMP_SDB_RAFT_CFG_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_config.json"
+#define TMP_SDB_RAFT_STORE_FILE TD_TMP_DIR_PATH "dumpsdb/mnode/sync/raft_store.json"
 
 void reportStartup(const char *name, const char *desc) {}
 
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index a866488d3ad2d239c47b3279f506e755737b88bf..851d9a2070b75f7863f8e55f5779e9bac90607db 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -587,6 +587,8 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
   int32_t width = (int32_t)strlen(field->name);
 
   switch (field->type) {
+    case TSDB_DATA_TYPE_NULL:
+      return TMAX(4, width);  // null
     case TSDB_DATA_TYPE_BOOL:
       return TMAX(5, width);  // 'false'
 
diff --git a/tools/taos-tools b/tools/taos-tools
index 4d83d8c62973506f760bcaa3a33f4665ed9046d0..2f3dfddd4d9a869e706ba3cf98fb6d769404cd7c 160000
--- a/tools/taos-tools
+++ b/tools/taos-tools
@@ -1 +1 @@
-Subproject commit 4d83d8c62973506f760bcaa3a33f4665ed9046d0
+Subproject commit 2f3dfddd4d9a869e706ba3cf98fb6d769404cd7c